-rw-r--r--Documentation/ABI/testing/sysfs-class-regulator136
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/DocBook/regulator.tmpl304
-rw-r--r--Documentation/RCU/00-INDEX2
-rw-r--r--Documentation/RCU/rcubarrier.txt304
-rw-r--r--Documentation/bad_memory.txt45
-rw-r--r--Documentation/cgroups/cgroups.txt9
-rw-r--r--Documentation/controllers/memcg_test.txt342
-rw-r--r--Documentation/controllers/memory.txt135
-rw-r--r--Documentation/crypto/async-tx-api.txt96
-rw-r--r--Documentation/development-process/4.Coding6
-rw-r--r--Documentation/dmaengine.txt1
-rw-r--r--Documentation/filesystems/btrfs.txt91
-rw-r--r--Documentation/filesystems/ext4.txt85
-rw-r--r--Documentation/hwmon/abituguru-datasheet4
-rw-r--r--Documentation/kernel-parameters.txt53
-rw-r--r--Documentation/powerpc/dts-bindings/4xx/ndfc.txt39
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/board.txt32
-rw-r--r--Documentation/scsi/scsi_fc_transport.txt4
-rw-r--r--Documentation/w1/masters/00-INDEX2
-rw-r--r--Documentation/w1/masters/mxc-w111
-rw-r--r--Documentation/w1/w1.netlink164
-rw-r--r--MAINTAINERS11
-rw-r--r--arch/arm/configs/clps7500_defconfig801
-rw-r--r--arch/arm/kernel/isa.c1
-rw-r--r--arch/arm/mach-at91/at91cap9.c1
-rw-r--r--arch/arm/mach-at91/at91rm9200.c1
-rw-r--r--arch/arm/mach-at91/at91sam9260.c1
-rw-r--r--arch/arm/mach-at91/at91sam9261.c1
-rw-r--r--arch/arm/mach-at91/at91sam9263.c1
-rw-r--r--arch/arm/mach-at91/at91sam9rl.c1
-rw-r--r--arch/arm/mach-at91/board-sam9rlek.c1
-rw-r--r--arch/arm/mach-clps711x/edb7211-mm.c1
-rw-r--r--arch/arm/mach-clps711x/fortunet.c1
-rw-r--r--arch/arm/mach-davinci/devices.c1
-rw-r--r--arch/arm/mach-davinci/include/mach/gpio.h1
-rw-r--r--arch/arm/mach-footbridge/common.c9
-rw-r--r--arch/arm/mach-footbridge/common.h1
-rw-r--r--arch/arm/mach-footbridge/dc21285.c23
-rw-r--r--arch/arm/mach-footbridge/isa-irq.c2
-rw-r--r--arch/arm/mach-h720x/h7202-eval.c1
-rw-r--r--arch/arm/mach-kirkwood/common.c1
-rw-r--r--arch/arm/mach-kirkwood/pcie.c1
-rw-r--r--arch/arm/mach-ks8695/devices.c1
-rw-r--r--arch/arm/mach-msm/devices.c1
-rw-r--r--arch/arm/mach-mv78xx0/pcie.c1
-rw-r--r--arch/arm/mach-mx2/devices.c1
-rw-r--r--arch/arm/mach-mx3/devices.c1
-rw-r--r--arch/arm/mach-netx/fb.c2
-rw-r--r--arch/arm/mach-netx/time.c2
-rw-r--r--arch/arm/mach-netx/xc.c1
-rw-r--r--arch/arm/mach-omap1/mcbsp.c1
-rw-r--r--arch/arm/mach-omap2/mcbsp.c1
-rw-r--r--arch/arm/mach-orion5x/pci.c1
-rw-r--r--arch/arm/mach-pnx4008/gpio.c1
-rw-r--r--arch/arm/mach-pnx4008/i2c.c1
-rw-r--r--arch/arm/mach-pxa/corgi.c54
-rw-r--r--arch/arm/mach-pxa/e350.c1
-rw-r--r--arch/arm/mach-pxa/e400.c1
-rw-r--r--arch/arm/mach-pxa/e740.c1
-rw-r--r--arch/arm/mach-pxa/e750.c53
-rw-r--r--arch/arm/mach-pxa/e800.c1
-rw-r--r--arch/arm/mach-pxa/include/mach/pxa3xx-regs.h2
-rw-r--r--arch/arm/mach-pxa/poodle.c51
-rw-r--r--arch/arm/mach-pxa/spitz.c77
-rw-r--r--arch/arm/mach-realview/platsmp.c1
-rw-r--r--arch/arm/mach-s3c2410/include/mach/gpio.h1
-rw-r--r--arch/arm/mach-s3c2410/include/mach/irqs.h4
-rw-r--r--arch/arm/mach-s3c2440/mach-at2440evb.c2
-rw-r--r--arch/arm/mach-s3c6400/include/mach/irqs.h4
-rw-r--r--arch/arm/plat-omap/i2c.c1
-rw-r--r--arch/arm/plat-s3c/dev-fb.c1
-rw-r--r--arch/arm/plat-s3c/dev-i2c0.c1
-rw-r--r--arch/arm/plat-s3c/dev-i2c1.c1
-rw-r--r--arch/arm/plat-s3c24xx/gpiolib.c18
-rw-r--r--arch/arm/plat-s3c24xx/pwm.c2
-rw-r--r--arch/arm/plat-s3c64xx/include/plat/irqs.h2
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c15
-rw-r--r--arch/ia64/include/asm/acpi-ext.h1
-rw-r--r--arch/ia64/include/asm/sn/acpi.h2
-rw-r--r--arch/ia64/kernel/acpi.c1
-rw-r--r--arch/ia64/sn/kernel/io_acpi_init.c103
-rw-r--r--arch/ia64/sn/kernel/io_common.c5
-rw-r--r--arch/parisc/Makefile2
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/asm/byteorder.h77
-rw-r--r--arch/parisc/include/asm/checksum.h2
-rw-r--r--arch/parisc/include/asm/io.h12
-rw-r--r--arch/parisc/include/asm/mmu_context.h13
-rw-r--r--arch/parisc/include/asm/processor.h4
-rw-r--r--arch/parisc/include/asm/swab.h66
-rw-r--r--arch/parisc/include/asm/uaccess.h2
-rw-r--r--arch/parisc/kernel/drivers.c40
-rw-r--r--arch/parisc/kernel/hpmc.S8
-rw-r--r--arch/parisc/kernel/irq.c11
-rw-r--r--arch/parisc/kernel/pdc_cons.c2
-rw-r--r--arch/parisc/kernel/perf.c4
-rw-r--r--arch/parisc/kernel/processor.c68
-rw-r--r--arch/parisc/kernel/setup.c11
-rw-r--r--arch/parisc/kernel/smp.c32
-rw-r--r--arch/parisc/kernel/time.c4
-rw-r--r--arch/parisc/kernel/topology.c4
-rw-r--r--arch/parisc/kernel/traps.c9
-rw-r--r--arch/parisc/kernel/unwind.c2
-rw-r--r--arch/parisc/lib/iomap.c2
-rw-r--r--arch/parisc/lib/memcpy.c2
-rw-r--r--arch/parisc/mm/fault.c58
-rw-r--r--arch/powerpc/Kconfig5
-rw-r--r--arch/powerpc/boot/Makefile2
-rw-r--r--arch/powerpc/boot/dts/mpc836x_mds.dts43
-rw-r--r--arch/powerpc/boot/dts/mpc836x_rdk.dts19
-rw-r--r--arch/powerpc/boot/dts/mpc8641_hpcn.dts56
-rw-r--r--arch/powerpc/boot/install.sh14
-rw-r--r--arch/powerpc/configs/85xx/mpc8572_ds_defconfig43
-rw-r--r--arch/powerpc/include/asm/cell-pmu.h2
-rw-r--r--arch/powerpc/include/asm/ioctls.h2
-rw-r--r--arch/powerpc/include/asm/kexec.h55
-rw-r--r--arch/powerpc/include/asm/oprofile_impl.h6
-rw-r--r--arch/powerpc/include/asm/ps3.h2
-rw-r--r--arch/powerpc/include/asm/qe.h37
-rw-r--r--arch/powerpc/include/asm/qe_ic.h21
-rw-r--r--arch/powerpc/include/asm/spu.h2
-rw-r--r--arch/powerpc/kernel/Makefile2
-rw-r--r--arch/powerpc/kernel/cacheinfo.c837
-rw-r--r--arch/powerpc/kernel/cacheinfo.h8
-rw-r--r--arch/powerpc/kernel/pci-common.c71
-rw-r--r--arch/powerpc/kernel/pci_64.c9
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c1
-rw-r--r--arch/powerpc/kernel/prom.c14
-rw-r--r--arch/powerpc/kernel/prom_init.c2
-rw-r--r--arch/powerpc/kernel/sysfs.c300
-rw-r--r--arch/powerpc/mm/mmu_decl.h6
-rw-r--r--arch/powerpc/mm/numa.c62
-rw-r--r--arch/powerpc/mm/pgtable_32.c3
-rw-r--r--arch/powerpc/mm/tlb_nohash.c3
-rw-r--r--arch/powerpc/oprofile/cell/pr_util.h13
-rw-r--r--arch/powerpc/oprofile/cell/spu_profiler.c56
-rw-r--r--arch/powerpc/oprofile/common.c22
-rw-r--r--arch/powerpc/oprofile/op_model_cell.c748
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc831x_rdb.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.c9
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_rdb.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_mds.c81
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_rdk.c6
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_mds.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_rdb.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc83xx.h1
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c7
-rw-r--r--arch/powerpc/platforms/85xx/smp.c1
-rw-r--r--arch/powerpc/platforms/Kconfig11
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype2
-rw-r--r--arch/powerpc/platforms/cell/beat_htab.c21
-rw-r--r--arch/powerpc/platforms/cell/beat_udbg.c4
-rw-r--r--arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c2
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c2
-rw-r--r--arch/powerpc/platforms/cell/io-workarounds.c4
-rw-r--r--arch/powerpc/platforms/cell/iommu.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h2
-rw-r--r--arch/powerpc/platforms/iseries/Kconfig5
-rw-r--r--arch/powerpc/platforms/iseries/setup.c11
-rw-r--r--arch/powerpc/platforms/pasemi/cpufreq.c2
-rw-r--r--arch/powerpc/platforms/pasemi/dma_lib.c2
-rw-r--r--arch/powerpc/platforms/powermac/pci.c2
-rw-r--r--arch/powerpc/platforms/powermac/time.c11
-rw-r--r--arch/powerpc/platforms/ps3/device-init.c37
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c7
-rw-r--r--arch/powerpc/sysdev/fsl_soc.h5
-rw-r--r--arch/powerpc/sysdev/qe_lib/Kconfig3
-rw-r--r--arch/powerpc/sysdev/qe_lib/gpio.c195
-rw-r--r--arch/powerpc/sysdev/simple_gpio.c155
-rw-r--r--arch/powerpc/sysdev/simple_gpio.h12
-rw-r--r--arch/s390/include/asm/chpid.h2
-rw-r--r--arch/s390/include/asm/chsc.h1
-rw-r--r--arch/s390/include/asm/cmb.h3
-rw-r--r--arch/s390/include/asm/dasd.h2
-rw-r--r--arch/s390/include/asm/kvm.h2
-rw-r--r--arch/s390/include/asm/posix_types.h4
-rw-r--r--arch/s390/include/asm/ptrace.h7
-rw-r--r--arch/s390/include/asm/qeth.h1
-rw-r--r--arch/s390/include/asm/schid.h2
-rw-r--r--arch/s390/include/asm/swab.h2
-rw-r--r--arch/s390/include/asm/types.h6
-rw-r--r--arch/s390/kernel/entry.h2
-rw-r--r--arch/s390/kernel/smp.c3
-rw-r--r--arch/s390/kernel/sys_s390.c19
-rw-r--r--arch/s390/kernel/vdso.c3
-rw-r--r--arch/s390/kernel/vdso32/gettimeofday.S4
-rw-r--r--arch/s390/kvm/diag.c2
-rw-r--r--arch/s390/kvm/interrupt.c6
-rw-r--r--arch/s390/kvm/priv.c2
-rw-r--r--arch/sparc/kernel/sun4m_smp.c5
-rw-r--r--arch/x86/include/asm/bitops.h2
-rw-r--r--arch/x86/kernel/acpi/boot.c17
-rw-r--r--arch/x86/kernel/acpi/cstate.c4
-rw-r--r--arch/x86/kernel/acpi/sleep.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/longhaul.c2
-rw-r--r--arch/x86/kernel/e820.c21
-rw-r--r--arch/x86/kernel/early-quirks.c22
-rw-r--r--arch/x86/oprofile/op_model_amd.c149
-rw-r--r--block/Kconfig6
-rw-r--r--block/blk-map.c19
-rw-r--r--crypto/async_tx/async_tx.c350
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile5
-rw-r--r--drivers/acpi/Kconfig84
-rw-r--r--drivers/acpi/Makefile25
-rw-r--r--drivers/acpi/acpica/Makefile44
-rw-r--r--drivers/acpi/acpica/accommon.h63
-rw-r--r--drivers/acpi/acpica/acconfig.h (renamed from include/acpi/acconfig.h)8
-rw-r--r--drivers/acpi/acpica/acdebug.h (renamed from include/acpi/acdebug.h)0
-rw-r--r--drivers/acpi/acpica/acdispat.h (renamed from include/acpi/acdispat.h)0
-rw-r--r--drivers/acpi/acpica/acevents.h (renamed from include/acpi/acevents.h)6
-rw-r--r--drivers/acpi/acpica/acglobal.h (renamed from include/acpi/acglobal.h)9
-rw-r--r--drivers/acpi/acpica/achware.h (renamed from include/acpi/achware.h)22
-rw-r--r--drivers/acpi/acpica/acinterp.h (renamed from include/acpi/acinterp.h)0
-rw-r--r--drivers/acpi/acpica/aclocal.h (renamed from include/acpi/aclocal.h)86
-rw-r--r--drivers/acpi/acpica/acmacros.h (renamed from include/acpi/acmacros.h)122
-rw-r--r--drivers/acpi/acpica/acnamesp.h (renamed from include/acpi/acnamesp.h)5
-rw-r--r--drivers/acpi/acpica/acobject.h (renamed from include/acpi/acobject.h)0
-rw-r--r--drivers/acpi/acpica/acopcode.h (renamed from include/acpi/acopcode.h)0
-rw-r--r--drivers/acpi/acpica/acparser.h (renamed from include/acpi/acparser.h)0
-rw-r--r--drivers/acpi/acpica/acpredef.h (renamed from include/acpi/acpredef.h)0
-rw-r--r--drivers/acpi/acpica/acresrc.h (renamed from include/acpi/acresrc.h)0
-rw-r--r--drivers/acpi/acpica/acstruct.h (renamed from include/acpi/acstruct.h)0
-rw-r--r--drivers/acpi/acpica/actables.h (renamed from include/acpi/actables.h)2
-rw-r--r--drivers/acpi/acpica/acutils.h (renamed from include/acpi/acutils.h)36
-rw-r--r--drivers/acpi/acpica/amlcode.h (renamed from include/acpi/amlcode.h)0
-rw-r--r--drivers/acpi/acpica/amlresrc.h (renamed from include/acpi/amlresrc.h)0
-rw-r--r--drivers/acpi/acpica/dsfield.c (renamed from drivers/acpi/dispatcher/dsfield.c)11
-rw-r--r--drivers/acpi/acpica/dsinit.c (renamed from drivers/acpi/dispatcher/dsinit.c)7
-rw-r--r--drivers/acpi/acpica/dsmethod.c (renamed from drivers/acpi/dispatcher/dsmethod.c)14
-rw-r--r--drivers/acpi/acpica/dsmthdat.c (renamed from drivers/acpi/dispatcher/dsmthdat.c)7
-rw-r--r--drivers/acpi/acpica/dsobject.c (renamed from drivers/acpi/dispatcher/dsobject.c)11
-rw-r--r--drivers/acpi/acpica/dsopcode.c (renamed from drivers/acpi/dispatcher/dsopcode.c)66
-rw-r--r--drivers/acpi/acpica/dsutils.c (renamed from drivers/acpi/dispatcher/dsutils.c)13
-rw-r--r--drivers/acpi/acpica/dswexec.c (renamed from drivers/acpi/dispatcher/dswexec.c)13
-rw-r--r--drivers/acpi/acpica/dswload.c (renamed from drivers/acpi/dispatcher/dswload.c)13
-rw-r--r--drivers/acpi/acpica/dswscope.c (renamed from drivers/acpi/dispatcher/dswscope.c)3
-rw-r--r--drivers/acpi/acpica/dswstate.c (renamed from drivers/acpi/dispatcher/dswstate.c)7
-rw-r--r--drivers/acpi/acpica/evevent.c (renamed from drivers/acpi/events/evevent.c)17
-rw-r--r--drivers/acpi/acpica/evgpe.c (renamed from drivers/acpi/events/evgpe.c)53
-rw-r--r--drivers/acpi/acpica/evgpeblk.c (renamed from drivers/acpi/events/evgpeblk.c)82
-rw-r--r--drivers/acpi/acpica/evmisc.c (renamed from drivers/acpi/events/evmisc.c)62
-rw-r--r--drivers/acpi/acpica/evregion.c (renamed from drivers/acpi/events/evregion.c)140
-rw-r--r--drivers/acpi/acpica/evrgnini.c (renamed from drivers/acpi/events/evrgnini.c)46
-rw-r--r--drivers/acpi/acpica/evsci.c (renamed from drivers/acpi/events/evsci.c)13
-rw-r--r--drivers/acpi/acpica/evxface.c (renamed from drivers/acpi/events/evxface.c)9
-rw-r--r--drivers/acpi/acpica/evxfevnt.c (renamed from drivers/acpi/events/evxfevnt.c)170
-rw-r--r--drivers/acpi/acpica/evxfregn.c (renamed from drivers/acpi/events/evxfregn.c)5
-rw-r--r--drivers/acpi/acpica/exconfig.c (renamed from drivers/acpi/executer/exconfig.c)9
-rw-r--r--drivers/acpi/acpica/exconvrt.c (renamed from drivers/acpi/executer/exconvrt.c)5
-rw-r--r--drivers/acpi/acpica/excreate.c (renamed from drivers/acpi/executer/excreate.c)7
-rw-r--r--drivers/acpi/acpica/exdump.c (renamed from drivers/acpi/executer/exdump.c)7
-rw-r--r--drivers/acpi/acpica/exfield.c (renamed from drivers/acpi/executer/exfield.c)5
-rw-r--r--drivers/acpi/acpica/exfldio.c (renamed from drivers/acpi/executer/exfldio.c)20
-rw-r--r--drivers/acpi/acpica/exmisc.c (renamed from drivers/acpi/executer/exmisc.c)7
-rw-r--r--drivers/acpi/acpica/exmutex.c (renamed from drivers/acpi/executer/exmutex.c)5
-rw-r--r--drivers/acpi/acpica/exnames.c (renamed from drivers/acpi/executer/exnames.c)5
-rw-r--r--drivers/acpi/acpica/exoparg1.c (renamed from drivers/acpi/executer/exoparg1.c)11
-rw-r--r--drivers/acpi/acpica/exoparg2.c (renamed from drivers/acpi/executer/exoparg2.c)9
-rw-r--r--drivers/acpi/acpica/exoparg3.c (renamed from drivers/acpi/executer/exoparg3.c)7
-rw-r--r--drivers/acpi/acpica/exoparg6.c (renamed from drivers/acpi/executer/exoparg6.c)7
-rw-r--r--drivers/acpi/acpica/exprep.c (renamed from drivers/acpi/executer/exprep.c)7
-rw-r--r--drivers/acpi/acpica/exregion.c (renamed from drivers/acpi/executer/exregion.c)3
-rw-r--r--drivers/acpi/acpica/exresnte.c (renamed from drivers/acpi/executer/exresnte.c)7
-rw-r--r--drivers/acpi/acpica/exresolv.c (renamed from drivers/acpi/executer/exresolv.c)9
-rw-r--r--drivers/acpi/acpica/exresop.c (renamed from drivers/acpi/executer/exresop.c)9
-rw-r--r--drivers/acpi/acpica/exstore.c (renamed from drivers/acpi/executer/exstore.c)9
-rw-r--r--drivers/acpi/acpica/exstoren.c (renamed from drivers/acpi/executer/exstoren.c)5
-rw-r--r--drivers/acpi/acpica/exstorob.c (renamed from drivers/acpi/executer/exstorob.c)3
-rw-r--r--drivers/acpi/acpica/exsystem.c (renamed from drivers/acpi/executer/exsystem.c)3
-rw-r--r--drivers/acpi/acpica/exutils.c (renamed from drivers/acpi/executer/exutils.c)5
-rw-r--r--drivers/acpi/acpica/hwacpi.c (renamed from drivers/acpi/hardware/hwacpi.c)1
-rw-r--r--drivers/acpi/acpica/hwgpe.c (renamed from drivers/acpi/hardware/hwgpe.c)78
-rw-r--r--drivers/acpi/acpica/hwregs.c353
-rw-r--r--drivers/acpi/acpica/hwsleep.c (renamed from drivers/acpi/hardware/hwsleep.c)76
-rw-r--r--drivers/acpi/acpica/hwtimer.c (renamed from drivers/acpi/hardware/hwtimer.c)1
-rw-r--r--drivers/acpi/acpica/hwxface.c (renamed from drivers/acpi/hardware/hwregs.c)744
-rw-r--r--drivers/acpi/acpica/nsaccess.c (renamed from drivers/acpi/namespace/nsaccess.c)18
-rw-r--r--drivers/acpi/acpica/nsalloc.c (renamed from drivers/acpi/namespace/nsalloc.c)3
-rw-r--r--drivers/acpi/acpica/nsdump.c (renamed from drivers/acpi/namespace/nsdump.c)3
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c (renamed from drivers/acpi/namespace/nsdumpdv.c)3
-rw-r--r--drivers/acpi/acpica/nseval.c (renamed from drivers/acpi/namespace/nseval.c)77
-rw-r--r--drivers/acpi/acpica/nsinit.c (renamed from drivers/acpi/namespace/nsinit.c)7
-rw-r--r--drivers/acpi/acpica/nsload.c (renamed from drivers/acpi/namespace/nsload.c)7
-rw-r--r--drivers/acpi/acpica/nsnames.c (renamed from drivers/acpi/namespace/nsnames.c)5
-rw-r--r--drivers/acpi/acpica/nsobject.c (renamed from drivers/acpi/namespace/nsobject.c)3
-rw-r--r--drivers/acpi/acpica/nsparse.c (renamed from drivers/acpi/namespace/nsparse.c)9
-rw-r--r--drivers/acpi/acpica/nspredef.c (renamed from drivers/acpi/namespace/nspredef.c)261
-rw-r--r--drivers/acpi/acpica/nssearch.c (renamed from drivers/acpi/namespace/nssearch.c)3
-rw-r--r--drivers/acpi/acpica/nsutils.c (renamed from drivers/acpi/namespace/nsutils.c)15
-rw-r--r--drivers/acpi/acpica/nswalk.c (renamed from drivers/acpi/namespace/nswalk.c)3
-rw-r--r--drivers/acpi/acpica/nsxfeval.c (renamed from drivers/acpi/namespace/nsxfeval.c)5
-rw-r--r--drivers/acpi/acpica/nsxfname.c (renamed from drivers/acpi/namespace/nsxfname.c)3
-rw-r--r--drivers/acpi/acpica/nsxfobj.c (renamed from drivers/acpi/namespace/nsxfobj.c)3
-rw-r--r--drivers/acpi/acpica/psargs.c (renamed from drivers/acpi/parser/psargs.c)9
-rw-r--r--drivers/acpi/acpica/psloop.c (renamed from drivers/acpi/parser/psloop.c)7
-rw-r--r--drivers/acpi/acpica/psopcode.c (renamed from drivers/acpi/parser/psopcode.c)7
-rw-r--r--drivers/acpi/acpica/psparse.c (renamed from drivers/acpi/parser/psparse.c)23
-rw-r--r--drivers/acpi/acpica/psscope.c (renamed from drivers/acpi/parser/psscope.c)3
-rw-r--r--drivers/acpi/acpica/pstree.c (renamed from drivers/acpi/parser/pstree.c)5
-rw-r--r--drivers/acpi/acpica/psutils.c (renamed from drivers/acpi/parser/psutils.c)5
-rw-r--r--drivers/acpi/acpica/pswalk.c (renamed from drivers/acpi/parser/pswalk.c)3
-rw-r--r--drivers/acpi/acpica/psxface.c (renamed from drivers/acpi/parser/psxface.c)40
-rw-r--r--drivers/acpi/acpica/rsaddr.c (renamed from drivers/acpi/resources/rsaddr.c)3
-rw-r--r--drivers/acpi/acpica/rscalc.c (renamed from drivers/acpi/resources/rscalc.c)5
-rw-r--r--drivers/acpi/acpica/rscreate.c (renamed from drivers/acpi/resources/rscreate.c)5
-rw-r--r--drivers/acpi/acpica/rsdump.c (renamed from drivers/acpi/resources/rsdump.c)3
-rw-r--r--drivers/acpi/acpica/rsinfo.c (renamed from drivers/acpi/resources/rsinfo.c)3
-rw-r--r--drivers/acpi/acpica/rsio.c (renamed from drivers/acpi/resources/rsio.c)3
-rw-r--r--drivers/acpi/acpica/rsirq.c (renamed from drivers/acpi/resources/rsirq.c)3
-rw-r--r--drivers/acpi/acpica/rslist.c (renamed from drivers/acpi/resources/rslist.c)3
-rw-r--r--drivers/acpi/acpica/rsmemory.c (renamed from drivers/acpi/resources/rsmemory.c)3
-rw-r--r--drivers/acpi/acpica/rsmisc.c (renamed from drivers/acpi/resources/rsmisc.c)3
-rw-r--r--drivers/acpi/acpica/rsutils.c (renamed from drivers/acpi/resources/rsutils.c)5
-rw-r--r--drivers/acpi/acpica/rsxface.c (renamed from drivers/acpi/resources/rsxface.c)5
-rw-r--r--drivers/acpi/acpica/tbfadt.c (renamed from drivers/acpi/tables/tbfadt.c)252
-rw-r--r--drivers/acpi/acpica/tbfind.c (renamed from drivers/acpi/tables/tbfind.c)3
-rw-r--r--drivers/acpi/acpica/tbinstal.c (renamed from drivers/acpi/tables/tbinstal.c)5
-rw-r--r--drivers/acpi/acpica/tbutils.c (renamed from drivers/acpi/tables/tbutils.c)30
-rw-r--r--drivers/acpi/acpica/tbxface.c (renamed from drivers/acpi/tables/tbxface.c)5
-rw-r--r--drivers/acpi/acpica/tbxfroot.c (renamed from drivers/acpi/tables/tbxfroot.c)3
-rw-r--r--drivers/acpi/acpica/utalloc.c (renamed from drivers/acpi/utilities/utalloc.c)3
-rw-r--r--drivers/acpi/acpica/utcopy.c (renamed from drivers/acpi/utilities/utcopy.c)3
-rw-r--r--drivers/acpi/acpica/utdebug.c (renamed from drivers/acpi/utilities/utdebug.c)95
-rw-r--r--drivers/acpi/acpica/utdelete.c (renamed from drivers/acpi/utilities/utdelete.c)7
-rw-r--r--drivers/acpi/acpica/uteval.c (renamed from drivers/acpi/utilities/uteval.c)11
-rw-r--r--drivers/acpi/acpica/utglobal.c (renamed from drivers/acpi/utilities/utglobal.c)12
-rw-r--r--drivers/acpi/acpica/utinit.c (renamed from drivers/acpi/utilities/utinit.c)7
-rw-r--r--drivers/acpi/acpica/utmath.c (renamed from drivers/acpi/utilities/utmath.c)1
-rw-r--r--drivers/acpi/acpica/utmisc.c (renamed from drivers/acpi/utilities/utmisc.c)23
-rw-r--r--drivers/acpi/acpica/utmutex.c (renamed from drivers/acpi/utilities/utmutex.c)1
-rw-r--r--drivers/acpi/acpica/utobject.c (renamed from drivers/acpi/utilities/utobject.c)3
-rw-r--r--drivers/acpi/acpica/utresrc.c (renamed from drivers/acpi/utilities/utresrc.c)3
-rw-r--r--drivers/acpi/acpica/utstate.c (renamed from drivers/acpi/utilities/utstate.c)1
-rw-r--r--drivers/acpi/acpica/utxface.c (renamed from drivers/acpi/utilities/utxface.c)18
-rw-r--r--drivers/acpi/battery.c5
-rw-r--r--drivers/acpi/cm_sbs.c3
-rw-r--r--drivers/acpi/debug.c1
-rw-r--r--drivers/acpi/dispatcher/Makefile9
-rw-r--r--drivers/acpi/ec.c57
-rw-r--r--drivers/acpi/events/Makefile9
-rw-r--r--drivers/acpi/executer/Makefile10
-rw-r--r--drivers/acpi/hardware/Makefile9
-rw-r--r--drivers/acpi/main.c (renamed from drivers/acpi/sleep/main.c)79
-rw-r--r--drivers/acpi/namespace/Makefile12
-rw-r--r--drivers/acpi/numa.c1
-rw-r--r--drivers/acpi/osl.c4
-rw-r--r--drivers/acpi/parser/Makefile8
-rw-r--r--drivers/acpi/pci_bind.c90
-rw-r--r--drivers/acpi/pci_irq.c472
-rw-r--r--drivers/acpi/pci_link.c6
-rw-r--r--drivers/acpi/power.c6
-rw-r--r--drivers/acpi/proc.c (renamed from drivers/acpi/sleep/proc.c)65
-rw-r--r--drivers/acpi/reboot.c2
-rw-r--r--drivers/acpi/resources/Makefile10
-rw-r--r--drivers/acpi/sbshc.c1
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/sleep.h (renamed from drivers/acpi/sleep/sleep.h)0
-rw-r--r--drivers/acpi/sleep/Makefile5
-rw-r--r--drivers/acpi/system.c63
-rw-r--r--drivers/acpi/tables/Makefile7
-rw-r--r--drivers/acpi/utilities/Makefile9
-rw-r--r--drivers/acpi/utilities/utcache.c314
-rw-r--r--drivers/acpi/video.c20
-rw-r--r--drivers/acpi/video_detect.c4
-rw-r--r--drivers/acpi/wakeup.c (renamed from drivers/acpi/sleep/wakeup.c)6
-rw-r--r--drivers/amba/bus.c3
-rw-r--r--drivers/ata/ahci.c10
-rw-r--r--drivers/ata/ata_piix.c51
-rw-r--r--drivers/ata/libata-acpi.c6
-rw-r--r--drivers/ata/libata-core.c21
-rw-r--r--drivers/ata/libata-sff.c209
-rw-r--r--drivers/ata/pata_acpi.c6
-rw-r--r--drivers/ata/pata_ali.c107
-rw-r--r--drivers/ata/pata_amd.c4
-rw-r--r--drivers/ata/pata_hpt366.c109
-rw-r--r--drivers/ata/pata_hpt3x3.c49
-rw-r--r--drivers/ata/pata_mpiix.c3
-rw-r--r--drivers/ata/pata_platform.c2
-rw-r--r--drivers/ata/pata_sil680.c4
-rw-r--r--drivers/ata/sata_sil24.c7
-rw-r--r--drivers/atm/iphase.c6
-rw-r--r--drivers/char/Kconfig1
-rw-r--r--drivers/char/hvc_beat.c4
-rw-r--r--drivers/char/hvc_iucv.c420
-rw-r--r--drivers/char/pty.c2
-rw-r--r--drivers/char/rtc.c17
-rw-r--r--drivers/char/tpm/tpm_bios.c2
-rw-r--r--drivers/char/tpm/tpm_nsc.c35
-rw-r--r--drivers/char/vt.c3
-rw-r--r--drivers/cpuidle/governors/menu.c10
-rw-r--r--drivers/dca/dca-core.c2
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/dmaengine.c778
-rw-r--r--drivers/dma/dmatest.c129
-rw-r--r--drivers/dma/dw_dmac.c119
-rw-r--r--drivers/dma/fsldma.c5
-rw-r--r--drivers/dma/ioat.c92
-rw-r--r--drivers/dma/ioat_dma.c18
-rw-r--r--drivers/dma/iop-adma.c30
-rw-r--r--drivers/dma/mv_xor.c11
-rw-r--r--drivers/firmware/dcdbas.c9
-rw-r--r--drivers/firmware/dcdbas.h2
-rw-r--r--drivers/firmware/memmap.c6
-rw-r--r--drivers/ide/ide-acpi.c6
-rw-r--r--drivers/ieee1394/eth1394.c54
-rw-r--r--drivers/ieee1394/eth1394.h1
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c2
-rw-r--r--drivers/input/mouse/pxa930_trkball.c4
-rw-r--r--drivers/isdn/hardware/eicon/debuglib.h2
-rw-r--r--drivers/isdn/hardware/eicon/os_4bri.c2
-rw-r--r--drivers/isdn/hardware/eicon/os_bri.c2
-rw-r--r--drivers/isdn/hardware/eicon/os_pri.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_net.c77
-rw-r--r--drivers/isdn/i4l/isdn_net.c69
-rw-r--r--drivers/leds/Kconfig15
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-class.c24
-rw-r--r--drivers/leds/leds-alix2.c181
-rw-r--r--drivers/leds/leds-ams-delta.c33
-rw-r--r--drivers/leds/leds-clevo-mail.c21
-rw-r--r--drivers/leds/leds-fsg.c37
-rw-r--r--drivers/leds/leds-gpio.c36
-rw-r--r--drivers/leds/leds-hp-disk.c20
-rw-r--r--drivers/leds/leds-hp6xx.c22
-rw-r--r--drivers/leds/leds-net48xx.c21
-rw-r--r--drivers/leds/leds-pca9532.c77
-rw-r--r--drivers/leds/leds-s3c24xx.c25
-rw-r--r--drivers/leds/leds-wm8350.c311
-rw-r--r--drivers/leds/leds-wrap.c27
-rw-r--r--drivers/leds/ledtrig-timer.c5
-rw-r--r--drivers/md/bitmap.c11
-rw-r--r--drivers/md/faulty.c3
-rw-r--r--drivers/md/linear.c3
-rw-r--r--drivers/md/md.c416
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/raid0.c178
-rw-r--r--drivers/md/raid1.c11
-rw-r--r--drivers/md/raid10.c3
-rw-r--r--drivers/md/raid5.c8
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c57
-rw-r--r--drivers/message/fusion/mptctl.c5
-rw-r--r--drivers/message/fusion/mptlan.c44
-rw-r--r--drivers/mfd/wm8350-core.c3
-rw-r--r--drivers/misc/Kconfig296
-rw-r--r--drivers/misc/Makefile14
-rw-r--r--drivers/misc/dell-laptop.c436
-rw-r--r--drivers/misc/enclosure.c8
-rw-r--r--drivers/misc/sgi-xp/xpnet.c68
-rw-r--r--drivers/mmc/host/atmel-mci.c103
-rw-r--r--drivers/mtd/Kconfig10
-rw-r--r--drivers/mtd/Makefile2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c12
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c18
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c14
-rw-r--r--drivers/mtd/chips/fwh_lock.h4
-rw-r--r--drivers/mtd/devices/Kconfig7
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/lart.c6
-rw-r--r--drivers/mtd/devices/m25p80.c41
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c24
-rw-r--r--drivers/mtd/devices/ps3vram.c768
-rw-r--r--drivers/mtd/ftl.c100
-rw-r--r--drivers/mtd/inftlcore.c2
-rw-r--r--drivers/mtd/inftlmount.c4
-rw-r--r--drivers/mtd/lpddr/Kconfig22
-rw-r--r--drivers/mtd/lpddr/Makefile6
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c796
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c255
-rw-r--r--drivers/mtd/maps/Kconfig21
-rw-r--r--drivers/mtd/maps/alchemy-flash.c2
-rw-r--r--drivers/mtd/maps/amd76xrom.c4
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c2
-rw-r--r--drivers/mtd/maps/ck804xrom.c4
-rw-r--r--drivers/mtd/maps/dbox2-flash.c2
-rw-r--r--drivers/mtd/maps/edb7312.c2
-rw-r--r--drivers/mtd/maps/esb2rom.c4
-rw-r--r--drivers/mtd/maps/fortunet.c2
-rw-r--r--drivers/mtd/maps/h720x-flash.c2
-rw-r--r--drivers/mtd/maps/ichxrom.c4
-rw-r--r--drivers/mtd/maps/impa7.c2
-rw-r--r--drivers/mtd/maps/ipaq-flash.c2
-rw-r--r--drivers/mtd/maps/mbx860.c2
-rw-r--r--drivers/mtd/maps/nettel.c9
-rw-r--r--drivers/mtd/maps/octagon-5066.c2
-rw-r--r--drivers/mtd/maps/physmap.c41
-rw-r--r--drivers/mtd/maps/pmcmsp-flash.c2
-rw-r--r--drivers/mtd/maps/redwood.c2
-rw-r--r--drivers/mtd/maps/rpxlite.c2
-rw-r--r--drivers/mtd/maps/sbc8240.c2
-rw-r--r--drivers/mtd/maps/scb2_flash.c8
-rw-r--r--drivers/mtd/maps/sharpsl-flash.c2
-rw-r--r--drivers/mtd/maps/tqm8xxl.c2
-rw-r--r--drivers/mtd/maps/uclinux.c4
-rw-r--r--drivers/mtd/maps/vmax301.c2
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c2
-rw-r--r--drivers/mtd/mtdchar.c6
-rw-r--r--drivers/mtd/mtdconcat.c35
-rw-r--r--drivers/mtd/mtdcore.c16
-rw-r--r--drivers/mtd/mtdoops.c9
-rw-r--r--drivers/mtd/mtdpart.c34
-rw-r--r--drivers/mtd/nand/Kconfig7
-rw-r--r--drivers/mtd/nand/alauda.c6
-rw-r--r--drivers/mtd/nand/cafe_nand.c7
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c4
-rw-r--r--drivers/mtd/nand/nand_base.c25
-rw-r--r--drivers/mtd/nand/nand_bbt.c31
-rw-r--r--drivers/mtd/nand/nandsim.c339
-rw-r--r--drivers/mtd/nand/ndfc.c269
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/mtd/nand/sharpsl.c247
-rw-r--r--drivers/mtd/nftlcore.c2
-rw-r--r--drivers/mtd/nftlmount.c4
-rw-r--r--drivers/mtd/onenand/onenand_base.c8
-rw-r--r--drivers/mtd/rfd_ftl.c29
-rw-r--r--drivers/mtd/ssfdc.c7
-rw-r--r--drivers/mtd/tests/Makefile7
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c742
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c632
-rw-r--r--drivers/mtd/tests/mtd_readtest.c253
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c502
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c330
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c525
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c530
-rw-r--r--drivers/mtd/ubi/build.c2
-rw-r--r--drivers/mtd/ubi/gluebi.c17
-rw-r--r--drivers/mtd/ubi/kapi.c2
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/acenic_firmware.h9456
-rw-r--r--drivers/net/amd8111e.c35
-rw-r--r--drivers/net/appletalk/ipddp.c25
-rw-r--r--drivers/net/atp.c32
-rw-r--r--drivers/net/b44.c29
-rw-r--r--drivers/net/bnx2x_main.c3
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/cassini.c29
-rw-r--r--drivers/net/de600.c14
-rw-r--r--drivers/net/de620.c17
-rw-r--r--drivers/net/e100.c299
-rw-r--r--drivers/net/ehea/ehea_phyp.c16
-rw-r--r--drivers/net/enc28j60.c18
-rw-r--r--drivers/net/epic100.c21
-rw-r--r--drivers/net/fealnx.c21
-rw-r--r--drivers/net/gianfar.c8
-rw-r--r--drivers/net/hp100.c32
-rw-r--r--drivers/net/ibmveth.c4
-rw-r--r--drivers/net/ibmveth.h10
-rw-r--r--drivers/net/ipg.c23
-rw-r--r--drivers/net/irda/ali-ircc.c45
-rw-r--r--drivers/net/irda/ali-ircc.h1
-rw-r--r--drivers/net/irda/au1000_ircc.h1
-rw-r--r--drivers/net/irda/au1k_ir.c9
-rw-r--r--drivers/net/irda/donauboe.h1
-rw-r--r--drivers/net/irda/irda-usb.c28
-rw-r--r--drivers/net/irda/irda-usb.h1
-rw-r--r--drivers/net/irda/kingsun-sir.c20
-rw-r--r--drivers/net/irda/ks959-sir.c22
-rw-r--r--drivers/net/irda/ksdazzle-sir.c26
-rw-r--r--drivers/net/irda/mcs7780.c49
-rw-r--r--drivers/net/irda/mcs7780.h2
-rw-r--r--drivers/net/irda/nsc-ircc.c45
-rw-r--r--drivers/net/irda/nsc-ircc.h1
-rw-r--r--drivers/net/irda/pxaficp_ir.c52
-rw-r--r--drivers/net/irda/sa1100_ir.c46
-rw-r--r--drivers/net/irda/sir-dev.h1
-rw-r--r--drivers/net/irda/sir_dev.c26
-rw-r--r--drivers/net/irda/smsc-ircc2.c38
-rw-r--r--drivers/net/irda/stir4200.c44
-rw-r--r--drivers/net/irda/via-ircc.c47
-rw-r--r--drivers/net/irda/via-ircc.h1
-rw-r--r--drivers/net/irda/vlsi_ir.c78
-rw-r--r--drivers/net/irda/vlsi_ir.h1
-rw-r--r--drivers/net/irda/w83977af_ir.c35
-rw-r--r--drivers/net/irda/w83977af_ir.h1
-rw-r--r--drivers/net/mlx4/en_params.c6
-rw-r--r--drivers/net/mlx4/en_tx.c59
-rw-r--r--drivers/net/mlx4/mlx4_en.h5
-rw-r--r--drivers/net/natsemi.c29
-rw-r--r--drivers/net/ns83820.c4
-rw-r--r--drivers/net/pcnet32.c32
-rw-r--r--drivers/net/plip.c13
-rw-r--r--drivers/net/r6040.c27
-rw-r--r--drivers/net/sb1000.c16
-rw-r--r--drivers/net/sis190.c28
-rw-r--r--drivers/net/slip.c28
-rw-r--r--drivers/net/starfire.c34
-rw-r--r--drivers/net/sundance.c23
-rw-r--r--drivers/net/sungem.c22
-rw-r--r--drivers/net/sunhme.c26
-rw-r--r--drivers/net/tlan.c26
-rw-r--r--drivers/net/tulip/de2104x.c19
-rw-r--r--drivers/net/tulip/de4x5.c20
-rw-r--r--drivers/net/tulip/dmfe.c62
-rw-r--r--drivers/net/tulip/tulip_core.c27
-rw-r--r--drivers/net/tulip/uli526x.c63
-rw-r--r--drivers/net/tulip/winbond-840.c23
-rw-r--r--drivers/net/tulip/xircom_cb.c43
-rw-r--r--drivers/net/typhoon.c22
-rw-r--r--drivers/net/usb/dm9601.c29
-rw-r--r--drivers/net/usb/kaweth.c23
-rw-r--r--drivers/net/usb/pegasus.c21
-rw-r--r--drivers/net/virtio_net.c20
-rw-r--r--drivers/net/wimax/i2400m/usb.c6
-rw-r--r--drivers/net/wireless/ath5k/dma.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c2
-rw-r--r--drivers/net/xen-netfront.c17
-rw-r--r--drivers/oprofile/buffer_sync.c188
-rw-r--r--drivers/oprofile/cpu_buffer.c316
-rw-r--r--drivers/oprofile/cpu_buffer.h89
-rw-r--r--drivers/oprofile/event_buffer.c4
-rw-r--r--drivers/oprofile/oprof.c4
-rw-r--r--drivers/oprofile/oprof.h8
-rw-r--r--drivers/oprofile/oprofile_files.c27
-rw-r--r--drivers/parisc/asp.c3
-rw-r--r--drivers/parisc/ccio-dma.c4
-rw-r--r--drivers/parisc/dino.c4
-rw-r--r--drivers/parisc/hppb.c2
-rw-r--r--drivers/parisc/lasi.c5
-rw-r--r--drivers/parisc/lba_pci.c2
-rw-r--r--drivers/parisc/sba_iommu.c9
-rw-r--r--drivers/parisc/wax.c3
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c1
-rw-r--r--drivers/pci/hotplug/pciehp.h1
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/platform/Kconfig5
-rw-r--r--drivers/platform/Makefile5
-rw-r--r--drivers/platform/x86/Kconfig375
-rw-r--r--drivers/platform/x86/Makefile19
-rw-r--r--drivers/platform/x86/acer-wmi.c (renamed from drivers/misc/acer-wmi.c)0
-rw-r--r--drivers/platform/x86/asus-laptop.c (renamed from drivers/misc/asus-laptop.c)0
-rw-r--r--drivers/platform/x86/asus_acpi.c (renamed from drivers/acpi/asus_acpi.c)0
-rw-r--r--drivers/platform/x86/compal-laptop.c (renamed from drivers/misc/compal-laptop.c)0
-rw-r--r--drivers/platform/x86/eeepc-laptop.c (renamed from drivers/misc/eeepc-laptop.c)0
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c (renamed from drivers/misc/fujitsu-laptop.c)419
-rw-r--r--drivers/platform/x86/hp-wmi.c (renamed from drivers/misc/hp-wmi.c)0
-rw-r--r--drivers/platform/x86/intel_menlow.c (renamed from drivers/misc/intel_menlow.c)0
-rw-r--r--drivers/platform/x86/msi-laptop.c (renamed from drivers/misc/msi-laptop.c)0
-rw-r--r--drivers/platform/x86/panasonic-laptop.c (renamed from drivers/misc/panasonic-laptop.c)22
-rw-r--r--drivers/platform/x86/sony-laptop.c (renamed from drivers/misc/sony-laptop.c)15
-rw-r--r--drivers/platform/x86/tc1100-wmi.c (renamed from drivers/misc/tc1100-wmi.c)1
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c (renamed from drivers/misc/thinkpad_acpi.c)1
-rw-r--r--drivers/platform/x86/toshiba_acpi.c (renamed from drivers/acpi/toshiba_acpi.c)0
-rw-r--r--drivers/platform/x86/wmi.c (renamed from drivers/acpi/wmi.c)0
-rw-r--r--drivers/pnp/pnpacpi/core.c1
-rw-r--r--drivers/regulator/core.c474
-rw-r--r--drivers/regulator/da903x.c12
-rw-r--r--drivers/regulator/wm8350-regulator.c91
-rw-r--r--drivers/rtc/rtc-ds1307.c154
-rw-r--r--drivers/rtc/rtc-parisc.c3
-rw-r--r--drivers/s390/block/dasd.c21
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c48
-rw-r--r--drivers/s390/block/dasd_diag.c3
-rw-r--r--drivers/s390/block/dasd_eckd.c3
-rw-r--r--drivers/s390/block/dasd_fba.c3
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/char/Kconfig2
-rw-r--r--drivers/s390/char/tape_3590.c2
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/qdio_debug.c2
-rw-r--r--drivers/s390/cio/qdio_main.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c27
-rw-r--r--drivers/s390/net/qeth_l3_main.c53
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/NCR_D700.c2
-rw-r--r--drivers/scsi/a2091.c18
-rw-r--r--drivers/scsi/advansys.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.c1
-rw-r--r--drivers/scsi/gvp11.c8
-rw-r--r--drivers/scsi/hosts.c6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c14
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c16
-rw-r--r--drivers/scsi/ipr.c2
-rw-r--r--drivers/scsi/ipr.h2
-rw-r--r--drivers/scsi/lasi700.c3
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_dump.c2
-rw-r--r--drivers/scsi/libsas/sas_host_smp.c12
-rw-r--r--drivers/scsi/libsas/sas_port.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c3
-rw-r--r--drivers/scsi/mvsas.c2
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c26
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c462
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h40
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h33
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h294
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c295
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c82
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c41
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c139
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c263
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h6
-rw-r--r--drivers/scsi/raid_class.c3
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_debug.c4
-rw-r--r--drivers/scsi/scsi_error.c24
-rw-r--r--drivers/scsi/scsi_ioctl.c9
-rw-r--r--drivers/scsi/scsi_lib.c119
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_sysfs.c12
-rw-r--r--drivers/scsi/scsi_transport_fc.c39
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c11
-rw-r--r--drivers/scsi/scsi_transport_sas.c42
-rw-r--r--drivers/scsi/scsi_transport_srp.c2
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sd_dif.c17
-rw-r--r--drivers/scsi/ses.c2
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/sgiwd93.c3
-rw-r--r--drivers/scsi/sim710.c4
-rw-r--r--drivers/scsi/sni_53c710.c3
-rw-r--r--drivers/scsi/st.c492
-rw-r--r--drivers/scsi/st.h14
-rw-r--r--drivers/scsi/zalon.c4
-rw-r--r--drivers/serial/Kconfig21
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/nwpserial.c475
-rw-r--r--drivers/serial/of_serial.c19
-rw-r--r--drivers/usb/gadget/f_phonet.c12
-rw-r--r--drivers/usb/gadget/u_ether.c16
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h2
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/lc.c17
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/netdev.c9
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/rx.c8
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/tx.c6
-rw-r--r--drivers/video/amba-clcd.c1
-rw-r--r--drivers/video/backlight/Kconfig15
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/backlight.c73
-rw-r--r--drivers/video/backlight/corgi_bl.c169
-rw-r--r--drivers/video/backlight/cr_bllcd.c18
-rw-r--r--drivers/video/backlight/generic_bl.c147
-rw-r--r--drivers/video/backlight/hp680_bl.c20
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c1
-rw-r--r--drivers/video/backlight/progear_bl.c20
-rw-r--r--drivers/video/backlight/tdo24m.c94
-rw-r--r--drivers/video/backlight/tosa_lcd.c27
-rw-r--r--drivers/video/backlight/vgg2432a4.c2
-rw-r--r--drivers/w1/masters/Kconfig6
-rw-r--r--drivers/w1/masters/Makefile2
-rw-r--r--drivers/w1/masters/mxc_w1.c211
-rw-r--r--drivers/w1/w1.h1
-rw-r--r--drivers/w1/w1_io.c26
-rw-r--r--drivers/w1/w1_netlink.c261
-rw-r--r--drivers/w1/w1_netlink.h16
-rw-r--r--drivers/xen/Kconfig24
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c3
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c28
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c1
-rw-r--r--drivers/xen/xenfs/Makefile3
-rw-r--r--drivers/xen/xenfs/super.c64
-rw-r--r--drivers/xen/xenfs/xenbus.c593
-rw-r--r--drivers/xen/xenfs/xenfs.h6
-rw-r--r--firmware/Makefile2
-rw-r--r--firmware/WHENCE12
-rw-r--r--firmware/e100/d101m_ucode.bin.ihex38
-rw-r--r--firmware/e100/d101s_ucode.bin.ihex38
-rw-r--r--firmware/e100/d102e_ucode.bin.ihex38
-rw-r--r--fs/Kconfig19
-rw-r--r--fs/Makefile1
-rw-r--r--fs/binfmt_elf.c12
-rw-r--r--fs/bio.c36
-rw-r--r--fs/block_dev.c29
-rw-r--r--fs/btrfs/Makefile25
-rw-r--r--fs/btrfs/acl.c351
-rw-r--r--fs/btrfs/async-thread.c419
-rw-r--r--fs/btrfs/async-thread.h101
-rw-r--r--fs/btrfs/btrfs_inode.h131
-rw-r--r--fs/btrfs/compat.h7
-rw-r--r--fs/btrfs/compression.c709
-rw-r--r--fs/btrfs/compression.h47
-rw-r--r--fs/btrfs/crc32c.h29
-rw-r--r--fs/btrfs/ctree.c3953
-rw-r--r--fs/btrfs/ctree.h2129
-rw-r--r--fs/btrfs/dir-item.c386
-rw-r--r--fs/btrfs/disk-io.c2343
-rw-r--r--fs/btrfs/disk-io.h102
-rw-r--r--fs/btrfs/export.c203
-rw-r--r--fs/btrfs/export.h19
-rw-r--r--fs/btrfs/extent-tree.c5986
-rw-r--r--fs/btrfs/extent_io.c3717
-rw-r--r--fs/btrfs/extent_io.h269
-rw-r--r--fs/btrfs/extent_map.c351
-rw-r--r--fs/btrfs/extent_map.h62
-rw-r--r--fs/btrfs/file-item.c831
-rw-r--r--fs/btrfs/file.c1288
-rw-r--r--fs/btrfs/free-space-cache.c495
-rw-r--r--fs/btrfs/hash.h27
-rw-r--r--fs/btrfs/inode-item.c206
-rw-r--r--fs/btrfs/inode-map.c144
-rw-r--r--fs/btrfs/inode.c5035
-rw-r--r--fs/btrfs/ioctl.c1132
-rw-r--r--fs/btrfs/ioctl.h67
-rw-r--r--fs/btrfs/locking.c88
-rw-r--r--fs/btrfs/locking.h27
-rw-r--r--fs/btrfs/ordered-data.c730
-rw-r--r--fs/btrfs/ordered-data.h158
-rw-r--r--fs/btrfs/orphan.c67
-rw-r--r--fs/btrfs/print-tree.c216
-rw-r--r--fs/btrfs/print-tree.h23
-rw-r--r--fs/btrfs/ref-cache.c230
-rw-r--r--fs/btrfs/ref-cache.h77
-rw-r--r--fs/btrfs/root-tree.c366
-rw-r--r--fs/btrfs/struct-funcs.c139
-rw-r--r--fs/btrfs/super.c720
-rw-r--r--fs/btrfs/sysfs.c269
-rw-r--r--fs/btrfs/transaction.c1097
-rw-r--r--fs/btrfs/transaction.h106
-rw-r--r--fs/btrfs/tree-defrag.c147
-rw-r--r--fs/btrfs/tree-log.c2898
-rw-r--r--fs/btrfs/tree-log.h41
-rw-r--r--fs/btrfs/version.h4
-rw-r--r--fs/btrfs/version.sh43
-rw-r--r--fs/btrfs/volumes.c3218
-rw-r--r--fs/btrfs/volumes.h162
-rw-r--r--fs/btrfs/xattr.c322
-rw-r--r--fs/btrfs/xattr.h39
-rw-r--r--fs/btrfs/zlib.c632
-rw-r--r--fs/coda/sysctl.c5
-rw-r--r--fs/dcache.c14
-rw-r--r--fs/dquot.c2
-rw-r--r--fs/ext2/ialloc.c8
-rw-r--r--fs/ext2/inode.c2
-rw-r--r--fs/ext2/ioctl.c3
-rw-r--r--fs/ext2/super.c10
-rw-r--r--fs/ext3/hash.c77
-rw-r--r--fs/ext3/ialloc.c8
-rw-r--r--fs/ext3/ioctl.c3
-rw-r--r--fs/ext3/namei.c15
-rw-r--r--fs/ext3/super.c43
-rw-r--r--fs/ext4/balloc.c293
-rw-r--r--fs/ext4/bitmap.c5
-rw-r--r--fs/ext4/dir.c10
-rw-r--r--fs/ext4/ext4.h152
-rw-r--r--fs/ext4/ext4_extents.h5
-rw-r--r--fs/ext4/ext4_i.h16
-rw-r--r--fs/ext4/ext4_jbd2.c83
-rw-r--r--fs/ext4/ext4_jbd2.h87
-rw-r--r--fs/ext4/ext4_sb.h6
-rw-r--r--fs/ext4/extents.c62
-rw-r--r--fs/ext4/file.c3
-rw-r--r--fs/ext4/hash.c77
-rw-r--r--fs/ext4/ialloc.c324
-rw-r--r--fs/ext4/inode.c309
-rw-r--r--fs/ext4/ioctl.c2
-rw-r--r--fs/ext4/mballoc.c629
-rw-r--r--fs/ext4/mballoc.h71
-rw-r--r--fs/ext4/migrate.c19
-rw-r--r--fs/ext4/namei.c96
-rw-r--r--fs/ext4/resize.c113
-rw-r--r--fs/ext4/super.c622
-rw-r--r--fs/ext4/xattr.c25
-rw-r--r--fs/ioprio.c3
-rw-r--r--fs/jbd/commit.c15
-rw-r--r--fs/jbd/transaction.c39
-rw-r--r--fs/jbd2/checkpoint.c24
-rw-r--r--fs/jbd2/commit.c58
-rw-r--r--fs/jbd2/journal.c124
-rw-r--r--fs/jbd2/transaction.c60
-rw-r--r--fs/jffs2/compr_rubin.c120
-rw-r--r--fs/jffs2/erase.c5
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/dlmglue.c4
-rw-r--r--fs/ocfs2/file.c2
-rw-r--r--fs/partitions/check.c1
-rw-r--r--fs/proc/vmcore.c2
-rw-r--r--fs/romfs/inode.c12
-rw-r--r--fs/splice.c1
-rw-r--r--fs/super.c4
-rw-r--r--include/acpi/acdisasm.h445
-rw-r--r--include/acpi/acexcep.h6
-rw-r--r--include/acpi/acoutput.h103
-rw-r--r--include/acpi/acpi.h31
-rw-r--r--include/acpi/acpiosxf.h13
-rw-r--r--include/acpi/acpixf.h100
-rw-r--r--include/acpi/acrestyp.h405
-rw-r--r--include/acpi/actbl.h25
-rw-r--r--include/acpi/actbl1.h2
-rw-r--r--include/acpi/actypes.h557
-rw-r--r--include/acpi/platform/acenv.h45
-rw-r--r--include/acpi/platform/aclinux.h4
-rw-r--r--include/linux/acpi.h17
-rw-r--r--include/linux/async_tx.h17
-rw-r--r--include/linux/atmel-mci.h6
-rw-r--r--include/linux/auxvec.h6
-rw-r--r--include/linux/backlight.h16
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/can/core.h2
-rw-r--r--include/linux/cgroup.h61
-rw-r--r--include/linux/cpuset.h10
-rw-r--r--include/linux/dmaengine.h181
-rw-r--r--include/linux/dw_dmac.h31
-rw-r--r--include/linux/ext2_fs.h24
-rw-r--r--include/linux/ext2_fs_sb.h4
-rw-r--r--include/linux/ext3_fs.h52
-rw-r--r--include/linux/ext3_fs_sb.h5
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/if_vlan.h19
-rw-r--r--include/linux/ioprio.h2
-rw-r--r--include/linux/jbd.h15
-rw-r--r--include/linux/jbd2.h38
-rw-r--r--include/linux/kernel.h6
-rw-r--r--include/linux/leds-pca9532.h2
-rw-r--r--include/linux/leds.h5
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/magic.h1
-rw-r--r--include/linux/memcontrol.h154
-rw-r--r--include/linux/mfd/wm8350/pmic.h36
-rw-r--r--include/linux/mm_inline.h22
-rw-r--r--include/linux/mmzone.h24
-rw-r--r--include/linux/mtd/cfi.h1
-rw-r--r--include/linux/mtd/ftl.h38
-rw-r--r--include/linux/mtd/map.h1
-rw-r--r--include/linux/mtd/mtd.h75
-rw-r--r--include/linux/mtd/nand.h7
-rw-r--r--include/linux/mtd/partitions.h6
-rw-r--r--include/linux/mtd/pfow.h159
-rw-r--r--include/linux/mtd/physmap.h1
-rw-r--r--include/linux/mtd/qinfo.h91
-rw-r--r--include/linux/mtd/sharpsl.h20
-rw-r--r--include/linux/netdevice.h9
-rw-r--r--include/linux/nwpserial.h18
-rw-r--r--include/linux/oprofile.h18
-rw-r--r--include/linux/page_cgroup.h52
-rw-r--r--include/linux/pci_hotplug.h1
-rw-r--r--include/linux/pid.h18
-rw-r--r--include/linux/pid_namespace.h6
-rw-r--r--include/linux/raid/md_k.h20
-rw-r--r--include/linux/raid/md_p.h2
-rw-r--r--include/linux/raid/raid0.h10
-rw-r--r--include/linux/regulator/consumer.h8
-rw-r--r--include/linux/regulator/driver.h37
-rw-r--r--include/linux/regulator/machine.h41
-rw-r--r--include/linux/res_counter.h8
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/spi/tdo24m.h13
-rw-r--r--include/linux/suspend.h13
-rw-r--r--include/linux/swap.h25
-rw-r--r--include/net/netdma.h11
-rw-r--r--include/net/protocol.h3
-rw-r--r--include/net/wimax.h3
-rw-r--r--include/scsi/scsi_transport_fc.h1
-rw-r--r--include/xen/xenbus.h2
-rw-r--r--init/Kconfig142
-rw-r--r--ipc/mqueue.c3
-rw-r--r--kernel/async.c4
-rw-r--r--kernel/cgroup.c276
-rw-r--r--kernel/cpuset.c251
-rw-r--r--kernel/cred.c2
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/ns_cgroup.c2
-rw-r--r--kernel/pid.c6
-rw-r--r--kernel/power/disk.c6
-rw-r--r--kernel/power/snapshot.c370
-rw-r--r--kernel/power/swsusp.c122
-rw-r--r--kernel/res_counter.c44
-rw-r--r--kernel/sched_fair.c2
-rw-r--r--kernel/trace/ring_buffer.c8
-rw-r--r--lib/sort.c30
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/memcontrol.c1846
-rw-r--r--mm/memory.c28
-rw-r--r--mm/migrate.c42
-rw-r--r--mm/oom_kill.c10
-rw-r--r--mm/page_alloc.c8
-rw-r--r--mm/page_cgroup.c207
-rw-r--r--mm/shmem.c20
-rw-r--r--mm/swap.c33
-rw-r--r--mm/swap_state.c4
-rw-r--r--mm/swapfile.c24
-rw-r--r--mm/vmscan.c197
-rw-r--r--net/8021q/vlan_core.c111
-rw-r--r--net/8021q/vlan_dev.c14
-rw-r--r--net/appletalk/aarp.c5
-rw-r--r--net/bluetooth/bnep/bnep.h1
-rw-r--r--net/bluetooth/bnep/core.c12
-rw-r--r--net/bluetooth/bnep/netdev.c33
-rw-r--r--net/can/af_can.c15
-rw-r--r--net/can/bcm.c12
-rw-r--r--net/can/raw.c15
-rw-r--r--net/core/dev.c231
-rw-r--r--net/dsa/slave.c51
-rw-r--r--net/ipv4/tcp.c7
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv6/af_inet6.c107
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/sysctl_net_ipv6.c2
-rw-r--r--net/ipv6/tcp_ipv6.c47
-rw-r--r--net/phonet/pep-gprs.c12
-rw-r--r--net/sched/sch_sfq.c2
-rw-r--r--net/sched/sch_teql.c20
-rw-r--r--net/sctp/auth.c2
-rw-r--r--net/wimax/Kconfig14
-rw-r--r--net/wimax/id-table.c8
-rw-r--r--net/wimax/op-rfkill.c2
-rw-r--r--net/wireless/wext.c4
-rw-r--r--security/device_cgroup.c5
-rw-r--r--security/smack/smackfs.c2
-rw-r--r--sound/soc/au1x/dbdma2.c2
-rw-r--r--sound/soc/davinci/davinci-pcm.c2
1012 files changed, 73247 insertions, 23979 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-regulator b/Documentation/ABI/testing/sysfs-class-regulator
index 3731f6f29bcb..873ef1fc1569 100644
--- a/Documentation/ABI/testing/sysfs-class-regulator
+++ b/Documentation/ABI/testing/sysfs-class-regulator
@@ -3,8 +3,9 @@ Date: April 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
-		state. This holds the regulator output state.
+		Some regulator directories will contain a field called
+		state. This reports the regulator enable status, for
+		regulators which can report that value.
 
 		This will be one of the following strings:
 
@@ -18,7 +19,8 @@ Description:
 		'disabled' means the regulator output is OFF and is not
 		supplying power to the system..
 
-		'unknown' means software cannot determine the state.
+		'unknown' means software cannot determine the state, or
+		the reported state is invalid.
 
 		NOTE: this field can be used in conjunction with microvolts
 		and microamps to determine regulator output levels.
@@ -53,9 +55,10 @@ Date: April 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		microvolts. This holds the regulator output voltage setting
-		measured in microvolts (i.e. E-6 Volts).
+		measured in microvolts (i.e. E-6 Volts), for regulators
+		which can report that voltage.
 
 		NOTE: This value should not be used to determine the regulator
 		output voltage level as this value is the same regardless of
@@ -67,9 +70,10 @@ Date: April 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		microamps. This holds the regulator output current limit
-		setting measured in microamps (i.e. E-6 Amps).
+		setting measured in microamps (i.e. E-6 Amps), for regulators
+		which can report that current.
 
 		NOTE: This value should not be used to determine the regulator
 		output current level as this value is the same regardless of
@@ -81,8 +85,9 @@ Date: April 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
-		opmode. This holds the regulator operating mode setting.
+		Some regulator directories will contain a field called
+		opmode. This holds the current regulator operating mode,
+		for regulators which can report it.
 
 		The opmode value can be one of the following strings:
 
@@ -92,7 +97,7 @@ Description:
 		'standby'
 		'unknown'
 
-		The modes are described in include/linux/regulator/regulator.h
+		The modes are described in include/linux/regulator/consumer.h
 
 		NOTE: This value should not be used to determine the regulator
 		output operating mode as this value is the same regardless of
@@ -104,9 +109,10 @@ Date: April 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		min_microvolts. This holds the minimum safe working regulator
-		output voltage setting for this domain measured in microvolts.
+		output voltage setting for this domain measured in microvolts,
+		for regulators which support voltage constraints.
 
 		NOTE: this will return the string 'constraint not defined' if
 		the power domain has no min microvolts constraint defined by
@@ -118,9 +124,10 @@ Date: April 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		max_microvolts. This holds the maximum safe working regulator
-		output voltage setting for this domain measured in microvolts.
+		output voltage setting for this domain measured in microvolts,
+		for regulators which support voltage constraints.
 
 		NOTE: this will return the string 'constraint not defined' if
 		the power domain has no max microvolts constraint defined by
@@ -132,10 +139,10 @@ Date: April 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		min_microamps. This holds the minimum safe working regulator
 		output current limit setting for this domain measured in
-		microamps.
+		microamps, for regulators which support current constraints.
 
 		NOTE: this will return the string 'constraint not defined' if
 		the power domain has no min microamps constraint defined by
@@ -147,10 +154,10 @@ Date: April 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		max_microamps. This holds the maximum safe working regulator
 		output current limit setting for this domain measured in
-		microamps.
+		microamps, for regulators which support current constraints.
 
 		NOTE: this will return the string 'constraint not defined' if
 		the power domain has no max microamps constraint defined by
@@ -185,7 +192,7 @@ Date: April 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		requested_microamps. This holds the total requested load
 		current in microamps for this regulator from all its consumer
 		devices.
@@ -204,125 +211,102 @@ Date: May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_mem_microvolts. This holds the regulator output
 		voltage setting for this domain measured in microvolts when
-		the system is suspended to memory.
-
-		NOTE: this will return the string 'not defined' if
-		the power domain has no suspend to memory voltage defined by
-		platform code.
+		the system is suspended to memory, for voltage regulators
+		implementing suspend voltage configuration constraints.
 
 What:		/sys/class/regulator/.../suspend_disk_microvolts
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_disk_microvolts. This holds the regulator output
 		voltage setting for this domain measured in microvolts when
-		the system is suspended to disk.
-
-		NOTE: this will return the string 'not defined' if
-		the power domain has no suspend to disk voltage defined by
-		platform code.
+		the system is suspended to disk, for voltage regulators
+		implementing suspend voltage configuration constraints.
 
 What:		/sys/class/regulator/.../suspend_standby_microvolts
Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_standby_microvolts. This holds the regulator output
 		voltage setting for this domain measured in microvolts when
-		the system is suspended to standby.
-
-		NOTE: this will return the string 'not defined' if
-		the power domain has no suspend to standby voltage defined by
-		platform code.
+		the system is suspended to standby, for voltage regulators
+		implementing suspend voltage configuration constraints.
 
 What:		/sys/class/regulator/.../suspend_mem_mode
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_mem_mode. This holds the regulator operating mode
 		setting for this domain when the system is suspended to
-		memory.
-
-		NOTE: this will return the string 'not defined' if
-		the power domain has no suspend to memory mode defined by
-		platform code.
+		memory, for regulators implementing suspend mode
+		configuration constraints.
 
 What:		/sys/class/regulator/.../suspend_disk_mode
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_disk_mode. This holds the regulator operating mode
-		setting for this domain when the system is suspended to disk.
-
-		NOTE: this will return the string 'not defined' if
-		the power domain has no suspend to disk mode defined by
-		platform code.
+		setting for this domain when the system is suspended to disk,
+		for regulators implementing suspend mode configuration
+		constraints.
 
 What:		/sys/class/regulator/.../suspend_standby_mode
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_standby_mode. This holds the regulator operating mode
 		setting for this domain when the system is suspended to
-		standby.
-
-		NOTE: this will return the string 'not defined' if
-		the power domain has no suspend to standby mode defined by
-		platform code.
+		standby, for regulators implementing suspend mode
+		configuration constraints.
 
 What:		/sys/class/regulator/.../suspend_mem_state
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_mem_state. This holds the regulator operating state
292 when suspended to memory. 282 when suspended to memory, for regulators implementing suspend
293 283 configuration constraints.
294 This will be one of the following strings:
295 284
296 'enabled' 285 This will be one of the same strings reported by
297 'disabled' 286 the "state" attribute.
298 'not defined'
299 287
300What: /sys/class/regulator/.../suspend_disk_state 288What: /sys/class/regulator/.../suspend_disk_state
301Date: May 2008 289Date: May 2008
302KernelVersion: 2.6.26 290KernelVersion: 2.6.26
303Contact: Liam Girdwood <lrg@slimlogic.co.uk> 291Contact: Liam Girdwood <lrg@slimlogic.co.uk>
304Description: 292Description:
305 Each regulator directory will contain a field called 293 Some regulator directories will contain a field called
306 suspend_disk_state. This holds the regulator operating state 294 suspend_disk_state. This holds the regulator operating state
307 when suspended to disk. 295 when suspended to disk, for regulators implementing
308 296 suspend configuration constraints.
309 This will be one of the following strings:
310 297
311 'enabled' 298 This will be one of the same strings reported by
312 'disabled' 299 the "state" attribute.
313 'not defined'
314 300
315What: /sys/class/regulator/.../suspend_standby_state 301What: /sys/class/regulator/.../suspend_standby_state
316Date: May 2008 302Date: May 2008
317KernelVersion: 2.6.26 303KernelVersion: 2.6.26
318Contact: Liam Girdwood <lrg@slimlogic.co.uk> 304Contact: Liam Girdwood <lrg@slimlogic.co.uk>
319Description: 305Description:
320 Each regulator directory will contain a field called 306 Some regulator directories will contain a field called
321 suspend_standby_state. This holds the regulator operating 307 suspend_standby_state. This holds the regulator operating
322 state when suspended to standby. 308 state when suspended to standby, for regulators implementing
323 309 suspend configuration constraints.
324 This will be one of the following strings:
325 310
326 'enabled' 311 This will be one of the same strings reported by
327 'disabled' 312 the "state" attribute.
328 'not defined'
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 0a08126d3094..dc3154e49279 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml mcabook.xml \
12 kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \ 12 kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
13 gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \ 13 gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
14 genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \ 14 genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
15 mac80211.xml debugobjects.xml sh.xml 15 mac80211.xml debugobjects.xml sh.xml regulator.xml
16 16
17### 17###
18# The build process is as follows (targets): 18# The build process is as follows (targets):
diff --git a/Documentation/DocBook/regulator.tmpl b/Documentation/DocBook/regulator.tmpl
new file mode 100644
index 000000000000..53f4f8d3b810
--- /dev/null
+++ b/Documentation/DocBook/regulator.tmpl
@@ -0,0 +1,304 @@
1<?xml version="1.0" encoding="UTF-8"?>
2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
3 "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
4
5<book id="regulator-api">
6 <bookinfo>
7 <title>Voltage and current regulator API</title>
8
9 <authorgroup>
10 <author>
11 <firstname>Liam</firstname>
12 <surname>Girdwood</surname>
13 <affiliation>
14 <address>
15 <email>lrg@slimlogic.co.uk</email>
16 </address>
17 </affiliation>
18 </author>
19 <author>
20 <firstname>Mark</firstname>
21 <surname>Brown</surname>
22 <affiliation>
23 <orgname>Wolfson Microelectronics</orgname>
24 <address>
25 <email>broonie@opensource.wolfsonmicro.com</email>
26 </address>
27 </affiliation>
28 </author>
29 </authorgroup>
30
31 <copyright>
32 <year>2007-2008</year>
33 <holder>Wolfson Microelectronics</holder>
34 </copyright>
35 <copyright>
36 <year>2008</year>
37 <holder>Liam Girdwood</holder>
38 </copyright>
39
40 <legalnotice>
41 <para>
42 This documentation is free software; you can redistribute
43 it and/or modify it under the terms of the GNU General Public
44 License version 2 as published by the Free Software Foundation.
45 </para>
46
47 <para>
48 This program is distributed in the hope that it will be
49 useful, but WITHOUT ANY WARRANTY; without even the implied
50 warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
51 See the GNU General Public License for more details.
52 </para>
53
54 <para>
55 You should have received a copy of the GNU General Public
56 License along with this program; if not, write to the Free
57 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
58 MA 02111-1307 USA
59 </para>
60
61 <para>
62 For more details see the file COPYING in the source
63 distribution of Linux.
64 </para>
65 </legalnotice>
66 </bookinfo>
67
68<toc></toc>
69
70 <chapter id="intro">
71 <title>Introduction</title>
72 <para>
73 This framework is designed to provide a standard kernel
74 interface to control voltage and current regulators.
75 </para>
76 <para>
77 The intention is to allow systems to dynamically control
78 regulator power output in order to save power and prolong
79 battery life. This applies to both voltage regulators (where
80 voltage output is controllable) and current sinks (where current
81 limit is controllable).
82 </para>
83 <para>
84 Note that additional (and currently more complete) documentation
85 is available in the Linux kernel source under
86 <filename>Documentation/power/regulator</filename>.
87 </para>
88
89 <sect1 id="glossary">
90 <title>Glossary</title>
91 <para>
92 The regulator API uses a number of terms which may not be
93 familiar:
94 </para>
95 <glossary>
96
97 <glossentry>
98 <glossterm>Regulator</glossterm>
99 <glossdef>
100 <para>
101 Electronic device that supplies power to other devices. Most
102 regulators can enable and disable their output and some can also
103 control their output voltage or current.
104 </para>
105 </glossdef>
106 </glossentry>
107
108 <glossentry>
109 <glossterm>Consumer</glossterm>
110 <glossdef>
111 <para>
112 Electronic device which consumes power provided by a regulator.
113 These may either be static, requiring only a fixed supply, or
114 dynamic, requiring active management of the regulator at
115 runtime.
116 </para>
117 </glossdef>
118 </glossentry>
119
120 <glossentry>
121 <glossterm>Power Domain</glossterm>
122 <glossdef>
123 <para>
124 The electronic circuit supplied by a given regulator, including
125 the regulator and all consumer devices. The configuration of
126 the regulator is shared between all the components in the
127 circuit.
128 </para>
129 </glossdef>
130 </glossentry>
131
132 <glossentry>
133 <glossterm>Power Management Integrated Circuit</glossterm>
134 <acronym>PMIC</acronym>
135 <glossdef>
136 <para>
137 An IC which contains numerous regulators and often also other
138 subsystems. In an embedded system the primary PMIC is often
139 equivalent to a combination of the PSU and southbridge in a
140 desktop system.
141 </para>
142 </glossdef>
143 </glossentry>
144 </glossary>
145 </sect1>
146 </chapter>
147
148 <chapter id="consumer">
149 <title>Consumer driver interface</title>
150 <para>
151 This offers a similar API to the kernel clock framework.
152 Consumer drivers use <link
153 linkend='API-regulator-get'>get</link> and <link
154 linkend='API-regulator-put'>put</link> operations to acquire and
155 release regulators. Functions are
156 provided to <link linkend='API-regulator-enable'>enable</link>
157 and <link linkend='API-regulator-disable'>disable</link> the
158	  regulator and to get and set the runtime parameters of the
159 regulator.
160 </para>
161 <para>
162 When requesting regulators consumers use symbolic names for their
163 supplies, such as "Vcc", which are mapped into actual regulator
164 devices by the machine interface.
165 </para>
166 <para>
167 A stub version of this API is provided when the regulator
168 framework is not in use in order to minimise the need to use
169 ifdefs.
170 </para>
171
172 <sect1 id="consumer-enable">
173 <title>Enabling and disabling</title>
174 <para>
175 The regulator API provides reference counted enabling and
176 disabling of regulators. Consumer devices use the <function><link
177 linkend='API-regulator-enable'>regulator_enable</link></function>
178 and <function><link
179 linkend='API-regulator-disable'>regulator_disable</link>
180 </function> functions to enable and disable regulators. Calls
181 to the two functions must be balanced.
182 </para>
183 <para>
184 Note that since multiple consumers may be using a regulator and
185 machine constraints may not allow the regulator to be disabled
186 there is no guarantee that calling
187 <function>regulator_disable</function> will actually cause the
188 supply provided by the regulator to be disabled. Consumer
189 drivers should assume that the regulator may be enabled at all
190 times.
191 </para>
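      <para>
	As an illustration only, a minimal consumer sketch (the
	"Vcc" supply name and the surrounding driver are hypothetical):
      </para>
<programlisting>
struct regulator *regulator;
int ret;

regulator = regulator_get(dev, "Vcc");
if (IS_ERR(regulator))
	return PTR_ERR(regulator);

ret = regulator_enable(regulator);
if (ret == 0) {
	/* ... consumer hardware is powered here ... */
	regulator_disable(regulator);	/* balances the enable above */
}
regulator_put(regulator);
</programlisting>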
192 </sect1>
193
194 <sect1 id="consumer-config">
195 <title>Configuration</title>
196 <para>
197 Some consumer devices may need to be able to dynamically
198 configure their supplies. For example, MMC drivers may need to
199 select the correct operating voltage for their cards. This may
200 be done while the regulator is enabled or disabled.
201 </para>
202 <para>
203 The <function><link
204 linkend='API-regulator-set-voltage'>regulator_set_voltage</link>
205 </function> and <function><link
206 linkend='API-regulator-set-current-limit'
207 >regulator_set_current_limit</link>
208 </function> functions provide the primary interface for this.
209 Both take ranges of voltages and currents, supporting drivers
210 that do not require a specific value (eg, CPU frequency scaling
211 normally permits the CPU to use a wider range of supply
212 voltages at lower frequencies but does not require that the
213 supply voltage be lowered). Where an exact value is required
214 both minimum and maximum values should be identical.
215 </para>
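      <para>
	Continuing the consumer sketch above, an MMC driver selecting a
	3.3V card supply might do the following (the value is
	hypothetical; since an exact voltage is wanted, minimum and
	maximum are identical):
      </para>
<programlisting>
ret = regulator_set_voltage(regulator, 3300000, 3300000);
if (ret != 0)
	dev_err(dev, "failed to set card voltage\n");
</programlisting>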
216 </sect1>
217
218 <sect1 id="consumer-callback">
219 <title>Callbacks</title>
220 <para>
221 Callbacks may also be <link
222 linkend='API-regulator-register-notifier'>registered</link>
223 for events such as regulation failures.
224 </para>
225 </sect1>
226 </chapter>
227
228 <chapter id="driver">
229 <title>Regulator driver interface</title>
230 <para>
231 Drivers for regulator chips <link
232 linkend='API-regulator-register'>register</link> the regulators
233 with the regulator core, providing operations structures to the
234 core. A <link
235 linkend='API-regulator-notifier-call-chain'>notifier</link> interface
236 allows error conditions to be reported to the core.
237 </para>
238 <para>
239 Registration should be triggered by explicit setup done by the
240 platform, supplying a <link
241 linkend='API-struct-regulator-init-data'>struct
242 regulator_init_data</link> for the regulator containing
243 <link linkend='machine-constraint'>constraint</link> and
244 <link linkend='machine-supply'>supply</link> information.
245 </para>
246 </chapter>
247
248 <chapter id="machine">
249 <title>Machine interface</title>
250 <para>
251 This interface provides a way to define how regulators are
252 connected to consumers on a given system and what the valid
253 operating parameters are for the system.
254 </para>
255
256 <sect1 id="machine-supply">
257 <title>Supplies</title>
258 <para>
259 Regulator supplies are specified using <link
260 linkend='API-struct-regulator-consumer-supply'>struct
261 regulator_consumer_supply</link>. This is done at
262 <link linkend='driver'>driver registration
263 time</link> as part of the machine constraints.
264 </para>
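      <para>
	A minimal sketch, assuming a hypothetical consumer platform
	device <literal>mydev_device</literal>:
      </para>
<programlisting>
static struct regulator_consumer_supply mydev_consumers[] = {
	{
		.dev	= &amp;mydev_device.dev,	/* hypothetical consumer */
		.supply	= "Vcc",		/* symbolic supply name */
	},
};
</programlisting>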
265 </sect1>
266
267 <sect1 id="machine-constraint">
268 <title>Constraints</title>
269 <para>
270	  As well as defining the connections, the machine interface
271	  also provides constraints defining the operations that
272 clients are allowed to perform and the parameters that may be
273 set. This is required since generally regulator devices will
274 offer more flexibility than it is safe to use on a given
275 system, for example supporting higher supply voltages than the
276 consumers are rated for.
277 </para>
278 <para>
279 This is done at <link linkend='driver'>driver
280 registration time</link> by providing a <link
281 linkend='API-struct-regulation-constraints'>struct
282 regulation_constraints</link>.
283 </para>
284 <para>
285 The constraints may also specify an initial configuration for the
286	  regulator, which is particularly useful for use with
287	  static consumers.
288 </para>
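      <para>
	A minimal sketch, reusing the hypothetical
	<literal>mydev_consumers</literal> array from the previous
	section and assuming consumers rated for 1.8V to 3.3V:
      </para>
<programlisting>
static struct regulator_init_data mydev_ldo1_data = {
	.constraints = {
		.min_uV		= 1800000,
		.max_uV		= 3300000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
	.num_consumer_supplies	= ARRAY_SIZE(mydev_consumers),
	.consumer_supplies	= mydev_consumers,
};
</programlisting>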
289 </sect1>
290 </chapter>
291
292 <chapter id="api">
293 <title>API reference</title>
294 <para>
295 Due to limitations of the kernel documentation framework and the
296 existing layout of the source code the entire regulator API is
297 documented here.
298 </para>
299!Iinclude/linux/regulator/consumer.h
300!Iinclude/linux/regulator/machine.h
301!Iinclude/linux/regulator/driver.h
302!Edrivers/regulator/core.c
303 </chapter>
304</book>
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX
index 7dc0695a8f90..9bb62f7b89c3 100644
--- a/Documentation/RCU/00-INDEX
+++ b/Documentation/RCU/00-INDEX
@@ -12,6 +12,8 @@ rcuref.txt
12 - Reference-count design for elements of lists/arrays protected by RCU 12 - Reference-count design for elements of lists/arrays protected by RCU
13rcu.txt 13rcu.txt
14 - RCU Concepts 14 - RCU Concepts
15rcubarrier.txt
16 - Unloading modules that use RCU callbacks
15RTFP.txt 17RTFP.txt
16 - List of RCU papers (bibliography) going back to 1980. 18 - List of RCU papers (bibliography) going back to 1980.
17torture.txt 19torture.txt
diff --git a/Documentation/RCU/rcubarrier.txt b/Documentation/RCU/rcubarrier.txt
new file mode 100644
index 000000000000..909602d409bb
--- /dev/null
+++ b/Documentation/RCU/rcubarrier.txt
@@ -0,0 +1,304 @@
1RCU and Unloadable Modules
2
3[Originally published in LWN Jan. 14, 2007: http://lwn.net/Articles/217484/]
4
5RCU (read-copy update) is a synchronization mechanism that can be thought
6of as a replacement for reader-writer locking (among other things), but with
7very low-overhead readers that are immune to deadlock, priority inversion,
8and unbounded latency. RCU read-side critical sections are delimited
9by rcu_read_lock() and rcu_read_unlock(), which, in non-CONFIG_PREEMPT
10kernels, generate no code whatsoever.
11
12This means that RCU writers are unaware of the presence of concurrent
13readers, so that RCU updates to shared data must be undertaken quite
14carefully, leaving an old version of the data structure in place until all
15pre-existing readers have finished. These old versions are needed because
16such readers might hold a reference to them. RCU updates can therefore be
17rather expensive, and RCU is thus best suited for read-mostly situations.
18
19How can an RCU writer possibly determine when all readers are finished,
20given that readers might well leave absolutely no trace of their
21presence? There is a synchronize_rcu() primitive that blocks until all
22pre-existing readers have completed. An updater wishing to delete an
23element p from a linked list might do the following, while holding an
24appropriate lock, of course:
25
26 list_del_rcu(p);
27 synchronize_rcu();
28 kfree(p);
29
30But the above code cannot be used in IRQ context -- the call_rcu()
31primitive must be used instead. This primitive takes a pointer to an
32rcu_head struct placed within the RCU-protected data structure and
33another pointer to a function that may be invoked later to free that
34structure. Code to delete an element p from the linked list from IRQ
35context might then be as follows:
36
37 list_del_rcu(p);
38 call_rcu(&p->rcu, p_callback);
39
40Since call_rcu() never blocks, this code can safely be used from within
41IRQ context. The function p_callback() might be defined as follows:
42
43 static void p_callback(struct rcu_head *rp)
44 {
45 struct pstruct *p = container_of(rp, struct pstruct, rcu);
46
47 kfree(p);
48 }
49
50
51Unloading Modules That Use call_rcu()
52
53But what if p_callback is defined in an unloadable module?
54
55If we unload the module while some RCU callbacks are pending,
56the CPUs executing these callbacks are going to be severely
57disappointed when they are later invoked, as fancifully depicted at
58http://lwn.net/images/ns/kernel/rcu-drop.jpg.
59
60We could try placing a synchronize_rcu() in the module-exit code path,
61but this is not sufficient. Although synchronize_rcu() does wait for a
62grace period to elapse, it does not wait for the callbacks to complete.
63
64One might be tempted to try several back-to-back synchronize_rcu()
65calls, but this is still not guaranteed to work. If there is a very
66heavy RCU-callback load, then some of the callbacks might be deferred
67in order to allow other processing to proceed. Such deferral is required
68in realtime kernels in order to avoid excessive scheduling latencies.
69
70
71rcu_barrier()
72
73We instead need the rcu_barrier() primitive. This primitive is similar
74to synchronize_rcu(), but instead of waiting solely for a grace
75period to elapse, it also waits for all outstanding RCU callbacks to
76complete. Pseudo-code using rcu_barrier() is as follows:
77
78 1. Prevent any new RCU callbacks from being posted.
79 2. Execute rcu_barrier().
80 3. Allow the module to be unloaded.
81
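In code, a module exit path following these steps might look like the
following minimal sketch, where stop_posting_callbacks() is a
hypothetical helper standing in for step 1:

	static void __exit my_module_exit(void)
	{
		stop_posting_callbacks();	/* hypothetical: step 1 */
		rcu_barrier();			/* step 2: wait for callbacks */
		/* step 3: once this returns, unloading is safe */
	}
	module_exit(my_module_exit);
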
82Quick Quiz #1: Why is there no srcu_barrier()?
83
84The rcutorture module makes use of rcu_barrier in its exit function
85as follows:
86
87 1 static void
88 2 rcu_torture_cleanup(void)
89 3 {
90 4 int i;
91 5
92 6 fullstop = 1;
93 7 if (shuffler_task != NULL) {
94 8 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
95 9 kthread_stop(shuffler_task);
9610 }
9711 shuffler_task = NULL;
9812
9913 if (writer_task != NULL) {
10014 VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
10115 kthread_stop(writer_task);
10216 }
10317 writer_task = NULL;
10418
10519 if (reader_tasks != NULL) {
10620 for (i = 0; i < nrealreaders; i++) {
10721 if (reader_tasks[i] != NULL) {
10822 VERBOSE_PRINTK_STRING(
10923 "Stopping rcu_torture_reader task");
11024 kthread_stop(reader_tasks[i]);
11125 }
11226 reader_tasks[i] = NULL;
11327 }
11428 kfree(reader_tasks);
11529 reader_tasks = NULL;
11630 }
11731 rcu_torture_current = NULL;
11832
11933 if (fakewriter_tasks != NULL) {
12034 for (i = 0; i < nfakewriters; i++) {
12135 if (fakewriter_tasks[i] != NULL) {
12236 VERBOSE_PRINTK_STRING(
12337 "Stopping rcu_torture_fakewriter task");
12438 kthread_stop(fakewriter_tasks[i]);
12539 }
12640 fakewriter_tasks[i] = NULL;
12741 }
12842 kfree(fakewriter_tasks);
12943 fakewriter_tasks = NULL;
13044 }
13145
13246 if (stats_task != NULL) {
13347 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
13448 kthread_stop(stats_task);
13549 }
13650 stats_task = NULL;
13751
13852 /* Wait for all RCU callbacks to fire. */
13953 rcu_barrier();
14054
14155 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
14256
14357 if (cur_ops->cleanup != NULL)
14458 cur_ops->cleanup();
14559 if (atomic_read(&n_rcu_torture_error))
14660 rcu_torture_print_module_parms("End of test: FAILURE");
14761 else
14862 rcu_torture_print_module_parms("End of test: SUCCESS");
14963 }
150
151Line 6 sets a global variable that prevents any RCU callbacks from
152re-posting themselves. This will not be necessary in most cases, since
153RCU callbacks rarely include calls to call_rcu(). However, the rcutorture
154module is an exception to this rule, and therefore needs to set this
155global variable.
156
157Lines 7-50 stop all the kernel tasks associated with the rcutorture
158module. Therefore, once execution reaches line 53, no more rcutorture
159RCU callbacks will be posted. The rcu_barrier() call on line 53 waits
160for any pre-existing callbacks to complete.
161
162Then lines 55-62 print status and do operation-specific cleanup, and
163then return, permitting the module-unload operation to be completed.
164
165Quick Quiz #2: Is there any other situation where rcu_barrier() might
166 be required?
167
168Your module might have additional complications. For example, if your
169module invokes call_rcu() from timers, you will need to first cancel all
170the timers, and only then invoke rcu_barrier() to wait for any remaining
171RCU callbacks to complete.
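
A minimal sketch of that ordering, assuming a hypothetical my_timer
whose handler does not re-arm the timer:

	del_timer_sync(&my_timer);	/* no new call_rcu() after this */
	rcu_barrier();			/* wait for already-posted callbacks */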
172
173
174Implementing rcu_barrier()
175
176Dipankar Sarma's implementation of rcu_barrier() makes use of the fact
177that RCU callbacks are never reordered once queued on one of the per-CPU
178queues. His implementation queues an RCU callback on each of the per-CPU
179callback queues, and then waits until they have all started executing, at
180which point, all earlier RCU callbacks are guaranteed to have completed.
181
182The original code for rcu_barrier() was as follows:
183
184 1 void rcu_barrier(void)
185 2 {
186 3 BUG_ON(in_interrupt());
187 4 /* Take cpucontrol mutex to protect against CPU hotplug */
188 5 mutex_lock(&rcu_barrier_mutex);
189 6 init_completion(&rcu_barrier_completion);
190 7 atomic_set(&rcu_barrier_cpu_count, 0);
191 8 on_each_cpu(rcu_barrier_func, NULL, 0, 1);
192 9 wait_for_completion(&rcu_barrier_completion);
19310 mutex_unlock(&rcu_barrier_mutex);
19411 }
195
196Line 3 verifies that the caller is in process context, and lines 5 and 10
197use rcu_barrier_mutex to ensure that only one rcu_barrier() is using the
198global completion and counters at a time, which are initialized on lines
1996 and 7. Line 8 causes each CPU to invoke rcu_barrier_func(), which is
200shown below. Note that the final "1" in on_each_cpu()'s argument list
201ensures that all the calls to rcu_barrier_func() will have completed
202before on_each_cpu() returns. Line 9 then waits for the completion.
203
204This code was rewritten in 2008 to support rcu_barrier_bh() and
205rcu_barrier_sched() in addition to the original rcu_barrier().
206
207The rcu_barrier_func() runs on each CPU, where it invokes call_rcu()
208to post an RCU callback, as follows:
209
210 1 static void rcu_barrier_func(void *notused)
211 2 {
212 3 int cpu = smp_processor_id();
213 4 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
214 5 struct rcu_head *head;
215 6
216 7 head = &rdp->barrier;
217 8 atomic_inc(&rcu_barrier_cpu_count);
218 9 call_rcu(head, rcu_barrier_callback);
21910 }
220
221Lines 3 and 4 locate RCU's internal per-CPU rcu_data structure,
222which contains the struct rcu_head that is needed for the later call to
223call_rcu(). Line 7 picks up a pointer to this struct rcu_head, and line
2248 increments a global counter. This counter will later be decremented
225by the callback. Line 9 then registers the rcu_barrier_callback() on
226the current CPU's queue.
227
228The rcu_barrier_callback() function simply atomically decrements the
229rcu_barrier_cpu_count variable and finalizes the completion when it
230reaches zero, as follows:
231
232 1 static void rcu_barrier_callback(struct rcu_head *notused)
233 2 {
234 3 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
235 4 complete(&rcu_barrier_completion);
236 5 }
237
238Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
239 immediately (thus incrementing rcu_barrier_cpu_count to the
240	value one), but the other CPUs' rcu_barrier_func() invocations
241 are delayed for a full grace period? Couldn't this result in
242 rcu_barrier() returning prematurely?
243
244
245rcu_barrier() Summary
246
247The rcu_barrier() primitive has seen relatively little use, since most
248code using RCU is in the core kernel rather than in modules. However, if
249you are using RCU from an unloadable module, you need to use rcu_barrier()
250so that your module may be safely unloaded.
251
252
253Answers to Quick Quizzes
254
255Quick Quiz #1: Why is there no srcu_barrier()?
256
257Answer: Since there is no call_srcu(), there can be no outstanding SRCU
258 callbacks. Therefore, there is no need to wait for them.
259
260Quick Quiz #2: Is there any other situation where rcu_barrier() might
261 be required?
262
263Answer: Interestingly enough, rcu_barrier() was not originally
264 implemented for module unloading. Nikita Danilov was using
265 RCU in a filesystem, which resulted in a similar situation at
266 filesystem-unmount time. Dipankar Sarma coded up rcu_barrier()
267 in response, so that Nikita could invoke it during the
268 filesystem-unmount process.
269
270 Much later, yours truly hit the RCU module-unload problem when
271 implementing rcutorture, and found that rcu_barrier() solves
272 this problem as well.
273
274Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
275 immediately (thus incrementing rcu_barrier_cpu_count to the
276	value one), but the other CPUs' rcu_barrier_func() invocations
277 are delayed for a full grace period? Couldn't this result in
278 rcu_barrier() returning prematurely?
279
280Answer: This cannot happen. The reason is that on_each_cpu() has its last
281 argument, the wait flag, set to "1". This flag is passed through
282 to smp_call_function() and further to smp_call_function_on_cpu(),
283	causing the latter to spin until the cross-CPU invocation of
284 rcu_barrier_func() has completed. This by itself would prevent
285 a grace period from completing on non-CONFIG_PREEMPT kernels,
286 since each CPU must undergo a context switch (or other quiescent
287 state) before the grace period can complete. However, this is
288 of no use in CONFIG_PREEMPT kernels.
289
290 Therefore, on_each_cpu() disables preemption across its call
291 to smp_call_function() and also across the local call to
292 rcu_barrier_func(). This prevents the local CPU from context
293 switching, again preventing grace periods from completing. This
294 means that all CPUs have executed rcu_barrier_func() before
295 the first rcu_barrier_callback() can possibly execute, in turn
296 preventing rcu_barrier_cpu_count from prematurely reaching zero.
297
298 Currently, -rt implementations of RCU keep but a single global
299 queue for RCU callbacks, and thus do not suffer from this
300 problem. However, when the -rt RCU eventually does have per-CPU
301 callback queues, things will have to change. One simple change
302 is to add an rcu_read_lock() before line 8 of rcu_barrier()
303 and an rcu_read_unlock() after line 8 of this same function. If
304 you can think of a better change, please let me know!
diff --git a/Documentation/bad_memory.txt b/Documentation/bad_memory.txt
new file mode 100644
index 000000000000..df8416213202
--- /dev/null
+++ b/Documentation/bad_memory.txt
@@ -0,0 +1,45 @@
1March 2008
2Jan-Simon Moeller, dl9pf@gmx.de
3
4
5How to deal with bad memory, e.g. as reported by memtest86+?
6#########################################################
7
8There are three possibilities I know of:
9
101) Reinsert/swap the memory modules
11
122) Buy new modules (best!) or try to exchange the memory
13 if you have spare-parts
14
153) Use BadRAM or memmap
16
17This howto is about option 3).
18
19
20BadRAM
21######
22BadRAM is actively developed and available as a kernel patch
23here: http://rick.vanrein.org/linux/badram/
24
25For more details see the BadRAM documentation.
26
27memmap
28######
29
30memmap is already in the kernel and usable as a kernel parameter at
31boot time. Its syntax is slightly strange and you may need to
32calculate the values yourself!
33
34Syntax to exclude a memory area (see kernel-parameters.txt for details):
35memmap=<size>$<address>
36
37Example: memtest86+ reported errors at addresses 0x18691458, 0x18698424 and
38         some others. All had 0x1869xxxx in common, so I chose to exclude
39         the 64K region starting at 0x18690000.
40
41With the numbers of the example above:
42memmap=64K$0x18690000
43 or
44memmap=0x10000$0x18690000
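
For example, a hypothetical boot loader entry using the values above
(note that some boot loaders treat '$' specially and may require
escaping it):

kernel /vmlinuz root=/dev/sda1 ro memmap=64K$0x18690000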
45
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index d9014aa0eb68..e33ee74eee77 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -227,7 +227,6 @@ Each cgroup is represented by a directory in the cgroup file system
227containing the following files describing that cgroup: 227containing the following files describing that cgroup:
228 228
229 - tasks: list of tasks (by pid) attached to that cgroup 229 - tasks: list of tasks (by pid) attached to that cgroup
230 - releasable flag: cgroup currently removeable?
231 - notify_on_release flag: run the release agent on exit? 230 - notify_on_release flag: run the release agent on exit?
232 - release_agent: the path to use for release notifications (this file 231 - release_agent: the path to use for release notifications (this file
233 exists in the top cgroup only) 232 exists in the top cgroup only)
@@ -360,7 +359,7 @@ Now you want to do something with this cgroup.
360 359
361In this directory you can find several files: 360In this directory you can find several files:
362# ls 361# ls
363notify_on_release releasable tasks 362notify_on_release tasks
364(plus whatever files added by the attached subsystems) 363(plus whatever files added by the attached subsystems)
365 364
366Now attach your shell to this cgroup: 365Now attach your shell to this cgroup:
@@ -479,7 +478,6 @@ newly-created cgroup if an error occurs after this subsystem's
479create() method has been called for the new cgroup). 478create() method has been called for the new cgroup).
480 479
481void pre_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp); 480void pre_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
482(cgroup_mutex held by caller)
483 481
484Called before checking the reference count on each subsystem. This may 482Called before checking the reference count on each subsystem. This may
485be useful for subsystems which have some extra references even if 483be useful for subsystems which have some extra references even if
@@ -498,6 +496,7 @@ remain valid while the caller holds cgroup_mutex.
498 496
499void attach(struct cgroup_subsys *ss, struct cgroup *cgrp, 497void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
500 struct cgroup *old_cgrp, struct task_struct *task) 498 struct cgroup *old_cgrp, struct task_struct *task)
499(cgroup_mutex held by caller)
501 500
502Called after the task has been attached to the cgroup, to allow any 501Called after the task has been attached to the cgroup, to allow any
503post-attachment activity that requires memory allocations or blocking. 502post-attachment activity that requires memory allocations or blocking.
@@ -511,6 +510,7 @@ void exit(struct cgroup_subsys *ss, struct task_struct *task)
511Called during task exit. 510Called during task exit.
512 511
513int populate(struct cgroup_subsys *ss, struct cgroup *cgrp) 512int populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
513(cgroup_mutex held by caller)
514 514
515Called after creation of a cgroup to allow a subsystem to populate 515Called after creation of a cgroup to allow a subsystem to populate
516the cgroup directory with file entries. The subsystem should make 516the cgroup directory with file entries. The subsystem should make
@@ -520,6 +520,7 @@ method can return an error code, the error code is currently not
520always handled well. 520always handled well.
521 521
522void post_clone(struct cgroup_subsys *ss, struct cgroup *cgrp) 522void post_clone(struct cgroup_subsys *ss, struct cgroup *cgrp)
523(cgroup_mutex held by caller)
523 524
524Called at the end of cgroup_clone() to do any parameter 525Called at the end of cgroup_clone() to do any parameter
525initialization which might be required before a task could attach. For 526initialization which might be required before a task could attach. For
@@ -527,7 +528,7 @@ example in cpusets, no task may attach before 'cpus' and 'mems' are set
527up. 528up.
528 529
529void bind(struct cgroup_subsys *ss, struct cgroup *root) 530void bind(struct cgroup_subsys *ss, struct cgroup *root)
530(cgroup_mutex held by caller) 531(cgroup_mutex and ss->hierarchy_mutex held by caller)
531 532
532Called when a cgroup subsystem is rebound to a different hierarchy 533Called when a cgroup subsystem is rebound to a different hierarchy
533and root cgroup. Currently this will only involve movement between 534and root cgroup. Currently this will only involve movement between
diff --git a/Documentation/controllers/memcg_test.txt b/Documentation/controllers/memcg_test.txt
new file mode 100644
index 000000000000..08d4d3ea0d79
--- /dev/null
+++ b/Documentation/controllers/memcg_test.txt
@@ -0,0 +1,342 @@
1Memory Resource Controller(Memcg) Implementation Memo.
2Last Updated: 2008/12/15
3Base Kernel Version: based on 2.6.28-rc8-mm.
4
5Because the VM is getting complex (one of the reasons is memcg...), memcg's
6behavior is complex. This is a document about memcg's internal behavior.
7Please note that implementation details can be changed.
8
9(*) Topics on the API should be in Documentation/controllers/memory.txt
10
110. How to record usage?
12   Two objects are used.
13
14 page_cgroup ....an object per page.
15 Allocated at boot or memory hotplug. Freed at memory hot removal.
16
17 swap_cgroup ... an entry per swp_entry.
18 Allocated at swapon(). Freed at swapoff().
19
20	The page_cgroup has a USED bit, and a double count against a page_cgroup
21	never occurs. swap_cgroup is used only when a charged page is swapped out.
22
231. Charge
24
25 a page/swp_entry may be charged (usage += PAGE_SIZE) at
26
27 mem_cgroup_newpage_charge()
28 Called at new page fault and Copy-On-Write.
29
30 mem_cgroup_try_charge_swapin()
31 Called at do_swap_page() (page fault on swap entry) and swapoff.
32 Followed by charge-commit-cancel protocol. (With swap accounting)
33 At commit, a charge recorded in swap_cgroup is removed.
34
35 mem_cgroup_cache_charge()
36 Called at add_to_page_cache()
37
38 mem_cgroup_cache_charge_swapin()
39 Called at shmem's swapin.
40
41 mem_cgroup_prepare_migration()
42 Called before migration. "extra" charge is done and followed by
43 charge-commit-cancel protocol.
44 At commit, charge against oldpage or newpage will be committed.
45
462. Uncharge
47 a page/swp_entry may be uncharged (usage -= PAGE_SIZE) by
48
49 mem_cgroup_uncharge_page()
50 Called when an anonymous page is fully unmapped. I.e., mapcount goes
51 to 0. If the page is SwapCache, uncharge is delayed until
52 mem_cgroup_uncharge_swapcache().
53
54 mem_cgroup_uncharge_cache_page()
55 Called when a page-cache is deleted from radix-tree. If the page is
56 SwapCache, uncharge is delayed until mem_cgroup_uncharge_swapcache().
57
58 mem_cgroup_uncharge_swapcache()
59 Called when SwapCache is removed from radix-tree. The charge itself
60 is moved to swap_cgroup. (If mem+swap controller is disabled, no
61 charge to swap occurs.)
62
63 mem_cgroup_uncharge_swap()
64 Called when swp_entry's refcnt goes down to 0. A charge against swap
65 disappears.
66
67 mem_cgroup_end_migration(old, new)
68 At success of migration old is uncharged (if necessary), a charge
69 to new page is committed. At failure, charge to old page is committed.
70
713. charge-commit-cancel
72	In some cases, we can't know whether a "charge" is valid or not at
73	charging time (because of races).
74	To handle such cases, there are charge-commit-cancel functions:
75 mem_cgroup_try_charge_XXX
76 mem_cgroup_commit_charge_XXX
77 mem_cgroup_cancel_charge_XXX
78	These are used in swap-in and migration.
79
80	At try_charge(), there are no flags to say "this page is charged";
81	at this point, usage += PAGE_SIZE.
82
83	At commit(), the function checks whether the page should be charged or
84	not, and either sets flags or backs out the charge (usage -= PAGE_SIZE).
85
86 At cancel(), simply usage -= PAGE_SIZE.
87
88In the explanation below, we assume CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y.
89
904. Anonymous
91	An anonymous page is newly allocated at
92 - page fault into MAP_ANONYMOUS mapping.
93 - Copy-On-Write.
94	It is charged right after allocation, before any page-table
95	operations are done. Of course, it is uncharged when another page is
96	used for the fault address.
97
98	When an anonymous page is freed (by exit() or munmap()), zap_pte() is
99	called and the pages for the ptes are freed one by one (see mm/memory.c).
100	Uncharges are done at page_remove_rmap() when page_mapcount() goes to 0.
101
102	Pages are also freed by page reclaim (vmscan.c), where anonymous
103	pages are swapped out. In this case, the page is marked as
104	PageSwapCache(). The uncharge() routine doesn't uncharge a page marked
105	as SwapCache; that is delayed until __delete_from_swap_cache().
106
107 4.1 Swap-in.
108 At swap-in, the page is taken from swap-cache. There are 2 cases.
109
110 (a) If the SwapCache is newly allocated and read, it has no charges.
111 (b) If the SwapCache has been mapped by processes, it has been
112 charged already.
113
114	Swap-in is one of the most complicated pieces of work. In do_swap_page(),
115	the following events occur when the pte is unchanged.
116
117 (1) the page (SwapCache) is looked up.
118 (2) lock_page()
119 (3) try_charge_swapin()
120 (4) reuse_swap_page() (may call delete_swap_cache())
121 (5) commit_charge_swapin()
122 (6) swap_free().
123
124	Consider the following situations, for example.
125
126 (A) The page has not been charged before (2) and reuse_swap_page()
127 doesn't call delete_from_swap_cache().
128 (B) The page has not been charged before (2) and reuse_swap_page()
129 calls delete_from_swap_cache().
130 (C) The page has been charged before (2) and reuse_swap_page() doesn't
131 call delete_from_swap_cache().
132 (D) The page has been charged before (2) and reuse_swap_page() calls
133 delete_from_swap_cache().
134
135 memory.usage/memsw.usage changes to this page/swp_entry will be
136 Case (A) (B) (C) (D)
137 Event
138 Before (2) 0/ 1 0/ 1 1/ 1 1/ 1
139 ===========================================
140 (3) +1/+1 +1/+1 +1/+1 +1/+1
141 (4) - 0/ 0 - -1/ 0
142 (5) 0/-1 0/ 0 -1/-1 0/ 0
143 (6) - 0/-1 - 0/-1
144 ===========================================
145 Result 1/ 1 1/ 1 1/ 1 1/ 1
146
147	In all cases, charges to this page should end up as 1/ 1.
148
149 4.2 Swap-out.
150	At swap-out, the typical state transition is as below.
151
152 (a) add to swap cache. (marked as SwapCache)
153 swp_entry's refcnt += 1.
154 (b) fully unmapped.
155 swp_entry's refcnt += # of ptes.
156 (c) write back to swap.
157 (d) delete from swap cache. (remove from SwapCache)
158 swp_entry's refcnt -= 1.
159
160
161 At (b), the page is marked as SwapCache and not uncharged.
162 At (d), the page is removed from SwapCache and a charge in page_cgroup
163 is moved to swap_cgroup.
164
165 Finally, at task exit,
166 (e) zap_pte() is called and swp_entry's refcnt -=1 -> 0.
167 Here, a charge in swap_cgroup disappears.
168
1695. Page Cache
170 Page Cache is charged at
171 - add_to_page_cache_locked().
172
173 uncharged at
174 - __remove_from_page_cache().
175
176 The logic is very clear. (About migration, see below)
177 Note: __remove_from_page_cache() is called by remove_from_page_cache()
178 and __remove_mapping().
179
1806. Shmem(tmpfs) Page Cache
181	Memcg's charge/uncharge have special handlers for shmem. The best way
182	to understand shmem's page state transitions is to read mm/shmem.c.
183	But a brief explanation of the behavior of memcg around shmem will be
184	helpful for understanding the logic.
185
186 Shmem's page (just leaf page, not direct/indirect block) can be on
187 - radix-tree of shmem's inode.
188 - SwapCache.
189 - Both on radix-tree and SwapCache. This happens at swap-in
190 and swap-out,
191
192 It's charged when...
193 - A new page is added to shmem's radix-tree.
194 - A swp page is read. (move a charge from swap_cgroup to page_cgroup)
195 It's uncharged when
196 - A page is removed from radix-tree and not SwapCache.
197 - When SwapCache is removed, a charge is moved to swap_cgroup.
198 - When swp_entry's refcnt goes down to 0, a charge in swap_cgroup
199 disappears.
200
2017. Page Migration
202	One of the most complicated functions is the page-migration handler.
203	Memcg has two routines for it. Assume that we are migrating a page's
204	contents from OLDPAGE to NEWPAGE.
205
206	The usual migration logic is:
207 (a) remove the page from LRU.
208 (b) allocate NEWPAGE (migration target)
209 (c) lock by lock_page().
210 (d) unmap all mappings.
211 (e-1) If necessary, replace entry in radix-tree.
212 (e-2) move contents of a page.
213 (f) map all mappings again.
214 (g) pushback the page to LRU.
215 (-) OLDPAGE will be freed.
216
217 Before (g), memcg should complete all necessary charge/uncharge to
218 NEWPAGE/OLDPAGE.
219
220	The points are:
221 - If OLDPAGE is anonymous, all charges will be dropped at (d) because
222 try_to_unmap() drops all mapcount and the page will not be
223 SwapCache.
224
225 - If OLDPAGE is SwapCache, charges will be kept at (g) because
226 __delete_from_swap_cache() isn't called at (e-1)
227
228 - If OLDPAGE is page-cache, charges will be kept at (g) because
229 remove_from_swap_cache() isn't called at (e-1)
230
231 memcg provides following hooks.
232
233 - mem_cgroup_prepare_migration(OLDPAGE)
234 Called after (b) to account a charge (usage += PAGE_SIZE) against
235 memcg which OLDPAGE belongs to.
236
237 - mem_cgroup_end_migration(OLDPAGE, NEWPAGE)
238 Called after (f) before (g).
239 If OLDPAGE is used, commit OLDPAGE again. If OLDPAGE is already
240 charged, a charge by prepare_migration() is automatically canceled.
241 If NEWPAGE is used, commit NEWPAGE and uncharge OLDPAGE.
242
243	But zap_pte() (by exit or munmap) can be called during migration, so
244	we have to check whether OLDPAGE/NEWPAGE is a valid page after commit().
245
2468. LRU
247	Each memcg has its own private LRU. Its handling is currently under the
248	global VM's control (meaning it's handled under the global
249	zone->lru_lock). Almost all routines around memcg's LRU are called by
250	the global LRU's list management functions under zone->lru_lock.
251
252	A special function is mem_cgroup_isolate_pages(). This scans
253	memcg's private LRU and calls __isolate_lru_page() to extract a page
254	from the LRU.
255	(Via __isolate_lru_page(), the page is removed from both the global
256	and the private LRU.)
257
258
2599. Typical Tests.
260
261 Tests for racy cases.
262
263 9.1 Small limit to memcg.
264	When testing racy cases, it's a good idea to set memcg's limit
265	very small rather than in GB. Many races were found in tests under
266	xKB or xxMB limits.
267	(Memory behavior under GB and memory behavior under MB show very
268	different situations.)
269
270 9.2 Shmem
271	Historically, memcg's shmem handling was poor and we saw a number of
272	troubles here. This is because shmem is page cache but can also be
273	SwapCache. Testing with shmem/tmpfs is always a good test.
274
275 9.3 Migration
276	For NUMA, migration is another special case. For easy testing, cpuset
277	is useful. The following is a sample script to do migration.
278
279 mount -t cgroup -o cpuset none /opt/cpuset
280
281 mkdir /opt/cpuset/01
282 echo 1 > /opt/cpuset/01/cpuset.cpus
283 echo 0 > /opt/cpuset/01/cpuset.mems
284 echo 1 > /opt/cpuset/01/cpuset.memory_migrate
285 mkdir /opt/cpuset/02
286 echo 1 > /opt/cpuset/02/cpuset.cpus
287 echo 1 > /opt/cpuset/02/cpuset.mems
288 echo 1 > /opt/cpuset/02/cpuset.memory_migrate
289
290	In the above setup, when you move a task from 01 to 02, page migration
291	from node 0 to node 1 will occur. The following is a script to migrate
292	all tasks under a cpuset.
293 --
294 move_task()
295 {
296 for pid in $1
297 do
298 /bin/echo $pid >$2/tasks 2>/dev/null
299 echo -n $pid
300 echo -n " "
301 done
302 echo END
303 }
304
305 G1_TASK=`cat ${G1}/tasks`
306 G2_TASK=`cat ${G2}/tasks`
307 move_task "${G1_TASK}" ${G2} &
308 --
309 9.4 Memory hotplug.
310	The memory hotplug test is another good test.
311	To offline memory, do the following:
312	# echo offline > /sys/devices/system/memory/memoryXXX/state
313	(XXX is the index of the memory section)
314 This is an easy way to test page migration, too.
315
316 9.5 mkdir/rmdir
317	When using hierarchy, mkdir/rmdir tests should be done.
318	Use tests like the following:
319
320 echo 1 >/opt/cgroup/01/memory/use_hierarchy
321 mkdir /opt/cgroup/01/child_a
322 mkdir /opt/cgroup/01/child_b
323
324	Set a limit on 01.
325	Add a limit to 01/child_b.
326	Run jobs under child_a and child_b.
327
328	Create/delete the following groups at random while the jobs are running:
329 /opt/cgroup/01/child_a/child_aa
330 /opt/cgroup/01/child_b/child_bb
331 /opt/cgroup/01/child_c
332
333	Running new jobs in a new group is also good.
334
335 9.6 Mount with other subsystems.
336	Mounting with other subsystems is a good test because there are
337	races and lock dependencies with other cgroup subsystems.
338
339	For example:
340	# mount -t cgroup -o cpuset,memory,cpu,devices none /cgroup
341
342	and do task moves, mkdir, rmdir, etc. under this.
diff --git a/Documentation/controllers/memory.txt b/Documentation/controllers/memory.txt
index 1c07547d3f81..e1501964df1e 100644
--- a/Documentation/controllers/memory.txt
+++ b/Documentation/controllers/memory.txt
@@ -137,7 +137,32 @@ behind this approach is that a cgroup that aggressively uses a shared
137page will eventually get charged for it (once it is uncharged from 137page will eventually get charged for it (once it is uncharged from
138the cgroup that brought it in -- this will happen on memory pressure). 138the cgroup that brought it in -- this will happen on memory pressure).
139 139
1402.4 Reclaim 140Exception: If CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not used:
141When you run swapoff and thereby force swapped-out pages of shmem (tmpfs)
142back into memory, the charges for those pages are accounted against the
143caller of swapoff rather than against the users of shmem.
144
145
1462.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP)
147The swap extension allows you to record charges for swap. A swapped-in page
148is charged back to the original page allocator if possible.
149
150When swap is accounted, the following files are added:
151 - memory.memsw.usage_in_bytes.
152 - memory.memsw.limit_in_bytes.
153
154The usage of mem+swap is limited by memsw.limit_in_bytes.
155
156Note: why 'mem+swap' rather than just swap?
157The global LRU (kswapd) can swap out arbitrary pages. Swapping out means
158moving an account from memory to swap, so there is no change in the usage
159of mem+swap.
160
161In other words, when we want to limit the usage of swap without affecting
162the global LRU, a mem+swap limit is better, from the OS point of view, than
163just limiting swap.
164
1652.5 Reclaim
141 166
142Each cgroup maintains a per cgroup LRU that consists of an active 167Each cgroup maintains a per cgroup LRU that consists of an active
143and inactive list. When a cgroup goes over its limit, we first try 168and inactive list. When a cgroup goes over its limit, we first try
@@ -207,12 +232,6 @@ exceeded.
207The memory.stat file gives accounting information. Now, the number of 232The memory.stat file gives accounting information. Now, the number of
208caches, RSS and Active pages/Inactive pages are shown. 233caches, RSS and Active pages/Inactive pages are shown.
209 234
210The memory.force_empty gives an interface to drop *all* charges by force.
211
212# echo 1 > memory.force_empty
213
214will drop all charges in cgroup. Currently, this is maintained for test.
215
2164. Testing 2354. Testing
217 236
218Balbir posted lmbench, AIM9, LTP and vmmstress results [10] and [11]. 237Balbir posted lmbench, AIM9, LTP and vmmstress results [10] and [11].
@@ -242,10 +261,106 @@ reclaimed.
242 261
243A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a 262A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a
244cgroup might have some charge associated with it, even though all 263cgroup might have some charge associated with it, even though all
245tasks have migrated away from it. Such charges are automatically dropped at 264tasks have migrated away from it.
246rmdir() if there are no tasks. 265Such charges are freed by default or moved to the parent. When moved,
266both RSS and caches are moved to the parent.
267If both are busy, rmdir() returns -EBUSY. See also section 5.1.
268
269Charges recorded in swap information are not updated when a cgroup is
270removed. The recorded information is discarded, and a cgroup which uses
271the swap (swapcache) will be charged as its new owner.
272
273
2745. Misc. interfaces.
275
2765.1 force_empty
277  The memory.force_empty interface is provided to make a cgroup's memory
278  usage empty. You can use this interface only when the cgroup has no
279  tasks. Write anything to the file:
280
281 # echo 0 > memory.force_empty
282
283  Almost all pages tracked by this memcg will be unmapped and freed. Some
284  pages cannot be freed because they are locked or in use. Such pages are
285  moved to the parent and this cgroup will be empty. But this may return
286  -EBUSY if the cgroup is too busy.
287
288  The typical use case for this interface is calling it before rmdir().
289  Because rmdir() moves all pages to the parent, some out-of-use page caches
290  can be moved to the parent. If you want to avoid that, force_empty is useful.
291
2925.2 stat file
293  The memory.stat file includes the following statistics (currently):
294 cache - # of pages from page-cache and shmem.
295 rss - # of pages from anonymous memory.
296	pgpgin		- # of charging events
297	pgpgout		- # of uncharging events
298 active_anon - # of pages on active lru of anon, shmem.
299	inactive_anon	- # of pages on inactive lru of anon, shmem
300 active_file - # of pages on active lru of file-cache
301 inactive_file - # of pages on inactive lru of file cache
302	unevictable	- # of pages that cannot be reclaimed (mlocked etc.)
303
304	The following depend on CONFIG_DEBUG_VM.
305	inactive_ratio		- VM internal parameter. (see mm/page_alloc.c)
306 recent_rotated_anon - VM internal parameter. (see mm/vmscan.c)
307 recent_rotated_file - VM internal parameter. (see mm/vmscan.c)
308 recent_scanned_anon - VM internal parameter. (see mm/vmscan.c)
309 recent_scanned_file - VM internal parameter. (see mm/vmscan.c)
310
311 Memo:
312	recent_rotated means the recent frequency of lru rotation.
313	recent_scanned means the recent # of scans of the lru.
314	These are shown for debugging; please see the code for exact meanings.
315
316
3175.3 swappiness
318 Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
319
320	The swappiness of the following cgroups cannot be changed:
321	- the root cgroup (uses /proc/sys/vm/swappiness).
322	- a cgroup which uses hierarchy and has a child cgroup.
323	- a cgroup which uses hierarchy and is not the root of the hierarchy.
324
325
3266. Hierarchy support
327
328The memory controller supports a deep hierarchy and hierarchical accounting.
329The hierarchy is created by creating the appropriate cgroups in the
330cgroup filesystem. Consider, for example, the following cgroup filesystem
331hierarchy:
332
333 root
334 / | \
335 / | \
336 a b c
337 | \
338 | \
339 d e
340
341In the diagram above, with hierarchical accounting enabled, all memory
342usage of e is accounted to its ancestors up to the root (i.e., c and root)
343that have memory.use_hierarchy enabled. If one of the ancestors goes over its
344limit, the reclaim algorithm reclaims from the tasks in the ancestor and the
345children of the ancestor.
346
3476.1 Enabling hierarchical accounting and reclaim
348
349The memory controller disables the hierarchy feature by default. Support
350can be enabled by writing 1 to the memory.use_hierarchy file of the root cgroup:
351
352# echo 1 > memory.use_hierarchy
353
354The feature can be disabled by
355
356# echo 0 > memory.use_hierarchy
357
358NOTE1: Enabling/disabling will fail if the cgroup already has other
359cgroups created below it.
360
361NOTE2: This feature can be enabled/disabled per subtree.
247 362
2485. TODO 3637. TODO
249 364
2501. Add support for accounting huge pages (as a separate controller) 3651. Add support for accounting huge pages (as a separate controller)
2512. Make per-cgroup scanner reclaim not-shared pages first 3662. Make per-cgroup scanner reclaim not-shared pages first
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index c1e9545c59bd..9f59fcbf5d82 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -13,9 +13,9 @@
133.6 Constraints 133.6 Constraints
143.7 Example 143.7 Example
15 15
164 DRIVER DEVELOPER NOTES 164 DMAENGINE DRIVER DEVELOPER NOTES
174.1 Conformance points 174.1 Conformance points
184.2 "My application needs finer control of hardware channels" 184.2 "My application needs exclusive control of hardware channels"
19 19
205 SOURCE 205 SOURCE
21 21
@@ -150,6 +150,7 @@ ops_run_* and ops_complete_* routines in drivers/md/raid5.c for more
150implementation examples. 150implementation examples.
151 151
1524 DRIVER DEVELOPMENT NOTES 1524 DRIVER DEVELOPMENT NOTES
153
1534.1 Conformance points: 1544.1 Conformance points:
154There are a few conformance points required in dmaengine drivers to 155There are a few conformance points required in dmaengine drivers to
155accommodate assumptions made by applications using the async_tx API: 156accommodate assumptions made by applications using the async_tx API:
@@ -158,58 +159,49 @@ accommodate assumptions made by applications using the async_tx API:
1583/ Use async_tx_run_dependencies() in the descriptor clean up path to 1593/ Use async_tx_run_dependencies() in the descriptor clean up path to
159 handle submission of dependent operations 160 handle submission of dependent operations
160 161
1614.2 "My application needs finer control of hardware channels" 1624.2 "My application needs exclusive control of hardware channels"
162This requirement seems to arise from cases where a DMA engine driver is 163Primarily this requirement arises from cases where a DMA engine driver
163trying to support device-to-memory DMA. The dmaengine and async_tx 164is being used to support device-to-memory operations. A channel that is
164implementations were designed for offloading memory-to-memory 165performing these operations cannot, for many platform specific reasons,
165operations; however, there are some capabilities of the dmaengine layer 166be shared. For these cases the dma_request_channel() interface is
166that can be used for platform-specific channel management. 167provided.
167Platform-specific constraints can be handled by registering the 168
168application as a 'dma_client' and implementing a 'dma_event_callback' to 169The interface is:
169apply a filter to the available channels in the system. Before showing 170struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
170how to implement a custom dma_event callback some background of 171 dma_filter_fn filter_fn,
171dmaengine's client support is required. 172 void *filter_param);
172 173
173The following routines in dmaengine support multiple clients requesting 174Where dma_filter_fn is defined as:
174use of a channel: 175typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
175- dma_async_client_register(struct dma_client *client) 176
176- dma_async_client_chan_request(struct dma_client *client) 177When the optional 'filter_fn' parameter is set to NULL
177 178dma_request_channel simply returns the first channel that satisfies the
178dma_async_client_register takes a pointer to an initialized dma_client 179capability mask. Otherwise, when the mask parameter is insufficient for
179structure. It expects that the 'event_callback' and 'cap_mask' fields 180specifying the necessary channel, the filter_fn routine can be used to
180are already initialized. 181disposition the available channels in the system. The filter_fn routine
181 182is called once for each free channel in the system. Upon seeing a
182dma_async_client_chan_request triggers dmaengine to notify the client of 183suitable channel filter_fn returns true, which flags that channel to
183all channels that satisfy the capability mask. It is up to the client's 184be the return value from dma_request_channel. A channel allocated via
184event_callback routine to track how many channels the client needs and 185this interface is exclusive to the caller, until dma_release_channel()
185how many it is currently using. The dma_event_callback routine returns a 186is called.
186dma_state_client code to let dmaengine know the status of the 187
187allocation. 188The DMA_PRIVATE capability flag is used to tag dma devices that should
188 189not be used by the general-purpose allocator. It can be set at
189Below is the example of how to extend this functionality for 190initialization time if it is known that a channel will always be
190platform-specific filtering of the available channels beyond the 191private. Alternatively, it is set when dma_request_channel() finds an
191standard capability mask: 192unused "public" channel.
192 193
193static enum dma_state_client 194A couple of caveats to note when implementing a driver and consumer:
194my_dma_client_callback(struct dma_client *client, 1951/ Once a channel has been privately allocated it will no longer be
195 struct dma_chan *chan, enum dma_state state) 196 considered by the general-purpose allocator even after a call to
196{ 197 dma_release_channel().
197 struct dma_device *dma_dev; 1982/ Since capabilities are specified at the device level a dma_device
198 struct my_platform_specific_dma *plat_dma_dev; 199 with multiple channels will either have all channels public, or all
199 200 channels private.
200 dma_dev = chan->device;
201 plat_dma_dev = container_of(dma_dev,
202 struct my_platform_specific_dma,
203 dma_dev);
204
205 if (!plat_dma_dev->platform_specific_capability)
206 return DMA_DUP;
207
208 . . .
209}
210 201
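
For illustration, a consumer of the new interface might look like the
following sketch (an editor's addition, not part of the patch; the filter
routine and the device pointer it matches against are hypothetical):

#include <linux/dmaengine.h>

static bool my_memcpy_filter(struct dma_chan *chan, void *filter_param)
{
	/* accept only channels exposed by the device we care about */
	return chan->device->dev == (struct device *)filter_param;
}

static int my_grab_channel(struct device *my_dev)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, my_memcpy_filter, my_dev);
	if (!chan)
		return -ENODEV;

	/* the channel is exclusive to this caller until released */

	dma_release_channel(chan);
	return 0;
}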
2115 SOURCE 2025 SOURCE
212include/linux/dmaengine.h: core header file for DMA drivers and clients 203
204include/linux/dmaengine.h: core header file for DMA drivers and api users
213drivers/dma/dmaengine.c: offload engine channel management routines 205drivers/dma/dmaengine.c: offload engine channel management routines
214drivers/dma/: location for offload engine drivers 206drivers/dma/: location for offload engine drivers
215include/linux/async_tx.h: core header file for the async_tx api 207include/linux/async_tx.h: core header file for the async_tx api
diff --git a/Documentation/development-process/4.Coding b/Documentation/development-process/4.Coding
index 014aca8f14e2..a5a3450faaa0 100644
--- a/Documentation/development-process/4.Coding
+++ b/Documentation/development-process/4.Coding
@@ -375,10 +375,10 @@ say, this can be a large job, so it is best to be sure that the
375justification is solid. 375justification is solid.
376 376
377When making an incompatible API change, one should, whenever possible, 377When making an incompatible API change, one should, whenever possible,
378ensure that code which has not been updated is caught by the compiler. 378ensure that code which has not been updated is caught by the compiler.
379This will help you to be sure that you have found all in-tree uses of that 379This will help you to be sure that you have found all in-tree uses of that
380interface. It will also alert developers of out-of-tree code that there is 380interface. It will also alert developers of out-of-tree code that there is
381a change that they need to respond to. Supporting out-of-tree code is not 381a change that they need to respond to. Supporting out-of-tree code is not
382something that kernel developers need to be worried about, but we also do 382something that kernel developers need to be worried about, but we also do
383not have to make life harder for out-of-tree developers than it it needs to 383not have to make life harder for out-of-tree developers than it needs to
384be. 384be.
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
new file mode 100644
index 000000000000..0c1c2f63c0a9
--- /dev/null
+++ b/Documentation/dmaengine.txt
@@ -0,0 +1 @@
See Documentation/crypto/async-tx-api.txt
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
new file mode 100644
index 000000000000..64087c34327f
--- /dev/null
+++ b/Documentation/filesystems/btrfs.txt
@@ -0,0 +1,91 @@
1
2 BTRFS
3 =====
4
5Btrfs is a new copy-on-write filesystem for Linux aimed at
6implementing advanced features while focusing on fault tolerance,
7repair and easy administration. Initially developed by Oracle, Btrfs
8is licensed under the GPL and open for contribution from anyone.
9
10Linux has a wealth of filesystems to choose from, but we are facing a
11number of challenges with scaling to the large storage subsystems that
12are becoming common in today's data centers. Filesystems need to scale
13in their ability to address and manage large storage, and also in
14their ability to detect, repair and tolerate errors in the data stored
15on disk. Btrfs is under heavy development, and is not suitable for
16any uses other than benchmarking and review. The Btrfs disk format is
17not yet finalized.
18
19The main Btrfs features include:
20
21 * Extent based file storage (2^64 max file size)
22 * Space efficient packing of small files
23 * Space efficient indexed directories
24 * Dynamic inode allocation
25 * Writable snapshots
26 * Subvolumes (separate internal filesystem roots)
27 * Object level mirroring and striping
28 * Checksums on data and metadata (multiple algorithms available)
29 * Compression
30 * Integrated multiple device support, with several raid algorithms
31 * Online filesystem check (not yet implemented)
32 * Very fast offline filesystem check
33 * Efficient incremental backup and FS mirroring (not yet implemented)
34 * Online filesystem defragmentation
35
36
37
38 MAILING LIST
39 ============
40
41There is a Btrfs mailing list hosted on vger.kernel.org. You can
42find details on how to subscribe here:
43
44http://vger.kernel.org/vger-lists.html#linux-btrfs
45
46Mailing list archives are available from gmane:
47
48http://dir.gmane.org/gmane.comp.file-systems.btrfs
49
50
51
52 IRC
53 ===
54
55Discussion of Btrfs also occurs on the #btrfs channel of the Freenode
56IRC network.
57
58
59
60 UTILITIES
61 =========
62
63Userspace tools for creating and manipulating Btrfs file systems are
64available from the git repository at the following location:
65
66 http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs-unstable.git
67 git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs-unstable.git
68
69These include the following tools:
70
71mkfs.btrfs: create a filesystem
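
	A minimal invocation (illustrative only, using the same example
	device as the snippets below):

	mkfs.btrfs /dev/sda2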
72
73btrfsctl: control program to create snapshots and subvolumes:
74
75 mount /dev/sda2 /mnt
76 btrfsctl -s new_subvol_name /mnt
77 btrfsctl -s snapshot_of_default /mnt/default
78 btrfsctl -s snapshot_of_new_subvol /mnt/new_subvol_name
79 btrfsctl -s snapshot_of_a_snapshot /mnt/snapshot_of_new_subvol
80 ls /mnt
81 default snapshot_of_a_snapshot snapshot_of_new_subvol
82 new_subvol_name snapshot_of_default
83
84 Snapshots and subvolumes cannot be deleted right now, but you can
85 rm -rf all the files and directories inside them.
86
87btrfsck: do a limited check of the FS extent trees.
88
89btrfs-debug-tree: print all of the FS metadata in text form. Example:
90
91 btrfs-debug-tree /dev/sda2 >& big_output_file
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 174eaff7ded9..cec829bc7291 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -58,13 +58,22 @@ Note: More extensive information for getting started with ext4 can be
58 58
59 # mount -t ext4 /dev/hda1 /wherever 59 # mount -t ext4 /dev/hda1 /wherever
60 60
61 - When comparing performance with other filesystems, remember that 61 - When comparing performance with other filesystems, it's always
62 ext3/4 by default offers higher data integrity guarantees than most. 62 important to try multiple workloads; very often a subtle change in a
63 So when comparing with a metadata-only journalling filesystem, such 63 workload parameter can completely change the ranking of which
64 as ext3, use `mount -o data=writeback'. And you might as well use 64 filesystems do well compared to others. When comparing versus ext3,
65 `mount -o nobh' too along with it. Making the journal larger than 65 note that ext4 enables write barriers by default, while ext3 does
66 the mke2fs default often helps performance with metadata-intensive 66 not enable write barriers by default. So it is useful to use
67 workloads. 67 explicitly specify whether barriers are enabled or not when via the
68 '-o barriers=[0|1]' mount option for both ext3 and ext4 filesystems
69 for a fair comparison. When tuning ext3 for best benchmark numbers,
70 it is often worthwhile to try changing the data journaling mode; '-o
71 data=writeback,nobh' can be faster for some workloads. (Note
72 however that running mounted with data=writeback can potentially
73 leave stale data exposed in recently written files in case of an
74 unclean shutdown, which could be a security exposure in some
75 situations.) Configuring the filesystem with a large journal can
76 also be helpful for metadata-intensive workloads.
68 77
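 For instance, a like-for-like barrier comparison could be mounted as
 follows (an illustrative sketch; device and mount point are taken from
 the example above):

	# mount -t ext4 -o barrier=1 /dev/hda1 /wherever
	# mount -t ext3 -o barrier=1 /dev/hda1 /wherever
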
692. Features 782. Features
70=========== 79===========
@@ -74,7 +83,7 @@ Note: More extensive information for getting started with ext4 can be
74* ability to use filesystems > 16TB (e2fsprogs support not available yet) 83* ability to use filesystems > 16TB (e2fsprogs support not available yet)
75* extent format reduces metadata overhead (RAM, IO for access, transactions) 84* extent format reduces metadata overhead (RAM, IO for access, transactions)
76* extent format more robust in face of on-disk corruption due to magics, 85* extent format more robust in face of on-disk corruption due to magics,
77* internal redunancy in tree 86* internal redundancy in tree
78* improved file allocation (multi-block alloc) 87* improved file allocation (multi-block alloc)
79* fix 32000 subdirectory limit 88* fix 32000 subdirectory limit
80* nsec timestamps for mtime, atime, ctime, create time 89* nsec timestamps for mtime, atime, ctime, create time
@@ -116,10 +125,11 @@ grouping of bitmaps and inode tables. Some test results available here:
116When mounting an ext4 filesystem, the following option are accepted: 125When mounting an ext4 filesystem, the following option are accepted:
117(*) == default 126(*) == default
118 127
119extents (*) ext4 will use extents to address file data. The 128ro Mount filesystem read only. Note that ext4 will
120 file system will no longer be mountable by ext3. 129 replay the journal (and thus write to the
121 130 partition) even when mounted "read only". The
122noextents ext4 will not use extents for newly created files 131 mount options "ro,noload" can be used to prevent
132 writes to the filesystem.
123 133
124journal_checksum Enable checksumming of the journal transactions. 134journal_checksum Enable checksumming of the journal transactions.
125 This will allow the recovery code in e2fsck and the 135 This will allow the recovery code in e2fsck and the
@@ -134,17 +144,17 @@ journal_async_commit Commit block can be written to disk without waiting
134journal=update Update the ext4 file system's journal to the current 144journal=update Update the ext4 file system's journal to the current
135 format. 145 format.
136 146
137journal=inum When a journal already exists, this option is ignored.
138 Otherwise, it specifies the number of the inode which
139 will represent the ext4 file system's journal file.
140
141journal_dev=devnum When the external journal device's major/minor numbers 147journal_dev=devnum When the external journal device's major/minor numbers
142 have changed, this option allows the user to specify 148 have changed, this option allows the user to specify
143 the new journal location. The journal device is 149 the new journal location. The journal device is
144 identified through its new major/minor numbers encoded 150 identified through its new major/minor numbers encoded
145 in devnum. 151 in devnum.
146 152
147noload Don't load the journal on mounting. 153noload Don't load the journal on mounting. Note that
154 if the filesystem was not unmounted cleanly,
155 skipping the journal replay will lead to the
156 filesystem containing inconsistencies that can
157 lead to any number of problems.
148 158
149data=journal All data are committed into the journal prior to being 159data=journal All data are committed into the journal prior to being
150 written into the main file system. 160 written into the main file system.
@@ -219,9 +229,12 @@ minixdf Make 'df' act like Minix.
219 229
220debug Extra debugging information is sent to syslog. 230debug Extra debugging information is sent to syslog.
221 231
222errors=remount-ro(*) Remount the filesystem read-only on an error. 232errors=remount-ro Remount the filesystem read-only on an error.
223errors=continue Keep going on a filesystem error. 233errors=continue Keep going on a filesystem error.
224errors=panic Panic and halt the machine if an error occurs. 234errors=panic Panic and halt the machine if an error occurs.
235 (These mount options override the errors behavior
236 specified in the superblock, which can be configured
237 using tune2fs)
225 238
226data_err=ignore(*) Just print an error message if an error occurs 239data_err=ignore(*) Just print an error message if an error occurs
227 in a file data buffer in ordered mode. 240 in a file data buffer in ordered mode.
@@ -261,6 +274,42 @@ delalloc (*) Deferring block allocation until write-out time.
261nodelalloc Disable delayed allocation. Blocks are allocation 274nodelalloc Disable delayed allocation. Blocks are allocation
262 when data is copied from user to page cache. 275 when data is copied from user to page cache.
263 276
277max_batch_time=usec Maximum amount of time ext4 should wait for
 278 additional filesystem operations to be batched
279 together with a synchronous write operation.
280 Since a synchronous write operation is going to
 281 force a commit and then wait for the I/O to
 282 complete, it doesn't cost much, and can be a
 283 huge throughput win, to wait for a small amount
284 of time to see if any other transactions can
285 piggyback on the synchronous write. The
286 algorithm used is designed to automatically tune
287 for the speed of the disk, by measuring the
288 amount of time (on average) that it takes to
289 finish committing a transaction. Call this time
290 the "commit time". If the time that the
 291 transaction has been running is less than the
292 commit time, ext4 will try sleeping for the
293 commit time to see if other operations will join
294 the transaction. The commit time is capped by
295 the max_batch_time, which defaults to 15000us
296 (15ms). This optimization can be turned off
297 entirely by setting max_batch_time to 0.
298
299min_batch_time=usec This parameter sets the commit time (as
300 described above) to be at least min_batch_time.
301 It defaults to zero microseconds. Increasing
302 this parameter may improve the throughput of
303 multi-threaded, synchronous workloads on very
304 fast disks, at the cost of increasing latency.
305
306journal_ioprio=prio The I/O priority (from 0 to 7, where 0 is the
 307 highest priority) which should be used for I/O
308 operations submitted by kjournald2 during a
309 commit operation. This defaults to 3, which is
310 a slightly higher priority than the default I/O
311 priority.
312
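As an illustration of combining the options above (an editor's sketch;
device and mount point follow the example at the top of this document):

	# mount -t ext4 -o ro,noload /dev/hda1 /wherever
	# mount -t ext4 -o max_batch_time=0,journal_ioprio=1 /dev/hda1 /wherever
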
264Data Mode 313Data Mode
265========= 314=========
266There are 3 different data modes: 315There are 3 different data modes:
diff --git a/Documentation/hwmon/abituguru-datasheet b/Documentation/hwmon/abituguru-datasheet
index 4d184f2db0ea..d9251efdcec7 100644
--- a/Documentation/hwmon/abituguru-datasheet
+++ b/Documentation/hwmon/abituguru-datasheet
@@ -121,7 +121,7 @@ Once all bytes have been read data will hold 0x09, but there is no reason to
121test for this. Notice that the number of bytes is bank address dependent see 121test for this. Notice that the number of bytes is bank address dependent see
122above and below. 122above and below.
123 123
124After completing a successfull read it is advised to put the uGuru back in 124After completing a successful read it is advised to put the uGuru back in
125ready mode, so that it is ready for the next read / write cycle. This way 125ready mode, so that it is ready for the next read / write cycle. This way
126if your program / driver is unloaded and later loaded again the detection 126if your program / driver is unloaded and later loaded again the detection
127algorithm described above will still work. 127algorithm described above will still work.
@@ -141,7 +141,7 @@ don't ask why this is the way it is.
141 141
142Once DATA holds 0x01 read CMD it should hold 0xAC now. 142Once DATA holds 0x01 read CMD it should hold 0xAC now.
143 143
144After completing a successfull write it is advised to put the uGuru back in 144After completing a successful write it is advised to put the uGuru back in
145ready mode, so that it is ready for the next read / write cycle. This way 145ready mode, so that it is ready for the next read / write cycle. This way
146if your program / driver is unloaded and later loaded again the detection 146if your program / driver is unloaded and later loaded again the detection
147algorithm described above will still work. 147algorithm described above will still work.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 532eacbbed62..8511d3532c27 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -141,6 +141,7 @@ and is between 256 and 4096 characters. It is defined in the file
141 ht -- run only enough ACPI to enable Hyper Threading 141 ht -- run only enough ACPI to enable Hyper Threading
142 strict -- Be less tolerant of platforms that are not 142 strict -- Be less tolerant of platforms that are not
143 strictly ACPI specification compliant. 143 strictly ACPI specification compliant.
144 rsdt -- prefer RSDT over (default) XSDT
144 145
145 See also Documentation/power/pm.txt, pci=noacpi 146 See also Documentation/power/pm.txt, pci=noacpi
146 147
@@ -151,16 +152,20 @@ and is between 256 and 4096 characters. It is defined in the file
151 default: 0 152 default: 0
152 153
153 acpi_sleep= [HW,ACPI] Sleep options 154 acpi_sleep= [HW,ACPI] Sleep options
154 Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, old_ordering } 155 Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig,
155 See Documentation/power/video.txt for s3_bios and s3_mode. 156 old_ordering, s4_nonvs }
157 See Documentation/power/video.txt for information on
158 s3_bios and s3_mode.
156 s3_beep is for debugging; it makes the PC's speaker beep 159 s3_beep is for debugging; it makes the PC's speaker beep
157 as soon as the kernel's real-mode entry point is called. 160 as soon as the kernel's real-mode entry point is called.
158 s4_nohwsig prevents ACPI hardware signature from being 161 s4_nohwsig prevents ACPI hardware signature from being
159 used during resume from hibernation. 162 used during resume from hibernation.
160 old_ordering causes the ACPI 1.0 ordering of the _PTS 163 old_ordering causes the ACPI 1.0 ordering of the _PTS
161 control method, wrt putting devices into low power 164 control method, with respect to putting devices into
162 states, to be enforced (the ACPI 2.0 ordering of _PTS is 165 low power states, to be enforced (the ACPI 2.0 ordering
163 used by default). 166 of _PTS is used by default).
167 s4_nonvs prevents the kernel from saving/restoring the
168 ACPI NVS memory during hibernation.
164 169
165 acpi_sci= [HW,ACPI] ACPI System Control Interrupt trigger mode 170 acpi_sci= [HW,ACPI] ACPI System Control Interrupt trigger mode
166 Format: { level | edge | high | low } 171 Format: { level | edge | high | low }
@@ -195,7 +200,7 @@ and is between 256 and 4096 characters. It is defined in the file
195 acpi_skip_timer_override [HW,ACPI] 200 acpi_skip_timer_override [HW,ACPI]
196 Recognize and ignore IRQ0/pin2 Interrupt Override. 201 Recognize and ignore IRQ0/pin2 Interrupt Override.
197 For broken nForce2 BIOS resulting in XT-PIC timer. 202 For broken nForce2 BIOS resulting in XT-PIC timer.
198 acpi_use_timer_override [HW,ACPI} 203 acpi_use_timer_override [HW,ACPI]
199 Use timer override. For some broken Nvidia NF5 boards 204 Use timer override. For some broken Nvidia NF5 boards
200 that require a timer override, but don't have 205 that require a timer override, but don't have
201 HPET 206 HPET
@@ -829,8 +834,8 @@ and is between 256 and 4096 characters. It is defined in the file
829 834
830 hlt [BUGS=ARM,SH] 835 hlt [BUGS=ARM,SH]
831 836
832 hvc_iucv= [S390] Number of z/VM IUCV Hypervisor console (HVC) 837 hvc_iucv= [S390] Number of z/VM IUCV hypervisor console (HVC)
833 back-ends. Valid parameters: 0..8 838 terminal devices. Valid values: 0..8
834 839
835 i8042.debug [HW] Toggle i8042 debug mode 840 i8042.debug [HW] Toggle i8042 debug mode
836 i8042.direct [HW] Put keyboard port into non-translated mode 841 i8042.direct [HW] Put keyboard port into non-translated mode
@@ -878,17 +883,19 @@ and is between 256 and 4096 characters. It is defined in the file
878 See Documentation/ide/ide.txt. 883 See Documentation/ide/ide.txt.
879 884
880 idle= [X86] 885 idle= [X86]
881 Format: idle=poll or idle=mwait, idle=halt, idle=nomwait 886 Format: idle=poll, idle=mwait, idle=halt, idle=nomwait
882 Poll forces a polling idle loop that can slightly improves the performance 887 Poll forces a polling idle loop that can slightly
 883 of waking up a idle CPU, but will use a lot of power and make the system 888 improve the performance of waking up an idle CPU, but
884 run hot. Not recommended. 889 will use a lot of power and make the system run hot.
885 idle=mwait. On systems which support MONITOR/MWAIT but the kernel chose 890 Not recommended.
886 to not use it because it doesn't save as much power as a normal idle 891 idle=mwait: On systems which support MONITOR/MWAIT but
887 loop use the MONITOR/MWAIT idle loop anyways. Performance should be the same 892 the kernel chose to not use it because it doesn't save
888 as idle=poll. 893 as much power as a normal idle loop, use the
889 idle=halt. Halt is forced to be used for CPU idle. 894 MONITOR/MWAIT idle loop anyways. Performance should be
895 the same as idle=poll.
896 idle=halt: Halt is forced to be used for CPU idle.
890 In such case C2/C3 won't be used again. 897 In such case C2/C3 won't be used again.
891 idle=nomwait. Disable mwait for CPU C-states 898 idle=nomwait: Disable mwait for CPU C-states
892 899
893 ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem 900 ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem
894 Claim all unknown PCI IDE storage controllers. 901 Claim all unknown PCI IDE storage controllers.
@@ -1074,8 +1081,8 @@ and is between 256 and 4096 characters. It is defined in the file
1074 lapic [X86-32,APIC] Enable the local APIC even if BIOS 1081 lapic [X86-32,APIC] Enable the local APIC even if BIOS
1075 disabled it. 1082 disabled it.
1076 1083
1077 lapic_timer_c2_ok [X86-32,x86-64,APIC] trust the local apic timer in 1084 lapic_timer_c2_ok [X86-32,x86-64,APIC] trust the local apic timer
1078 C2 power state. 1085 in C2 power state.
1079 1086
1080 libata.dma= [LIBATA] DMA control 1087 libata.dma= [LIBATA] DMA control
1081 libata.dma=0 Disable all PATA and SATA DMA 1088 libata.dma=0 Disable all PATA and SATA DMA
@@ -1562,6 +1569,9 @@ and is between 256 and 4096 characters. It is defined in the file
1562 1569
1563 nosoftlockup [KNL] Disable the soft-lockup detector. 1570 nosoftlockup [KNL] Disable the soft-lockup detector.
1564 1571
1572 noswapaccount [KNL] Disable accounting of swap in memory resource
1573 controller. (See Documentation/controllers/memory.txt)
1574
1565 nosync [HW,M68K] Disables sync negotiation for all devices. 1575 nosync [HW,M68K] Disables sync negotiation for all devices.
1566 1576
1567 notsc [BUGS=X86-32] Disable Time Stamp Counter 1577 notsc [BUGS=X86-32] Disable Time Stamp Counter
@@ -2300,7 +2310,8 @@ and is between 256 and 4096 characters. It is defined in the file
2300 2310
2301 thermal.psv= [HW,ACPI] 2311 thermal.psv= [HW,ACPI]
2302 -1: disable all passive trip points 2312 -1: disable all passive trip points
2303 <degrees C>: override all passive trip points to this value 2313 <degrees C>: override all passive trip points to this
2314 value
2304 2315
2305 thermal.tzp= [HW,ACPI] 2316 thermal.tzp= [HW,ACPI]
2306 Specify global default ACPI thermal zone polling rate 2317 Specify global default ACPI thermal zone polling rate
diff --git a/Documentation/powerpc/dts-bindings/4xx/ndfc.txt b/Documentation/powerpc/dts-bindings/4xx/ndfc.txt
new file mode 100644
index 000000000000..869f0b5f16e8
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/ndfc.txt
@@ -0,0 +1,39 @@
1AMCC NDFC (NAND Flash Controller)
2
3Required properties:
4- compatible : "ibm,ndfc".
5- reg : should specify chip select and size used for the chip (0x2000).
6
7Optional properties:
8- ccr : NDFC config and control register value (default 0).
9- bank-settings : NDFC bank configuration register value (default 0).
10
11Notes:
12- partition(s) - follows the OF MTD standard for partitions
13
14Example:
15
16ndfc@1,0 {
17 compatible = "ibm,ndfc";
18 reg = <0x00000001 0x00000000 0x00002000>;
19 ccr = <0x00001000>;
20 bank-settings = <0x80002222>;
21 #address-cells = <1>;
22 #size-cells = <1>;
23
24 nand {
25 #address-cells = <1>;
26 #size-cells = <1>;
27
28 partition@0 {
29 label = "kernel";
30 reg = <0x00000000 0x00200000>;
31 };
32 partition@200000 {
33 label = "root";
34 reg = <0x00200000 0x03E00000>;
35 };
36 };
37};
38
39
diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/powerpc/dts-bindings/fsl/board.txt
index 81a917ef96e9..6c974d28eeb4 100644
--- a/Documentation/powerpc/dts-bindings/fsl/board.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/board.txt
@@ -18,7 +18,7 @@ This is the memory-mapped registers for on board FPGA.
18 18
 19Required properties: 19Required properties:
20- compatible : should be "fsl,fpga-pixis". 20- compatible : should be "fsl,fpga-pixis".
 21- reg : should contain the address and the lenght of the FPPGA register 21- reg : should contain the address and the length of the FPGA register
22 set. 22 set.
23 23
24Example (MPC8610HPCD): 24Example (MPC8610HPCD):
@@ -27,3 +27,33 @@ Example (MPC8610HPCD):
27 compatible = "fsl,fpga-pixis"; 27 compatible = "fsl,fpga-pixis";
28 reg = <0xe8000000 32>; 28 reg = <0xe8000000 32>;
29 }; 29 };
30
31* Freescale BCSR GPIO banks
32
33Some BCSR registers act as simple GPIO controllers, each such
34register can be represented by the gpio-controller node.
35
36Required properties:
37- compatible : Should be "fsl,<board>-bcsr-gpio".
38- reg : Should contain the address and the length of the GPIO bank
39 register.
40- #gpio-cells : Should be two. The first cell is the pin number and the
41 second cell is used to specify optional parameters (currently unused).
42- gpio-controller : Marks the port as GPIO controller.
43
44Example:
45
46 bcsr@1,0 {
47 #address-cells = <1>;
48 #size-cells = <1>;
49 compatible = "fsl,mpc8360mds-bcsr";
50 reg = <1 0 0x8000>;
51 ranges = <0 1 0 0x8000>;
52
53 bcsr13: gpio-controller@d {
54 #gpio-cells = <2>;
55 compatible = "fsl,mpc8360mds-bcsr-gpio";
56 reg = <0xd 1>;
57 gpio-controller;
58 };
59 };
diff --git a/Documentation/scsi/scsi_fc_transport.txt b/Documentation/scsi/scsi_fc_transport.txt
index 38d324d62b25..e5b071d46619 100644
--- a/Documentation/scsi/scsi_fc_transport.txt
+++ b/Documentation/scsi/scsi_fc_transport.txt
@@ -191,7 +191,7 @@ Vport States:
191 This is equivalent to a driver "attach" on an adapter, which is 191 This is equivalent to a driver "attach" on an adapter, which is
192 independent of the adapter's link state. 192 independent of the adapter's link state.
193 - Instantiation of the vport on the FC link via ELS traffic, etc. 193 - Instantiation of the vport on the FC link via ELS traffic, etc.
194 This is equivalent to a "link up" and successfull link initialization. 194 This is equivalent to a "link up" and successful link initialization.
195 Further information can be found in the interfaces section below for 195 Further information can be found in the interfaces section below for
196 Vport Creation. 196 Vport Creation.
197 197
@@ -320,7 +320,7 @@ Vport Creation:
320 This is equivalent to a driver "attach" on an adapter, which is 320 This is equivalent to a driver "attach" on an adapter, which is
321 independent of the adapter's link state. 321 independent of the adapter's link state.
322 - Instantiation of the vport on the FC link via ELS traffic, etc. 322 - Instantiation of the vport on the FC link via ELS traffic, etc.
323 This is equivalent to a "link up" and successfull link initialization. 323 This is equivalent to a "link up" and successful link initialization.
324 324
325 The LLDD's vport_create() function will not synchronously wait for both 325 The LLDD's vport_create() function will not synchronously wait for both
326 parts to be fully completed before returning. It must validate that the 326 parts to be fully completed before returning. It must validate that the
diff --git a/Documentation/w1/masters/00-INDEX b/Documentation/w1/masters/00-INDEX
index 7b0ceaaad7af..d63fa024ac05 100644
--- a/Documentation/w1/masters/00-INDEX
+++ b/Documentation/w1/masters/00-INDEX
@@ -4,5 +4,7 @@ ds2482
4 - The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses. 4 - The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses.
5ds2490 5ds2490
6 - The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges. 6 - The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges.
7mxc_w1
8 - W1 master controller driver found on Freescale MX2/MX3 SoCs
7w1-gpio 9w1-gpio
8 - GPIO 1-wire bus master driver. 10 - GPIO 1-wire bus master driver.
diff --git a/Documentation/w1/masters/mxc-w1 b/Documentation/w1/masters/mxc-w1
new file mode 100644
index 000000000000..97f6199a7f39
--- /dev/null
+++ b/Documentation/w1/masters/mxc-w1
@@ -0,0 +1,11 @@
1Kernel driver mxc_w1
2====================
3
4Supported chips:
5 * Freescale MX27, MX31 and probably other i.MX SoCs
6 Datasheets:
7 http://www.freescale.com/files/32bit/doc/data_sheet/MCIMX31.pdf?fpsp=1
8 http://www.freescale.com/files/dsp/MCIMX27.pdf?fpsp=1
9
10Author: Originally based on Freescale code, prepared for mainline by
11 Sascha Hauer <s.hauer@pengutronix.de>
diff --git a/Documentation/w1/w1.netlink b/Documentation/w1/w1.netlink
index 3640c7c87d45..804445f745ed 100644
--- a/Documentation/w1/w1.netlink
+++ b/Documentation/w1/w1.netlink
@@ -5,69 +5,157 @@ Message types.
5============= 5=============
6 6
7There are three types of messages between w1 core and userspace: 7There are three types of messages between w1 core and userspace:
 81. Events. They are generated each time new master or slave device 81. Events. They are generated each time a new master or slave device is
 9 either due to automatic or requested search. 9 found, either due to an automatic or a requested search.
102. Userspace commands. Includes read/write and search/alarm search comamnds. 102. Userspace commands.
113. Replies to userspace commands. 113. Replies to userspace commands.
12 12
13 13
14Protocol. 14Protocol.
15======== 15========
16 16
17[struct cn_msg] - connector header. It's length field is equal to size of the attached data. 17[struct cn_msg] - connector header.
 18 Its length field is equal to the size of the attached data
18[struct w1_netlink_msg] - w1 netlink header. 19[struct w1_netlink_msg] - w1 netlink header.
19 __u8 type - message type. 20 __u8 type - message type.
20 W1_SLAVE_ADD/W1_SLAVE_REMOVE - slave add/remove events. 21 W1_LIST_MASTERS
21 W1_MASTER_ADD/W1_MASTER_REMOVE - master add/remove events. 22 list current bus masters
22 W1_MASTER_CMD - userspace command for bus master device (search/alarm search). 23 W1_SLAVE_ADD/W1_SLAVE_REMOVE
23 W1_SLAVE_CMD - userspace command for slave device (read/write/ search/alarm search 24 slave add/remove events
24 for bus master device where given slave device found). 25 W1_MASTER_ADD/W1_MASTER_REMOVE
26 master add/remove events
27 W1_MASTER_CMD
28 userspace command for bus master
29 device (search/alarm search)
30 W1_SLAVE_CMD
31 userspace command for slave device
32 (read/write/touch)
25 __u8 res - reserved 33 __u8 res - reserved
 26 __u16 len - size of attached to this header data. 34 __u16 len - size of data attached to this header
27 union { 35 union {
28 __u8 id; - slave unique device id 36 __u8 id[8]; - slave unique device id
29 struct w1_mst { 37 struct w1_mst {
30 __u32 id; - master's id. 38 __u32 id; - master's id
31 __u32 res; - reserved 39 __u32 res; - reserved
32 } mst; 40 } mst;
33 } id; 41 } id;
34 42
35[strucrt w1_netlink_cmd] - command for gived master or slave device. 43[struct w1_netlink_cmd] - command for given master or slave device.
36 __u8 cmd - command opcode. 44 __u8 cmd - command opcode.
37 W1_CMD_READ - read command. 45 W1_CMD_READ - read command
38 W1_CMD_WRITE - write command. 46 W1_CMD_WRITE - write command
39 W1_CMD_SEARCH - search command. 47 W1_CMD_TOUCH - touch command
40 W1_CMD_ALARM_SEARCH - alarm search command. 48 (write and sample data back to userspace)
49 W1_CMD_SEARCH - search command
50 W1_CMD_ALARM_SEARCH - alarm search command
41 __u8 res - reserved 51 __u8 res - reserved
42 __u16 len - length of data for this command. 52 __u16 len - length of data for this command
43 For read command data must be allocated like for write command. 53 For read command data must be allocated like for write command
44 __u8 data[0] - data for this command. 54 __u8 data[0] - data for this command
45 55
46 56
47Each connector message can include one or more w1_netlink_msg with zero of more attached w1_netlink_cmd messages. 57Each connector message can include one or more w1_netlink_msg with
58zero or more attached w1_netlink_cmd messages.
48 59
49For event messages there are no w1_netlink_cmd embedded structures, only connector header 60For event messages there are no w1_netlink_cmd embedded structures,
 50and w1_netlink_msg strucutre with "len" field being zero and filled type (one of event types) 61only connector header and w1_netlink_msg structure with "len" field
51and id - either 8 bytes of slave unique id in host order, or master's id, which is assigned 62being zero and filled type (one of event types) and id:
52to bus master device when it is added to w1 core. 63either 8 bytes of slave unique id in host order,
64or master's id, which is assigned to bus master device
65when it is added to w1 core.
66
67Currently replies to userspace commands are only generated for read
68command request. One reply is generated exactly for one w1_netlink_cmd
69read request. Replies are not combined when sent - i.e. typical reply
70messages looks like the following:
53 71
54Currently replies to userspace commands are only generated for read command request.
55One reply is generated exactly for one w1_netlink_cmd read request.
56Replies are not combined when sent - i.e. typical reply messages looks like the following:
57[cn_msg][w1_netlink_msg][w1_netlink_cmd] 72[cn_msg][w1_netlink_msg][w1_netlink_cmd]
58cn_msg.len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd) + cmd->len; 73cn_msg.len = sizeof(struct w1_netlink_msg) +
74 sizeof(struct w1_netlink_cmd) +
75 cmd->len;
59w1_netlink_msg.len = sizeof(struct w1_netlink_cmd) + cmd->len; 76w1_netlink_msg.len = sizeof(struct w1_netlink_cmd) + cmd->len;
60w1_netlink_cmd.len = cmd->len; 77w1_netlink_cmd.len = cmd->len;
61 78
 79A reply to W1_LIST_MASTERS sends a message back to userspace
 80containing the list of all registered master ids in the following
 81format:
82
83 cn_msg (CN_W1_IDX.CN_W1_VAL as id, len is equal to sizeof(struct
 84 w1_netlink_msg) plus the number of masters multiplied by 4)
85 w1_netlink_msg (type: W1_LIST_MASTERS, len is equal to
86 number of masters multiplied by 4 (u32 size))
87 id0 ... idN
88
 89 Each message is at most 4k in size, so if the number of master devices
 90 exceeds this, the reply will be split into several messages;
 91 cn.seq will be increased for each one.
92
93W1 search and alarm search commands.
94request:
95[cn_msg]
96 [w1_netlink_msg type = W1_MASTER_CMD
97 id is equal to the bus master id to use for searching]
98 [w1_netlink_cmd cmd = W1_CMD_SEARCH or W1_CMD_ALARM_SEARCH]
99
100reply:
101 [cn_msg, ack = 1 and increasing, 0 means the last message,
102 seq is equal to the request seq]
103 [w1_netlink_msg type = W1_MASTER_CMD]
104 [w1_netlink_cmd cmd = W1_CMD_SEARCH or W1_CMD_ALARM_SEARCH
105 len is equal to number of IDs multiplied by 8]
106 [64bit-id0 ... 64bit-idN]
107Length in each header corresponds to the size of the data behind it, so
108w1_netlink_cmd->len = N * 8; where N is number of IDs in this message.
109 Can be zero.
110w1_netlink_msg->len = sizeof(struct w1_netlink_cmd) + N * 8;
111cn_msg->len = sizeof(struct w1_netlink_msg) +
112 sizeof(struct w1_netlink_cmd) +
113 N*8;
114
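As an illustration (an editor's sketch, not part of the patch), a userspace
application could fill the headers for such a search request like this; the
structure and constant definitions are taken from drivers/w1/w1_netlink.h
and include/linux/connector.h:

#include <string.h>
#include <linux/connector.h>
#include "w1_netlink.h"

static size_t fill_search_request(void *buf, __u32 master_id, __u32 seq)
{
	struct cn_msg *cn = buf;
	struct w1_netlink_msg *msg = (struct w1_netlink_msg *)(cn + 1);
	struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(msg + 1);

	memset(buf, 0, sizeof(*cn) + sizeof(*msg) + sizeof(*cmd));

	cn->id.idx = CN_W1_IDX;		/* connector "address", see below */
	cn->id.val = CN_W1_VAL;
	cn->seq = seq;			/* chosen by the userspace application */
	cn->len = sizeof(*msg) + sizeof(*cmd);

	msg->type = W1_MASTER_CMD;
	msg->id.mst.id = master_id;	/* bus master to search on */
	msg->len = sizeof(*cmd);

	cmd->cmd = W1_CMD_SEARCH;
	cmd->len = 0;			/* a search carries no payload */

	return sizeof(*cn) + cn->len;
}
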
115W1 reset command.
116[cn_msg]
117 [w1_netlink_msg type = W1_MASTER_CMD
118 id is equal to the bus master id to use for searching]
119 [w1_netlink_cmd cmd = W1_CMD_RESET]
120
121
122Command status replies.
123======================
124
125Each command (either root, master or slave with or without w1_netlink_cmd
 126structure) will be 'acked' by the w1 core. The reply format is the same
 127as the request message, except that the length parameters do not account for data
128requested by the user, i.e. read/write/touch IO requests will not contain
129data, so w1_netlink_cmd.len will be 0, w1_netlink_msg.len will be size
130of the w1_netlink_cmd structure and cn_msg.len will be equal to the sum
131of the sizeof(struct w1_netlink_msg) and sizeof(struct w1_netlink_cmd).
 132If a reply is generated for a master or root command (which do not have
 133a w1_netlink_cmd attached), the reply will contain only cn_msg and w1_netlink_msg
 134structures.
135
 136The w1_netlink_msg.status field will carry a positive error value
137(EINVAL for example) or zero in case of success.
138
139All other fields in every structure will mirror the same parameters in the
140request message (except lengths as described above).
141
 142A status reply is generated for every w1_netlink_cmd embedded in the
 143w1_netlink_msg; if there are no w1_netlink_cmd structures, a
144reply will be generated for the w1_netlink_msg.
145
146All w1_netlink_cmd command structures are handled in every w1_netlink_msg,
 147even if there were errors; only a length mismatch interrupts message processing.
148
62 149
63Operation steps in w1 core when new command is received. 150Operation steps in w1 core when new command is received.
64======================================================= 151=======================================================
65 152
 66When new message (w1_netlink_msg) is received w1 core detects if it is master of slave request, 153When a new message (w1_netlink_msg) is received, the w1 core detects
 67according to w1_netlink_msg.type field. 154whether it is a master or slave request according to the w1_netlink_msg.type field.
 68Then the master or slave device is searched for. 155Then the master or slave device is searched for.
 69When found, master device (requested or those one on where slave device is found) is locked. 156When found, the master device (either the requested one or the one where
 70If slave command is requested, then reset/select procedure is started to select given device. 157the slave device was found) is locked. If a slave command is requested, a
 158reset/select procedure is started to select the given device.
71 159
 72All operations requested in the w1_netlink_msg are then performed one by one. 160All operations requested in the w1_netlink_msg are then performed one by one.
 73If a command requires a reply (like a read command), it is sent on command completion. 161If a command requires a reply (like a read command), it is sent on command completion.
@@ -82,8 +170,8 @@ Connector [1] specific documentation.
82Each connector message includes two u32 fields as "address". 170Each connector message includes two u32 fields as "address".
83w1 uses CN_W1_IDX and CN_W1_VAL defined in include/linux/connector.h header. 171w1 uses CN_W1_IDX and CN_W1_VAL defined in include/linux/connector.h header.
84Each message also includes sequence and acknowledge numbers. 172Each message also includes sequence and acknowledge numbers.
 85Sequence number for event messages is appropriate bus master sequence number increased with 173The sequence number for event messages is the bus master's sequence number,
 86each event message sent "through" this master. 174increased with each event message sent "through" this master.
 87The sequence number for userspace requests is set by the userspace application. 175The sequence number for userspace requests is set by the userspace application.
 88The sequence number for a reply is the same as in the request, and the 176The sequence number for a reply is the same as in the request, and the
89acknowledge number is set to seq+1. 177acknowledge number is set to seq+1.
@@ -93,6 +181,6 @@ Additional documantion, source code examples.
93============================================ 181============================================
94 182
951. Documentation/connector 1831. Documentation/connector
962. http://tservice.net.ru/~s0mbre/archive/w1 1842. http://www.ioremap.net/archive/w1
97This archive includes userspace application w1d.c which 185This archive includes userspace application w1d.c which uses
98uses read/write/search commands for all master/slave devices found on the bus. 186read/write/search commands for all master/slave devices found on the bus.
diff --git a/MAINTAINERS b/MAINTAINERS
index a01884407fe4..57e0309243cc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1360,6 +1360,11 @@ P: Maciej W. Rozycki
1360M: macro@linux-mips.org 1360M: macro@linux-mips.org
1361S: Maintained 1361S: Maintained
1362 1362
1363DELL LAPTOP DRIVER
1364P: Matthew Garrett
1365M: mjg59@srcf.ucam.org
1366S: Maintained
1367
1363DELL LAPTOP SMM DRIVER 1368DELL LAPTOP SMM DRIVER
1364P: Massimo Dal Zotto 1369P: Massimo Dal Zotto
1365M: dz@debian.org 1370M: dz@debian.org
@@ -3484,6 +3489,12 @@ L: linuxppc-dev@ozlabs.org
3484L: cbe-oss-dev@ozlabs.org 3489L: cbe-oss-dev@ozlabs.org
3485S: Supported 3490S: Supported
3486 3491
3492PS3VRAM DRIVER
3493P: Jim Paris
3494M: jim@jtan.com
3495L: cbe-oss-dev@ozlabs.org
3496S: Maintained
3497
3487PVRUSB2 VIDEO4LINUX DRIVER 3498PVRUSB2 VIDEO4LINUX DRIVER
3488P: Mike Isely 3499P: Mike Isely
3489M: isely@pobox.com 3500M: isely@pobox.com
diff --git a/arch/arm/configs/clps7500_defconfig b/arch/arm/configs/clps7500_defconfig
deleted file mode 100644
index 49e9f9d8b3d1..000000000000
--- a/arch/arm/configs/clps7500_defconfig
+++ /dev/null
@@ -1,801 +0,0 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.12-rc1-bk2
4# Sun Mar 27 17:20:48 2005
5#
6CONFIG_ARM=y
7CONFIG_MMU=y
8CONFIG_UID16=y
9CONFIG_RWSEM_GENERIC_SPINLOCK=y
10CONFIG_GENERIC_CALIBRATE_DELAY=y
11CONFIG_GENERIC_IOMAP=y
12
13#
14# Code maturity level options
15#
16CONFIG_EXPERIMENTAL=y
17CONFIG_CLEAN_COMPILE=y
18CONFIG_BROKEN_ON_SMP=y
19
20#
21# General setup
22#
23CONFIG_LOCALVERSION=""
24CONFIG_SWAP=y
25CONFIG_SYSVIPC=y
26# CONFIG_POSIX_MQUEUE is not set
27# CONFIG_BSD_PROCESS_ACCT is not set
28# CONFIG_SYSCTL is not set
29# CONFIG_AUDIT is not set
30# CONFIG_HOTPLUG is not set
31CONFIG_KOBJECT_UEVENT=y
32# CONFIG_IKCONFIG is not set
33CONFIG_EMBEDDED=y
34CONFIG_KALLSYMS=y
35# CONFIG_KALLSYMS_EXTRA_PASS is not set
36CONFIG_BASE_FULL=y
37CONFIG_FUTEX=y
38CONFIG_EPOLL=y
39CONFIG_CC_OPTIMIZE_FOR_SIZE=y
40CONFIG_SHMEM=y
41CONFIG_CC_ALIGN_FUNCTIONS=0
42CONFIG_CC_ALIGN_LABELS=0
43CONFIG_CC_ALIGN_LOOPS=0
44CONFIG_CC_ALIGN_JUMPS=0
45# CONFIG_TINY_SHMEM is not set
46CONFIG_BASE_SMALL=0
47
48#
49# Loadable module support
50#
51# CONFIG_MODULES is not set
52
53#
54# System Type
55#
56CONFIG_ARCH_CLPS7500=y
57# CONFIG_ARCH_CLPS711X is not set
58# CONFIG_ARCH_CO285 is not set
59# CONFIG_ARCH_EBSA110 is not set
60# CONFIG_ARCH_FOOTBRIDGE is not set
61# CONFIG_ARCH_INTEGRATOR is not set
62# CONFIG_ARCH_IOP3XX is not set
63# CONFIG_ARCH_IXP4XX is not set
64# CONFIG_ARCH_IXP2000 is not set
65# CONFIG_ARCH_L7200 is not set
66# CONFIG_ARCH_PXA is not set
67# CONFIG_ARCH_RPC is not set
68# CONFIG_ARCH_SA1100 is not set
69# CONFIG_ARCH_S3C2410 is not set
70# CONFIG_ARCH_SHARK is not set
71# CONFIG_ARCH_LH7A40X is not set
72# CONFIG_ARCH_OMAP is not set
73# CONFIG_ARCH_VERSATILE is not set
74# CONFIG_ARCH_IMX is not set
75# CONFIG_ARCH_H720X is not set
76
77#
78# Processor Type
79#
80CONFIG_CPU_32=y
81CONFIG_CPU_ARM710=y
82CONFIG_CPU_32v3=y
83CONFIG_CPU_CACHE_V3=y
84CONFIG_CPU_CACHE_VIVT=y
85CONFIG_CPU_COPY_V3=y
86CONFIG_CPU_TLB_V3=y
87
88#
89# Processor Features
90#
91CONFIG_TIMER_ACORN=y
92
93#
94# Bus support
95#
96CONFIG_ISA=y
97
98#
99# PCCARD (PCMCIA/CardBus) support
100#
101# CONFIG_PCCARD is not set
102
103#
104# Kernel Features
105#
106# CONFIG_PREEMPT is not set
107CONFIG_ALIGNMENT_TRAP=y
108
109#
110# Boot options
111#
112CONFIG_ZBOOT_ROM_TEXT=0x0
113CONFIG_ZBOOT_ROM_BSS=0x0
114CONFIG_CMDLINE="mem=16M root=nfs"
115# CONFIG_XIP_KERNEL is not set
116
117#
118# Floating point emulation
119#
120
121#
122# At least one emulation must be selected
123#
124# CONFIG_FPE_NWFPE is not set
125
126#
127# Userspace binary formats
128#
129CONFIG_BINFMT_ELF=y
130# CONFIG_BINFMT_AOUT is not set
131# CONFIG_BINFMT_MISC is not set
132# CONFIG_ARTHUR is not set
133
134#
135# Power management options
136#
137# CONFIG_PM is not set
138
139#
140# Device Drivers
141#
142
143#
144# Generic Driver Options
145#
146CONFIG_STANDALONE=y
147CONFIG_PREVENT_FIRMWARE_BUILD=y
148# CONFIG_FW_LOADER is not set
149
150#
151# Memory Technology Devices (MTD)
152#
153CONFIG_MTD=y
154# CONFIG_MTD_DEBUG is not set
155# CONFIG_MTD_CONCAT is not set
156# CONFIG_MTD_PARTITIONS is not set
157
158#
159# User Modules And Translation Layers
160#
161# CONFIG_MTD_CHAR is not set
162# CONFIG_MTD_BLOCK is not set
163# CONFIG_MTD_BLOCK_RO is not set
164# CONFIG_FTL is not set
165# CONFIG_NFTL is not set
166# CONFIG_INFTL is not set
167
168#
169# RAM/ROM/Flash chip drivers
170#
171# CONFIG_MTD_CFI is not set
172# CONFIG_MTD_JEDECPROBE is not set
173CONFIG_MTD_MAP_BANK_WIDTH_1=y
174CONFIG_MTD_MAP_BANK_WIDTH_2=y
175CONFIG_MTD_MAP_BANK_WIDTH_4=y
176# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
177# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
178# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
179CONFIG_MTD_CFI_I1=y
180CONFIG_MTD_CFI_I2=y
181# CONFIG_MTD_CFI_I4 is not set
182# CONFIG_MTD_CFI_I8 is not set
183# CONFIG_MTD_RAM is not set
184# CONFIG_MTD_ROM is not set
185# CONFIG_MTD_ABSENT is not set
186
187#
188# Mapping drivers for chip access
189#
190# CONFIG_MTD_COMPLEX_MAPPINGS is not set
191
192#
193# Self-contained MTD device drivers
194#
195# CONFIG_MTD_SLRAM is not set
196# CONFIG_MTD_PHRAM is not set
197# CONFIG_MTD_MTDRAM is not set
198# CONFIG_MTD_BLKMTD is not set
199# CONFIG_MTD_BLOCK2MTD is not set
200
201#
202# Disk-On-Chip Device Drivers
203#
204# CONFIG_MTD_DOC2000 is not set
205# CONFIG_MTD_DOC2001 is not set
206# CONFIG_MTD_DOC2001PLUS is not set
207
208#
209# NAND Flash Device Drivers
210#
211# CONFIG_MTD_NAND is not set
212
213#
214# Parallel port support
215#
216CONFIG_PARPORT=y
217CONFIG_PARPORT_PC=y
218CONFIG_PARPORT_PC_FIFO=y
219# CONFIG_PARPORT_PC_SUPERIO is not set
220# CONFIG_PARPORT_ARC is not set
221# CONFIG_PARPORT_GSC is not set
222CONFIG_PARPORT_1284=y
223
224#
225# Plug and Play support
226#
227# CONFIG_PNP is not set
228
229#
230# Block devices
231#
232# CONFIG_BLK_DEV_FD is not set
233# CONFIG_BLK_DEV_XD is not set
234# CONFIG_PARIDE is not set
235# CONFIG_BLK_DEV_COW_COMMON is not set
236# CONFIG_BLK_DEV_LOOP is not set
237CONFIG_BLK_DEV_NBD=y
238CONFIG_BLK_DEV_RAM=y
239CONFIG_BLK_DEV_RAM_COUNT=16
240CONFIG_BLK_DEV_RAM_SIZE=4096
241# CONFIG_BLK_DEV_INITRD is not set
242CONFIG_INITRAMFS_SOURCE=""
243# CONFIG_CDROM_PKTCDVD is not set
244
245#
246# IO Schedulers
247#
248CONFIG_IOSCHED_NOOP=y
249CONFIG_IOSCHED_AS=y
250CONFIG_IOSCHED_DEADLINE=y
251CONFIG_IOSCHED_CFQ=y
252# CONFIG_ATA_OVER_ETH is not set
253
254#
255# ATA/ATAPI/MFM/RLL support
256#
257# CONFIG_IDE is not set
258
259#
260# SCSI device support
261#
262# CONFIG_SCSI is not set
263
264#
265# Multi-device support (RAID and LVM)
266#
267# CONFIG_MD is not set
268
269#
270# Fusion MPT device support
271#
272
273#
274# IEEE 1394 (FireWire) support
275#
276
277#
278# I2O device support
279#
280
281#
282# Networking support
283#
284CONFIG_NET=y
285
286#
287# Networking options
288#
289# CONFIG_PACKET is not set
290# CONFIG_NETLINK_DEV is not set
291CONFIG_UNIX=y
292# CONFIG_NET_KEY is not set
293CONFIG_INET=y
294# CONFIG_IP_MULTICAST is not set
295# CONFIG_IP_ADVANCED_ROUTER is not set
296CONFIG_IP_PNP=y
297# CONFIG_IP_PNP_DHCP is not set
298CONFIG_IP_PNP_BOOTP=y
299# CONFIG_IP_PNP_RARP is not set
300# CONFIG_NET_IPIP is not set
301# CONFIG_NET_IPGRE is not set
302# CONFIG_ARPD is not set
303# CONFIG_SYN_COOKIES is not set
304# CONFIG_INET_AH is not set
305# CONFIG_INET_ESP is not set
306# CONFIG_INET_IPCOMP is not set
307# CONFIG_INET_TUNNEL is not set
308CONFIG_IP_TCPDIAG=y
309# CONFIG_IP_TCPDIAG_IPV6 is not set
310# CONFIG_IPV6 is not set
311# CONFIG_NETFILTER is not set
312
313#
314# SCTP Configuration (EXPERIMENTAL)
315#
316# CONFIG_IP_SCTP is not set
317# CONFIG_ATM is not set
318# CONFIG_BRIDGE is not set
319# CONFIG_VLAN_8021Q is not set
320# CONFIG_DECNET is not set
321# CONFIG_LLC2 is not set
322# CONFIG_IPX is not set
323# CONFIG_ATALK is not set
324# CONFIG_X25 is not set
325# CONFIG_LAPB is not set
326# CONFIG_NET_DIVERT is not set
327# CONFIG_ECONET is not set
328# CONFIG_WAN_ROUTER is not set
329
330#
331# QoS and/or fair queueing
332#
333# CONFIG_NET_SCHED is not set
334# CONFIG_NET_CLS_ROUTE is not set
335
336#
337# Network testing
338#
339# CONFIG_NET_PKTGEN is not set
340# CONFIG_NETPOLL is not set
341# CONFIG_NET_POLL_CONTROLLER is not set
342# CONFIG_HAMRADIO is not set
343# CONFIG_IRDA is not set
344# CONFIG_BT is not set
345CONFIG_NETDEVICES=y
346CONFIG_DUMMY=y
347# CONFIG_BONDING is not set
348# CONFIG_EQUALIZER is not set
349# CONFIG_TUN is not set
350
351#
352# ARCnet devices
353#
354# CONFIG_ARCNET is not set
355
356#
357# Ethernet (10 or 100Mbit)
358#
359CONFIG_NET_ETHERNET=y
360# CONFIG_MII is not set
361# CONFIG_NET_VENDOR_3COM is not set
362# CONFIG_LANCE is not set
363# CONFIG_NET_VENDOR_SMC is not set
364# CONFIG_SMC91X is not set
365# CONFIG_NET_VENDOR_RACAL is not set
366# CONFIG_AT1700 is not set
367# CONFIG_DEPCA is not set
368# CONFIG_HP100 is not set
369# CONFIG_NET_ISA is not set
370CONFIG_NET_PCI=y
371# CONFIG_AC3200 is not set
372# CONFIG_APRICOT is not set
-CONFIG_CS89x0=y
-# CONFIG_NET_POCKET is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-
-#
-# Ethernet (10000 Mbit)
-#
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_PLIP is not set
-CONFIG_PPP=y
-# CONFIG_PPP_MULTILINK is not set
-# CONFIG_PPP_FILTER is not set
-# CONFIG_PPP_ASYNC is not set
-# CONFIG_PPP_SYNC_TTY is not set
-# CONFIG_PPP_DEFLATE is not set
-# CONFIG_PPP_BSDCOMP is not set
-# CONFIG_PPPOE is not set
-CONFIG_SLIP=y
-CONFIG_SLIP_COMPRESSED=y
-# CONFIG_SLIP_SMART is not set
-# CONFIG_SLIP_MODE_SLIP6 is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
-# CONFIG_KEYBOARD_NEWTON is not set
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=y
-# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_INPORT is not set
-# CONFIG_MOUSE_LOGIBM is not set
-# CONFIG_MOUSE_PC110PAD is not set
-# CONFIG_MOUSE_VSXXXAA is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Hardware I/O ports
-#
-CONFIG_SERIO=y
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_SERIO_PARKBD is not set
-CONFIG_SERIO_RPCKBD=y
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_SERIO_RAW is not set
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=4
-# CONFIG_SERIAL_8250_EXTENDED is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-CONFIG_PRINTER=y
-# CONFIG_LP_CONSOLE is not set
-# CONFIG_PPDEV is not set
-# CONFIG_TIPAR is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-# CONFIG_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
-
-#
-# TPM devices
-#
-# CONFIG_TCG_TPM is not set
-
-#
-# I2C support
-#
-CONFIG_I2C=y
-# CONFIG_I2C_CHARDEV is not set
-
-#
-# I2C Algorithms
-#
-CONFIG_I2C_ALGOBIT=y
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
-
-#
-# I2C Hardware Bus support
-#
-# CONFIG_I2C_ELEKTOR is not set
-# CONFIG_I2C_PARPORT is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
-# CONFIG_I2C_PCA_ISA is not set
-
-#
-# Hardware Sensors Chip support
-#
-# CONFIG_I2C_SENSOR is not set
-# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ADM1025 is not set
-# CONFIG_SENSORS_ADM1026 is not set
-# CONFIG_SENSORS_ADM1031 is not set
-# CONFIG_SENSORS_ASB100 is not set
-# CONFIG_SENSORS_DS1621 is not set
-# CONFIG_SENSORS_FSCHER is not set
-# CONFIG_SENSORS_FSCPOS is not set
-# CONFIG_SENSORS_GL518SM is not set
-# CONFIG_SENSORS_GL520SM is not set
-# CONFIG_SENSORS_IT87 is not set
-# CONFIG_SENSORS_LM63 is not set
-# CONFIG_SENSORS_LM75 is not set
-# CONFIG_SENSORS_LM77 is not set
-# CONFIG_SENSORS_LM78 is not set
-# CONFIG_SENSORS_LM80 is not set
-# CONFIG_SENSORS_LM83 is not set
-# CONFIG_SENSORS_LM85 is not set
-# CONFIG_SENSORS_LM87 is not set
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_MAX1619 is not set
-# CONFIG_SENSORS_PC87360 is not set
-# CONFIG_SENSORS_SMSC47B397 is not set
-# CONFIG_SENSORS_SMSC47M1 is not set
-# CONFIG_SENSORS_W83781D is not set
-# CONFIG_SENSORS_W83L785TS is not set
-# CONFIG_SENSORS_W83627HF is not set
-
-#
-# Other I2C Chip support
-#
-# CONFIG_SENSORS_EEPROM is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_SENSORS_RTC8564 is not set
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# Misc devices
-#
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-
-#
-# Graphics support
-#
-CONFIG_FB=y
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_SOFT_CURSOR=y
-# CONFIG_FB_MODE_HELPERS is not set
-# CONFIG_FB_TILEBLITTING is not set
-CONFIG_FB_ACORN=y
-# CONFIG_FB_VIRTUAL is not set
-
-#
-# Console display driver support
-#
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_MDA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-# CONFIG_FONT_6x11 is not set
-# CONFIG_FONT_PEARL_8x8 is not set
-# CONFIG_FONT_ACORN_8x8 is not set
-# CONFIG_FONT_MINI_4x6 is not set
-# CONFIG_FONT_SUN8x16 is not set
-# CONFIG_FONT_SUN12x22 is not set
-
-#
-# Logo configuration
-#
-CONFIG_LOGO=y
-CONFIG_LOGO_LINUX_MONO=y
-CONFIG_LOGO_LINUX_VGA16=y
-CONFIG_LOGO_LINUX_CLUT224=y
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# USB support
-#
-CONFIG_USB_ARCH_HAS_HCD=y
-# CONFIG_USB_ARCH_HAS_OHCI is not set
-# CONFIG_USB is not set
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# MMC/SD Card support
-#
-# CONFIG_MMC is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_JBD is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-
-#
-# XFS support
-#
-# CONFIG_XFS_FS is not set
-CONFIG_MINIX_FS=y
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-CONFIG_DNOTIFY=y
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-# CONFIG_TMPFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_JFFS_FS is not set
-# CONFIG_JFFS2_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=y
-# CONFIG_NFS_V3 is not set
-# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-# CONFIG_NFSD is not set
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
-# CONFIG_RPCSEC_GSS_SPKM3 is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_LDM_PARTITION is not set
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_EFI_PARTITION is not set
-
-#
-# Native Language Support
-#
-# CONFIG_NLS is not set
-
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
-#
-# Kernel hacking
-#
-# CONFIG_PRINTK_TIME is not set
-# CONFIG_DEBUG_KERNEL is not set
-CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_FRAME_POINTER=y
-# CONFIG_DEBUG_USER is not set
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-# CONFIG_CRYPTO is not set
-
-#
-# Hardware crypto devices
-#
-
-#
-# Library routines
-#
-# CONFIG_CRC_CCITT is not set
-CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c
index 50a30bc91872..8ac9b8424007 100644
--- a/arch/arm/kernel/isa.c
+++ b/arch/arm/kernel/isa.c
@@ -16,6 +16,7 @@
 #include <linux/fs.h>
 #include <linux/sysctl.h>
 #include <linux/init.h>
+#include <linux/io.h>
 
 static unsigned int isa_membase, isa_portbase, isa_portshift;
 
diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c
index 0a38c69fdbc4..73376170fb91 100644
--- a/arch/arm/mach-at91/at91cap9.c
+++ b/arch/arm/mach-at91/at91cap9.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 
+#include <asm/irq.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 28594fcc88e3..2e9ecad97f3d 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -12,6 +12,7 @@
 
 #include <linux/module.h>
 
+#include <asm/irq.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/at91rm9200.h>
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index accb69ec478e..0894f1077be7 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 
+#include <asm/irq.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/cpu.h>
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index 7b51a59ae8b3..3acd7d7e6a42 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 
+#include <asm/irq.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/at91sam9261.h>
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index ada4b6769107..942792d630d8 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 
+#include <asm/irq.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/at91sam9263.h>
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index 252e954b49fd..211c5c14a1e6 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 
+#include <asm/irq.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/cpu.h>
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index 9b937ee4815a..35e12a49d1a6 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -29,6 +29,7 @@
 #include <mach/hardware.h>
 #include <mach/board.h>
 #include <mach/gpio.h>
+#include <mach/at91sam9_smc.h>
 #include <mach/at91_shdwc.h>
 
 #include "sam9_smc.h"
diff --git a/arch/arm/mach-clps711x/edb7211-mm.c b/arch/arm/mach-clps711x/edb7211-mm.c
index c58e32ec4c5d..0bea1454ae03 100644
--- a/arch/arm/mach-clps711x/edb7211-mm.c
+++ b/arch/arm/mach-clps711x/edb7211-mm.c
@@ -24,7 +24,6 @@
 
 #include <mach/hardware.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/sizes.h>
 
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-clps711x/fortunet.c b/arch/arm/mach-clps711x/fortunet.c
index 7122b3d21043..7430e4049d87 100644
--- a/arch/arm/mach-clps711x/fortunet.c
+++ b/arch/arm/mach-clps711x/fortunet.c
@@ -24,7 +24,6 @@
 #include <linux/initrd.h>
 
 #include <mach/hardware.h>
-#include <asm/irq.h>
 #include <asm/setup.h>
 #include <asm/mach-types.h>
 
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 3d4b1de8f898..808633f9f03c 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -20,6 +20,7 @@
 
 #include <mach/hardware.h>
 #include <mach/i2c.h>
+#include <mach/irqs.h>
 
 static struct resource i2c_resources[] = {
 	{
diff --git a/arch/arm/mach-davinci/include/mach/gpio.h b/arch/arm/mach-davinci/include/mach/gpio.h
index b3a2961f0f46..b456f079f43f 100644
--- a/arch/arm/mach-davinci/include/mach/gpio.h
+++ b/arch/arm/mach-davinci/include/mach/gpio.h
@@ -16,6 +16,7 @@
 #include <linux/io.h>
 #include <asm-generic/gpio.h>
 #include <mach/hardware.h>
+#include <mach/irqs.h>
 
 /*
  * basic gpio routines
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 36ff06d4df15..b97f529e58e8 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -28,12 +28,17 @@
 
 #include "common.h"
 
-extern void __init isa_init_irq(unsigned int irq);
-
 unsigned int mem_fclk_21285 = 50000000;
 
 EXPORT_SYMBOL(mem_fclk_21285);
 
+static void __init early_fclk(char **arg)
+{
+	mem_fclk_21285 = simple_strtoul(*arg, arg, 0);
+}
+
+__early_param("mem_fclk_21285=", early_fclk);
+
 static int __init parse_tag_memclk(const struct tag *tag)
 {
 	mem_fclk_21285 = tag->u.memclk.fmemclk;
diff --git a/arch/arm/mach-footbridge/common.h b/arch/arm/mach-footbridge/common.h
index 580e31bbc711..b05e662d21ad 100644
--- a/arch/arm/mach-footbridge/common.h
+++ b/arch/arm/mach-footbridge/common.h
@@ -7,3 +7,4 @@ extern void isa_rtc_init(void);
 extern void footbridge_map_io(void);
 extern void footbridge_init_irq(void);
 
+extern void isa_init_irq(unsigned int irq);
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 133086019e3e..3ffa54841ec5 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -287,6 +287,9 @@ struct pci_bus * __init dc21285_scan_bus(int nr, struct pci_sys_data *sys)
 	return pci_scan_bus(0, &dc21285_ops, sys);
 }
 
+#define dc21285_request_irq(_a, _b, _c, _d, _e) \
+	WARN_ON(request_irq(_a, _b, _c, _d, _e) < 0)
+
 void __init dc21285_preinit(void)
 {
 	unsigned int mem_size, mem_mask;
@@ -335,16 +338,16 @@ void __init dc21285_preinit(void)
 	/*
 	 * We don't care if these fail.
 	 */
-	request_irq(IRQ_PCI_SERR, dc21285_serr_irq, IRQF_DISABLED,
+	dc21285_request_irq(IRQ_PCI_SERR, dc21285_serr_irq, IRQF_DISABLED,
 		    "PCI system error", &serr_timer);
-	request_irq(IRQ_PCI_PERR, dc21285_parity_irq, IRQF_DISABLED,
+	dc21285_request_irq(IRQ_PCI_PERR, dc21285_parity_irq, IRQF_DISABLED,
 		    "PCI parity error", &perr_timer);
-	request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, IRQF_DISABLED,
+	dc21285_request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, IRQF_DISABLED,
 		    "PCI abort", NULL);
-	request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, IRQF_DISABLED,
+	dc21285_request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, IRQF_DISABLED,
 		    "Discard timer", NULL);
-	request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, IRQF_DISABLED,
+	dc21285_request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, IRQF_DISABLED,
 		    "PCI data parity", NULL);
 
 	if (cfn_mode) {
 		static struct resource csrio;
diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c
index 9ee80a211d3c..8bfd06aeb64d 100644
--- a/arch/arm/mach-footbridge/isa-irq.c
+++ b/arch/arm/mach-footbridge/isa-irq.c
@@ -28,6 +28,8 @@
 #include <asm/irq.h>
 #include <asm/mach-types.h>
 
+#include "common.h"
+
 static void isa_mask_pic_lo_irq(unsigned int irq)
 {
 	unsigned int mask = 1 << (irq & 7);
diff --git a/arch/arm/mach-h720x/h7202-eval.c b/arch/arm/mach-h720x/h7202-eval.c
index 56161d55cf47..8c0ba99d683f 100644
--- a/arch/arm/mach-h720x/h7202-eval.c
+++ b/arch/arm/mach-h720x/h7202-eval.c
@@ -25,6 +25,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mach/arch.h>
+#include <mach/irqs.h>
 #include <mach/hardware.h>
 #include "common.h"
 
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 7b8ef97fb501..b3404b7775b3 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -698,6 +698,7 @@ void __init kirkwood_init(void)
 	printk(KERN_INFO "Kirkwood: %s, TCLK=%d.\n",
 		kirkwood_id(), kirkwood_tclk);
 	kirkwood_ge00_shared_data.t_clk = kirkwood_tclk;
+	kirkwood_ge01_shared_data.t_clk = kirkwood_tclk;
 	kirkwood_spi_plat_data.tclk = kirkwood_tclk;
 	kirkwood_uart0_data[0].uartclk = kirkwood_tclk;
 	kirkwood_uart1_data[0].uartclk = kirkwood_tclk;
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index f6b08f207c89..73fccacd1a73 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/mbus.h>
+#include <asm/irq.h>
 #include <asm/mach/pci.h>
 #include <plat/pcie.h>
 #include "common.h"
diff --git a/arch/arm/mach-ks8695/devices.c b/arch/arm/mach-ks8695/devices.c
index 36ab0fd3d9b6..b89fb6d46ccc 100644
--- a/arch/arm/mach-ks8695/devices.c
+++ b/arch/arm/mach-ks8695/devices.c
@@ -22,6 +22,7 @@
 
 #include <linux/platform_device.h>
 
+#include <mach/irqs.h>
 #include <mach/regs-wan.h>
 #include <mach/regs-lan.h>
 #include <mach/regs-hpna.h>
diff --git a/arch/arm/mach-msm/devices.c b/arch/arm/mach-msm/devices.c
index f2a74b92a97f..31b6b30e98bf 100644
--- a/arch/arm/mach-msm/devices.c
+++ b/arch/arm/mach-msm/devices.c
@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 
+#include <mach/irqs.h>
 #include <mach/msm_iomap.h>
 #include "devices.h"
 
diff --git a/arch/arm/mach-mv78xx0/pcie.c b/arch/arm/mach-mv78xx0/pcie.c
index 430ea84d587d..aad3a7a2f830 100644
--- a/arch/arm/mach-mv78xx0/pcie.c
+++ b/arch/arm/mach-mv78xx0/pcie.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/mbus.h>
+#include <asm/irq.h>
 #include <asm/mach/pci.h>
 #include <plat/pcie.h>
 #include "common.h"
diff --git a/arch/arm/mach-mx2/devices.c b/arch/arm/mach-mx2/devices.c
index af121f5ab710..2f9240be1c76 100644
--- a/arch/arm/mach-mx2/devices.c
+++ b/arch/arm/mach-mx2/devices.c
@@ -32,6 +32,7 @@
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
 
+#include <mach/irqs.h>
 #include <mach/hardware.h>
 
 /*
diff --git a/arch/arm/mach-mx3/devices.c b/arch/arm/mach-mx3/devices.c
index 1d46cb4adf96..f8428800f286 100644
--- a/arch/arm/mach-mx3/devices.c
+++ b/arch/arm/mach-mx3/devices.c
@@ -22,6 +22,7 @@
 #include <linux/serial.h>
 #include <linux/gpio.h>
 #include <mach/hardware.h>
+#include <mach/irqs.h>
 #include <mach/imx-uart.h>
 
 static struct resource uart0[] = {
diff --git a/arch/arm/mach-netx/fb.c b/arch/arm/mach-netx/fb.c
index ea8fa8898fe8..1d844e228ea9 100644
--- a/arch/arm/mach-netx/fb.c
+++ b/arch/arm/mach-netx/fb.c
@@ -24,6 +24,8 @@
 #include <linux/amba/clcd.h>
 #include <linux/err.h>
 
+#include <asm/irq.h>
+
 #include <mach/netx-regs.h>
 #include <mach/hardware.h>
 
diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c
index d51d627ce7cf..f201fddb594f 100644
--- a/arch/arm/mach-netx/time.c
+++ b/arch/arm/mach-netx/time.c
@@ -163,7 +163,7 @@ static void __init netx_timer_init(void)
 	 * Adding some safety ... */
 	netx_clockevent.min_delta_ns =
 		clockevent_delta2ns(0xa00, &netx_clockevent);
-	netx_clockevent.cpumask = cpumask_of_cpu(0);
+	netx_clockevent.cpumask = cpumask_of(0);
 	clockevents_register_device(&netx_clockevent);
 }
 
diff --git a/arch/arm/mach-netx/xc.c b/arch/arm/mach-netx/xc.c
index 8fc6205dc3a5..181a78ba8165 100644
--- a/arch/arm/mach-netx/xc.c
+++ b/arch/arm/mach-netx/xc.c
@@ -24,6 +24,7 @@
 #include <linux/io.h>
 
 #include <mach/hardware.h>
+#include <mach/irqs.h>
 #include <mach/netx-regs.h>
 
 #include <mach/xc.h>
diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c
index 7de7c6915584..4474da7bc88a 100644
--- a/arch/arm/mach-omap1/mcbsp.c
+++ b/arch/arm/mach-omap1/mcbsp.c
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 
 #include <mach/dma.h>
+#include <mach/irqs.h>
 #include <mach/mux.h>
 #include <mach/cpu.h>
 #include <mach/mcbsp.h>
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index cae3ebe249b3..acdc709901cd 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 
 #include <mach/dma.h>
+#include <mach/irqs.h>
 #include <mach/mux.h>
 #include <mach/cpu.h>
 #include <mach/mcbsp.h>
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index a7b7d77b1b09..d0a785a3b880 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/mbus.h>
+#include <asm/irq.h>
 #include <asm/mach/pci.h>
 #include <plat/pcie.h>
 #include "common.h"
diff --git a/arch/arm/mach-pnx4008/gpio.c b/arch/arm/mach-pnx4008/gpio.c
index 015cc21d5f55..f219914f5b29 100644
--- a/arch/arm/mach-pnx4008/gpio.c
+++ b/arch/arm/mach-pnx4008/gpio.c
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <mach/hardware.h>
 #include <mach/platform.h>
 #include <mach/gpio.h>
 
diff --git a/arch/arm/mach-pnx4008/i2c.c b/arch/arm/mach-pnx4008/i2c.c
index 87c093286ff9..f3fea29c00d3 100644
--- a/arch/arm/mach-pnx4008/i2c.c
+++ b/arch/arm/mach-pnx4008/i2c.c
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/err.h>
 #include <mach/platform.h>
+#include <mach/irqs.h>
 #include <mach/i2c.h>
 
 static int set_clock_run(struct platform_device *pdev)
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index c5e28a46b292..a8d91b6c136b 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -27,6 +27,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 #include <linux/spi/corgi_lcd.h>
+#include <linux/mtd/sharpsl.h>
 #include <video/w100fb.h>
 
 #include <asm/setup.h>
@@ -542,6 +543,55 @@ err_free_1:
 static inline void corgi_init_spi(void) {}
 #endif
 
+static struct mtd_partition sharpsl_nand_partitions[] = {
+	{
+		.name = "System Area",
+		.offset = 0,
+		.size = 7 * 1024 * 1024,
+	},
+	{
+		.name = "Root Filesystem",
+		.offset = 7 * 1024 * 1024,
+		.size = 25 * 1024 * 1024,
+	},
+	{
+		.name = "Home Filesystem",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,
+	},
+};
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr sharpsl_bbt = {
+	.options = 0,
+	.offs = 4,
+	.len = 2,
+	.pattern = scan_ff_pattern
+};
+
+static struct sharpsl_nand_platform_data sharpsl_nand_platform_data = {
+	.badblock_pattern = &sharpsl_bbt,
+	.partitions = sharpsl_nand_partitions,
+	.nr_partitions = ARRAY_SIZE(sharpsl_nand_partitions),
+};
+
+static struct resource sharpsl_nand_resources[] = {
+	{
+		.start = 0x0C000000,
+		.end = 0x0C000FFF,
+		.flags = IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device sharpsl_nand_device = {
+	.name = "sharpsl-nand",
+	.id = -1,
+	.resource = sharpsl_nand_resources,
+	.num_resources = ARRAY_SIZE(sharpsl_nand_resources),
+	.dev.platform_data = &sharpsl_nand_platform_data,
+};
+
 static struct mtd_partition sharpsl_rom_parts[] = {
 	{
 		.name ="Boot PROM Filesystem",
@@ -577,6 +627,7 @@ static struct platform_device *devices[] __initdata = {
 	&corgifb_device,
 	&corgikbd_device,
 	&corgiled_device,
+	&sharpsl_nand_device,
 	&sharpsl_rom_device,
 };
 
@@ -617,6 +668,9 @@ static void __init corgi_init(void)
 
 	platform_scoop_config = &corgi_pcmcia_config;
 
+	if (machine_is_husky())
+		sharpsl_nand_partitions[1].size = 53 * 1024 * 1024;
+
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 }
 
diff --git a/arch/arm/mach-pxa/e350.c b/arch/arm/mach-pxa/e350.c
index 251129391d7d..edcd9d5ce545 100644
--- a/arch/arm/mach-pxa/e350.c
+++ b/arch/arm/mach-pxa/e350.c
@@ -20,6 +20,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 
+#include <mach/irqs.h>
 #include <mach/mfp-pxa25x.h>
 #include <mach/pxa-regs.h>
 #include <mach/hardware.h>
diff --git a/arch/arm/mach-pxa/e400.c b/arch/arm/mach-pxa/e400.c
index bed0336aca3d..77bb8e2c48c0 100644
--- a/arch/arm/mach-pxa/e400.c
+++ b/arch/arm/mach-pxa/e400.c
@@ -28,6 +28,7 @@
 #include <mach/eseries-gpio.h>
 #include <mach/pxafb.h>
 #include <mach/udc.h>
+#include <mach/irqs.h>
 
 #include "generic.h"
 #include "eseries.h"
diff --git a/arch/arm/mach-pxa/e740.c b/arch/arm/mach-pxa/e740.c
index b00d670b2ea6..6d48e00f4f0b 100644
--- a/arch/arm/mach-pxa/e740.c
+++ b/arch/arm/mach-pxa/e740.c
@@ -30,6 +30,7 @@
 #include <mach/eseries-gpio.h>
 #include <mach/udc.h>
 #include <mach/irda.h>
+#include <mach/irqs.h>
 
 #include "generic.h"
 #include "eseries.h"
diff --git a/arch/arm/mach-pxa/e750.c b/arch/arm/mach-pxa/e750.c
index 84d7c1aac58d..be1ab8edb973 100644
--- a/arch/arm/mach-pxa/e750.c
+++ b/arch/arm/mach-pxa/e750.c
@@ -29,6 +29,7 @@
 #include <mach/eseries-gpio.h>
 #include <mach/udc.h>
 #include <mach/irda.h>
+#include <mach/irqs.h>
 
 #include "generic.h"
 #include "eseries.h"
@@ -105,6 +106,57 @@ static struct platform_device e750_fb_device = {
 	.resource = e750_fb_resources,
 };
 
+/* -------------------- e750 MFP parameters -------------------- */
+
+static unsigned long e750_pin_config[] __initdata = {
+	/* Chip selects */
+	GPIO15_nCS_1,	/* CS1 - Flash */
+	GPIO79_nCS_3,	/* CS3 - IMAGEON */
+	GPIO80_nCS_4,	/* CS4 - TMIO */
+
+	/* Clocks */
+	GPIO11_3_6MHz,
+
+	/* BTUART */
+	GPIO42_BTUART_RXD,
+	GPIO43_BTUART_TXD,
+	GPIO44_BTUART_CTS,
+
+	/* TMIO controller */
+	GPIO19_GPIO, /* t7l66xb #PCLR */
+	GPIO45_GPIO, /* t7l66xb #SUSPEND (NOT BTUART!) */
+
+	/* UDC */
+	GPIO13_GPIO,
+	GPIO3_GPIO,
+
+	/* IrDA */
+	GPIO38_GPIO | MFP_LPM_DRIVE_HIGH,
+
+	/* PC Card */
+	GPIO8_GPIO,   /* CD0 */
+	GPIO44_GPIO,  /* CD1 */
+	GPIO11_GPIO,  /* IRQ0 */
+	GPIO6_GPIO,   /* IRQ1 */
+	GPIO27_GPIO,  /* RST0 */
+	GPIO24_GPIO,  /* RST1 */
+	GPIO20_GPIO,  /* PWR0 */
+	GPIO23_GPIO,  /* PWR1 */
+	GPIO48_nPOE,
+	GPIO49_nPWE,
+	GPIO50_nPIOR,
+	GPIO51_nPIOW,
+	GPIO52_nPCE_1,
+	GPIO53_nPCE_2,
+	GPIO54_nPSKTSEL,
+	GPIO55_nPREG,
+	GPIO56_nPWAIT,
+	GPIO57_nIOIS16,
+
+	/* wakeup */
+	GPIO0_GPIO | WAKEUP_ON_EDGE_RISE,
+};
+
 /* ----------------- e750 tc6393xb parameters ------------------ */
 
 static struct tc6393xb_platform_data e750_tc6393xb_info = {
@@ -137,6 +189,7 @@ static struct platform_device *devices[] __initdata = {
 
 static void __init e750_init(void)
 {
+	pxa2xx_mfp_config(ARRAY_AND_SIZE(e750_pin_config));
 	clk_add_alias("CLK_CK3P6MI", &e750_tc6393xb_device.dev,
 			"GPIO11_CLK", NULL),
 	eseries_get_tmio_gpios();
diff --git a/arch/arm/mach-pxa/e800.c b/arch/arm/mach-pxa/e800.c
index 9a86a426f924..cc9b1293e866 100644
--- a/arch/arm/mach-pxa/e800.c
+++ b/arch/arm/mach-pxa/e800.c
@@ -28,6 +28,7 @@
 #include <mach/hardware.h>
 #include <mach/eseries-gpio.h>
 #include <mach/udc.h>
+#include <mach/irqs.h>
 
 #include "generic.h"
 #include "eseries.h"
diff --git a/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h b/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h
index b1fcd10ab6c6..bcf3fb2c4b3a 100644
--- a/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h
+++ b/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h
@@ -193,10 +193,8 @@
 #define CKEN_MINI_IM	48	/* < Mini-IM */
 #define CKEN_MINI_LCD	49	/* < Mini LCD */
 
-#if defined(CONFIG_CPU_PXA310)
 #define CKEN_MMC3	5	/* < MMC3 Clock Enable */
 #define CKEN_MVED	43	/* < MVED clock enable */
-#endif
 
 /* Note: GCU clock enable bit differs on PXA300/PXA310 and PXA320 */
 #define PXA300_CKEN_GRAPHICS	42	/* Graphics controller clock enable */
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index ae88855bf974..f9093beba752 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -24,6 +24,7 @@
 #include <linux/gpio.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
+#include <linux/mtd/sharpsl.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -414,6 +415,55 @@ static struct pxafb_mach_info poodle_fb_info = {
 	.lcd_conn	= LCD_COLOR_TFT_16BPP,
 };
 
+static struct mtd_partition sharpsl_nand_partitions[] = {
+	{
+		.name = "System Area",
+		.offset = 0,
+		.size = 7 * 1024 * 1024,
+	},
+	{
+		.name = "Root Filesystem",
+		.offset = 7 * 1024 * 1024,
+		.size = 22 * 1024 * 1024,
+	},
+	{
+		.name = "Home Filesystem",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,
+	},
+};
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr sharpsl_bbt = {
+	.options = 0,
+	.offs = 4,
+	.len = 2,
+	.pattern = scan_ff_pattern
+};
+
+static struct sharpsl_nand_platform_data sharpsl_nand_platform_data = {
+	.badblock_pattern = &sharpsl_bbt,
+	.partitions = sharpsl_nand_partitions,
+	.nr_partitions = ARRAY_SIZE(sharpsl_nand_partitions),
+};
+
+static struct resource sharpsl_nand_resources[] = {
+	{
+		.start = 0x0C000000,
+		.end = 0x0C000FFF,
+		.flags = IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device sharpsl_nand_device = {
+	.name = "sharpsl-nand",
+	.id = -1,
+	.resource = sharpsl_nand_resources,
+	.num_resources = ARRAY_SIZE(sharpsl_nand_resources),
+	.dev.platform_data = &sharpsl_nand_platform_data,
+};
+
 static struct mtd_partition sharpsl_rom_parts[] = {
 	{
 		.name ="Boot PROM Filesystem",
@@ -447,6 +497,7 @@ static struct platform_device sharpsl_rom_device = {
 static struct platform_device *devices[] __initdata = {
 	&poodle_locomo_device,
 	&poodle_scoop_device,
+	&sharpsl_nand_device,
 	&sharpsl_rom_device,
 };
 
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 7299d87a1cb3..6d447c9ce8ab 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -31,6 +31,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 #include <linux/spi/corgi_lcd.h>
+#include <linux/mtd/sharpsl.h>
 
 #include <asm/setup.h>
 #include <asm/memory.h>
@@ -613,6 +614,54 @@ static struct pxafb_mach_info spitz_pxafb_info = {
 	.lcd_conn	= LCD_COLOR_TFT_16BPP | LCD_ALTERNATE_MAPPING,
 };
 
+static struct mtd_partition sharpsl_nand_partitions[] = {
+	{
+		.name = "System Area",
+		.offset = 0,
+		.size = 7 * 1024 * 1024,
+	},
+	{
+		.name = "Root Filesystem",
+		.offset = 7 * 1024 * 1024,
+	},
+	{
+		.name = "Home Filesystem",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,
+	},
+};
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr sharpsl_bbt = {
+	.options = 0,
+	.offs = 4,
+	.len = 2,
+	.pattern = scan_ff_pattern
+};
+
+static struct sharpsl_nand_platform_data sharpsl_nand_platform_data = {
+	.badblock_pattern = &sharpsl_bbt,
+	.partitions = sharpsl_nand_partitions,
+	.nr_partitions = ARRAY_SIZE(sharpsl_nand_partitions),
+};
+
+static struct resource sharpsl_nand_resources[] = {
+	{
+		.start = 0x0C000000,
+		.end = 0x0C000FFF,
+		.flags = IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device sharpsl_nand_device = {
+	.name = "sharpsl-nand",
+	.id = -1,
+	.resource = sharpsl_nand_resources,
+	.num_resources = ARRAY_SIZE(sharpsl_nand_resources),
+	.dev.platform_data = &sharpsl_nand_platform_data,
+};
+
 
 static struct mtd_partition sharpsl_rom_parts[] = {
 	{
@@ -648,6 +697,7 @@ static struct platform_device *devices[] __initdata = {
 	&spitzscoop_device,
 	&spitzkbd_device,
 	&spitzled_device,
+	&sharpsl_nand_device,
 	&sharpsl_rom_device,
 };
 
@@ -671,6 +721,14 @@ static void __init common_init(void)
 	pm_power_off = spitz_poweroff;
 	arm_pm_restart = spitz_restart;
 
+	if (machine_is_spitz()) {
+		sharpsl_nand_partitions[1].size = 5 * 1024 * 1024;
+	} else if (machine_is_akita()) {
+		sharpsl_nand_partitions[1].size = 58 * 1024 * 1024;
+	} else if (machine_is_borzoi()) {
+		sharpsl_nand_partitions[1].size = 32 * 1024 * 1024;
+	}
+
 	PMCR = 0x00;
 
 	/* Stop 3.6MHz and drive HIGH to PCMCIA and CS */
@@ -715,10 +773,29 @@ static struct i2c_board_info akita_i2c_board_info[] = {
 	},
 };
 
+static struct nand_bbt_descr sharpsl_akita_bbt = {
+	.options = 0,
+	.offs = 4,
+	.len = 1,
+	.pattern = scan_ff_pattern
+};
+
+static struct nand_ecclayout akita_oobinfo = {
+	.eccbytes = 24,
+	.eccpos = {
+		0x5, 0x1, 0x2, 0x3, 0x6, 0x7, 0x15, 0x11,
+		0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23,
+		0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37},
+	.oobfree = {{0x08, 0x09}}
+};
+
 static void __init akita_init(void)
 {
 	spitz_ficp_platform_data.transceiver_mode = akita_irda_transceiver_mode;
 
+	sharpsl_nand_platform_data.badblock_pattern = &sharpsl_akita_bbt;
+	sharpsl_nand_platform_data.ecc_layout = &akita_oobinfo;
+
 	/* We just pretend the second element of the array doesn't exist */
 	spitz_pcmcia_config.num_devs = 1;
 	platform_scoop_config = &spitz_pcmcia_config;
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index 8fce85f33033..ea3c75595fa9 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -12,6 +12,7 @@
 #include <linux/errno.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/jiffies.h>
 #include <linux/smp.h>
 #include <linux/io.h>
 
diff --git a/arch/arm/mach-s3c2410/include/mach/gpio.h b/arch/arm/mach-s3c2410/include/mach/gpio.h
index e0349af8a483..00476a573bbe 100644
--- a/arch/arm/mach-s3c2410/include/mach/gpio.h
+++ b/arch/arm/mach-s3c2410/include/mach/gpio.h
@@ -14,6 +14,7 @@
 #define gpio_get_value	__gpio_get_value
 #define gpio_set_value	__gpio_set_value
 #define gpio_cansleep	__gpio_cansleep
+#define gpio_to_irq	__gpio_to_irq
 
 /* some boards require extra gpio capacity to support external
  * devices that need GPIO.
diff --git a/arch/arm/mach-s3c2410/include/mach/irqs.h b/arch/arm/mach-s3c2410/include/mach/irqs.h
index 9565903d490b..49efce8cd4a7 100644
--- a/arch/arm/mach-s3c2410/include/mach/irqs.h
+++ b/arch/arm/mach-s3c2410/include/mach/irqs.h
@@ -12,10 +12,6 @@
 #ifndef __ASM_ARCH_IRQS_H
 #define __ASM_ARCH_IRQS_H __FILE__
 
-#ifndef __ASM_ARM_IRQ_H
-#error "Do not include this directly, instead #include <asm/irq.h>"
-#endif
-
 /* we keep the first set of CPU IRQs out of the range of
  * the ISA space, so that the PC104 has them to itself
  * and we don't end up having to do horrible things to the
diff --git a/arch/arm/mach-s3c2440/mach-at2440evb.c b/arch/arm/mach-s3c2440/mach-at2440evb.c
index 0a6d0a5d961b..315c42e31278 100644
--- a/arch/arm/mach-s3c2440/mach-at2440evb.c
+++ b/arch/arm/mach-s3c2440/mach-at2440evb.c
@@ -47,7 +47,7 @@
 #include <plat/clock.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
-#include <asm/plat-s3c24xx/mci.h>
+#include <plat/mci.h>
 
 static struct map_desc at2440evb_iodesc[] __initdata = {
 	/* Nothing here */
diff --git a/arch/arm/mach-s3c6400/include/mach/irqs.h b/arch/arm/mach-s3c6400/include/mach/irqs.h
index b38c47cffc28..4c97f9a4370b 100644
--- a/arch/arm/mach-s3c6400/include/mach/irqs.h
+++ b/arch/arm/mach-s3c6400/include/mach/irqs.h
@@ -11,10 +11,6 @@
 #ifndef __ASM_ARCH_IRQS_H
 #define __ASM_ARCH_IRQS_H __FILE__
 
-#ifndef __ASM_ARM_IRQ_H
-#error "Do not include this directly, instead #include <asm/irq.h>"
-#endif
-
 #include <plat/irqs.h>
 
 #endif /* __ASM_ARCH_IRQ_H */
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
index 89a6ab0b7db8..467531edefd3 100644
--- a/arch/arm/plat-omap/i2c.c
+++ b/arch/arm/plat-omap/i2c.c
@@ -26,6 +26,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
+#include <mach/irqs.h>
 #include <mach/mux.h>
 
 #define OMAP_I2C_SIZE	0x3f
diff --git a/arch/arm/plat-s3c/dev-fb.c b/arch/arm/plat-s3c/dev-fb.c
index 0454b8ec02e2..a90198fc4b0f 100644
--- a/arch/arm/plat-s3c/dev-fb.c
+++ b/arch/arm/plat-s3c/dev-fb.c
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/fb.h>
 
+#include <mach/irqs.h>
 #include <mach/map.h>
 #include <mach/regs-fb.h>
 
diff --git a/arch/arm/plat-s3c/dev-i2c0.c b/arch/arm/plat-s3c/dev-i2c0.c
index 2c0128c77c6e..fe327074037e 100644
--- a/arch/arm/plat-s3c/dev-i2c0.c
+++ b/arch/arm/plat-s3c/dev-i2c0.c
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 
+#include <mach/irqs.h>
 #include <mach/map.h>
 
 #include <plat/regs-iic.h>
diff --git a/arch/arm/plat-s3c/dev-i2c1.c b/arch/arm/plat-s3c/dev-i2c1.c
index 9658fb0aec95..2387fbf57af6 100644
--- a/arch/arm/plat-s3c/dev-i2c1.c
+++ b/arch/arm/plat-s3c/dev-i2c1.c
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 
+#include <mach/irqs.h>
 #include <mach/map.h>
 
 #include <plat/regs-iic.h>
diff --git a/arch/arm/plat-s3c24xx/gpiolib.c b/arch/arm/plat-s3c24xx/gpiolib.c
index f95c6c9d9f1a..94a341aaa4e4 100644
--- a/arch/arm/plat-s3c24xx/gpiolib.c
+++ b/arch/arm/plat-s3c24xx/gpiolib.c
@@ -59,6 +59,22 @@ static int s3c24xx_gpiolib_banka_output(struct gpio_chip *chip,
 	return 0;
 }
 
+static int s3c24xx_gpiolib_bankf_toirq(struct gpio_chip *chip, unsigned offset)
+{
+	if (offset < 4)
+		return IRQ_EINT0 + offset;
+
+	if (offset < 8)
+		return IRQ_EINT4 + offset - 4;
+
+	return -EINVAL;
+}
+
+static int s3c24xx_gpiolib_bankg_toirq(struct gpio_chip *chip, unsigned offset)
+{
+	return IRQ_EINT8 + offset;
+}
+
 struct s3c_gpio_chip s3c24xx_gpios[] = {
 	[0] = {
 		.base = S3C24XX_GPIO_BASE(S3C2410_GPA0),
@@ -114,6 +130,7 @@ struct s3c_gpio_chip s3c24xx_gpios[] = {
 			.owner = THIS_MODULE,
 			.label = "GPIOF",
 			.ngpio = 8,
+			.to_irq = s3c24xx_gpiolib_bankf_toirq,
 		},
 	},
 	[6] = {
@@ -123,6 +140,7 @@ struct s3c_gpio_chip s3c24xx_gpios[] = {
 			.owner = THIS_MODULE,
 			.label = "GPIOG",
 			.ngpio = 10,
+			.to_irq = s3c24xx_gpiolib_bankg_toirq,
 		},
 	},
 };
diff --git a/arch/arm/plat-s3c24xx/pwm.c b/arch/arm/plat-s3c24xx/pwm.c
index ec56b88866c4..0120b760315b 100644
--- a/arch/arm/plat-s3c24xx/pwm.c
+++ b/arch/arm/plat-s3c24xx/pwm.c
@@ -19,6 +19,8 @@
 #include <linux/io.h>
 #include <linux/pwm.h>
 
+#include <mach/irqs.h>
+
 #include <plat/devs.h>
 #include <plat/regs-timer.h>
 
diff --git a/arch/arm/plat-s3c64xx/include/plat/irqs.h b/arch/arm/plat-s3c64xx/include/plat/irqs.h
index 02e8dd4c97d5..2846f550b727 100644
--- a/arch/arm/plat-s3c64xx/include/plat/irqs.h
+++ b/arch/arm/plat-s3c64xx/include/plat/irqs.h
@@ -191,7 +191,7 @@
 #define IRQ_EINT_GROUP8_BASE	(IRQ_EINT_GROUP7_BASE + IRQ_EINT_GROUP7_NR)
 #define IRQ_EINT_GROUP9_BASE	(IRQ_EINT_GROUP8_BASE + IRQ_EINT_GROUP8_NR)
 
-#define IRQ_EINT_GROUP(group, no)	(IRQ_EINT_GROUP##group##__BASE + (x))
+#define IRQ_EINT_GROUP(group, no)	(IRQ_EINT_GROUP##group##_BASE + (no))
 
 /* Set the default NR_IRQS */
 
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index ea7bc1e8562b..3fbfd1e32a9e 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1305,7 +1305,7 @@ struct platform_device *__init
 at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 {
 	struct platform_device *pdev;
-	struct dw_dma_slave *dws;
+	struct dw_dma_slave *dws = &data->dma_slave;
 	u32 pioa_mask;
 	u32 piob_mask;
 
@@ -1324,22 +1324,13 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 			ARRAY_SIZE(atmel_mci0_resource)))
 		goto fail;
 
-	if (data->dma_slave)
-		dws = kmemdup(to_dw_dma_slave(data->dma_slave),
-				sizeof(struct dw_dma_slave), GFP_KERNEL);
-	else
-		dws = kzalloc(sizeof(struct dw_dma_slave), GFP_KERNEL);
-
-	dws->slave.dev = &pdev->dev;
-	dws->slave.dma_dev = &dw_dmac0_device.dev;
-	dws->slave.reg_width = DMA_SLAVE_WIDTH_32BIT;
+	dws->dma_dev = &dw_dmac0_device.dev;
+	dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
 	dws->cfg_hi = (DWC_CFGH_SRC_PER(0)
 			| DWC_CFGH_DST_PER(1));
 	dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL
 			| DWC_CFGL_HS_SRC_POL);
 
-	data->dma_slave = &dws->slave;
-
 	if (platform_device_add_data(pdev, data,
 				sizeof(struct mci_platform_data)))
 		goto fail;
diff --git a/arch/ia64/include/asm/acpi-ext.h b/arch/ia64/include/asm/acpi-ext.h
index 734d137dda6e..7f8362b379eb 100644
--- a/arch/ia64/include/asm/acpi-ext.h
+++ b/arch/ia64/include/asm/acpi-ext.h
@@ -14,7 +14,6 @@
 #define _ASM_IA64_ACPI_EXT_H
 
 #include <linux/types.h>
-#include <acpi/actypes.h>
 
 extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
 
diff --git a/arch/ia64/include/asm/sn/acpi.h b/arch/ia64/include/asm/sn/acpi.h
index 9ce2801cbd57..fd480db25565 100644
--- a/arch/ia64/include/asm/sn/acpi.h
+++ b/arch/ia64/include/asm/sn/acpi.h
@@ -9,8 +9,6 @@
 #ifndef _ASM_IA64_SN_ACPI_H
 #define _ASM_IA64_SN_ACPI_H
 
-#include "acpi/acglobal.h"
-
 extern int sn_acpi_rev;
 #define SN_ACPI_BASE_SUPPORT()   (sn_acpi_rev >= 0x20101)
 
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 0553648b7595..d541671caf4a 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -65,6 +65,7 @@ EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
 EXPORT_SYMBOL(pm_power_off);
 
+u32 acpi_rsdt_forced;
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index bc610a6c7851..c5a214026a77 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -13,7 +13,6 @@
 #include <asm/sn/sn_sal.h>
 #include "xtalk/hubdev.h"
 #include <linux/acpi.h>
-#include <acpi/acnamesp.h>
 
 
 /*
@@ -64,6 +63,7 @@ static acpi_status __init
 sn_acpi_hubdev_init(acpi_handle handle, u32 depth, void *context, void **ret)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	u64 addr;
 	struct hubdev_info *hubdev;
 	struct hubdev_info *hubdev_ptr;
@@ -77,11 +77,12 @@ sn_acpi_hubdev_init(acpi_handle handle, u32 depth, void *context, void **ret)
 	status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
 					  &sn_uuid, &buffer);
 	if (ACPI_FAILURE(status)) {
+		acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
 		printk(KERN_ERR
 		       "sn_acpi_hubdev_init: acpi_get_vendor_resource() "
-		       "(0x%x) failed for: ", status);
-		acpi_ns_print_node_pathname(handle, NULL);
-		printk("\n");
+		       "(0x%x) failed for: %s\n", status,
+		       (char *)name_buffer.pointer);
+		kfree(name_buffer.pointer);
 		return AE_OK; /* Continue walking namespace */
 	}
 
@@ -89,11 +90,12 @@ sn_acpi_hubdev_init(acpi_handle handle, u32 depth, void *context, void **ret)
 	vendor = &resource->data.vendor_typed;
 	if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
 	    sizeof(struct hubdev_info *)) {
+		acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
 		printk(KERN_ERR
-		       "sn_acpi_hubdev_init: Invalid vendor data length: %d for: ",
-		       vendor->byte_length);
-		acpi_ns_print_node_pathname(handle, NULL);
-		printk("\n");
+		       "sn_acpi_hubdev_init: Invalid vendor data length: "
+		       "%d for: %s\n",
+		       vendor->byte_length, (char *)name_buffer.pointer);
+		kfree(name_buffer.pointer);
 		goto exit;
 	}
 
@@ -120,6 +122,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
 {
 	u64 addr;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	acpi_handle handle;
 	struct pcibus_bussoft *prom_bussoft_ptr;
 	struct acpi_resource *resource;
@@ -131,11 +134,11 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
 	status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
 					  &sn_uuid, &buffer);
 	if (ACPI_FAILURE(status)) {
+		acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
 		printk(KERN_ERR "%s: "
-		       "acpi_get_vendor_resource() failed (0x%x) for: ",
-		       __func__, status);
-		acpi_ns_print_node_pathname(handle, NULL);
-		printk("\n");
+		       "acpi_get_vendor_resource() failed (0x%x) for: %s\n",
+		       __func__, status, (char *)name_buffer.pointer);
+		kfree(name_buffer.pointer);
 		return NULL;
 	}
 	resource = buffer.pointer;
@@ -168,6 +171,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
 {
 	u64 addr;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	struct sn_irq_info *irq_info, *irq_info_prom;
 	struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr;
 	struct acpi_resource *resource;
@@ -182,11 +186,11 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
 	status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
 					  &sn_uuid, &buffer);
 	if (ACPI_FAILURE(status)) {
+		acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
 		printk(KERN_ERR
-		       "%s: acpi_get_vendor_resource() failed (0x%x) for: ",
-		       __func__, status);
-		acpi_ns_print_node_pathname(handle, NULL);
-		printk("\n");
+		       "%s: acpi_get_vendor_resource() failed (0x%x) for: %s\n",
+		       __func__, status, (char *)name_buffer.pointer);
+		kfree(name_buffer.pointer);
 		return 1;
 	}
 
@@ -194,11 +198,12 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
 	vendor = &resource->data.vendor_typed;
 	if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
 	    sizeof(struct pci_devdev_info *)) {
+		acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
 		printk(KERN_ERR
-		       "%s: Invalid vendor data length: %d for: ",
-		       __func__, vendor->byte_length);
-		acpi_ns_print_node_pathname(handle, NULL);
-		printk("\n");
+		       "%s: Invalid vendor data length: %d for: %s\n",
+		       __func__, vendor->byte_length,
+		       (char *)name_buffer.pointer);
+		kfree(name_buffer.pointer);
 		ret = 1;
 		goto exit;
 	}
@@ -239,6 +244,9 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
 	acpi_handle parent;
 	int slot;
 	acpi_status status;
+	struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	acpi_get_name(device_handle, ACPI_FULL_PATHNAME, &name_buffer);
 
 	/*
 	 * Do an upward search to find the root bus device, and
@@ -249,9 +257,8 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
249 status = acpi_get_parent(child, &parent); 257 status = acpi_get_parent(child, &parent);
250 if (ACPI_FAILURE(status)) { 258 if (ACPI_FAILURE(status)) {
251 printk(KERN_ERR "%s: acpi_get_parent() failed " 259 printk(KERN_ERR "%s: acpi_get_parent() failed "
252 "(0x%x) for: ", __func__, status); 260 "(0x%x) for: %s\n", __func__, status,
253 acpi_ns_print_node_pathname(child, NULL); 261 (char *)name_buffer.pointer);
254 printk("\n");
255 panic("%s: Unable to find host devfn\n", __func__); 262 panic("%s: Unable to find host devfn\n", __func__);
256 } 263 }
257 if (parent == rootbus_handle) 264 if (parent == rootbus_handle)
@@ -259,22 +266,20 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
259 child = parent; 266 child = parent;
260 } 267 }
261 if (!child) { 268 if (!child) {
262 printk(KERN_ERR "%s: Unable to find root bus for: ", 269 printk(KERN_ERR "%s: Unable to find root bus for: %s\n",
263 __func__); 270 __func__, (char *)name_buffer.pointer);
264 acpi_ns_print_node_pathname(device_handle, NULL);
265 printk("\n");
266 BUG(); 271 BUG();
267 } 272 }
268 273
269 status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr); 274 status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
270 if (ACPI_FAILURE(status)) { 275 if (ACPI_FAILURE(status)) {
271 printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ", 276 printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: %s\n",
272 __func__, status); 277 __func__, status, (char *)name_buffer.pointer);
273 acpi_ns_print_node_pathname(child, NULL);
274 printk("\n");
275 panic("%s: Unable to find host devfn\n", __func__); 278 panic("%s: Unable to find host devfn\n", __func__);
276 } 279 }
277 280
281 kfree(name_buffer.pointer);
282
278 slot = (adr >> 16) & 0xffff; 283 slot = (adr >> 16) & 0xffff;
279 function = adr & 0xffff; 284 function = adr & 0xffff;
280 devfn = PCI_DEVFN(slot, function); 285 devfn = PCI_DEVFN(slot, function);
@@ -300,27 +305,28 @@ find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
300 int function; 305 int function;
301 int slot; 306 int slot;
302 struct sn_pcidev_match *info = context; 307 struct sn_pcidev_match *info = context;
308 struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
303 309
304 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, 310 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
305 &adr); 311 &adr);
306 if (ACPI_SUCCESS(status)) { 312 if (ACPI_SUCCESS(status)) {
307 status = acpi_get_parent(handle, &parent); 313 status = acpi_get_parent(handle, &parent);
308 if (ACPI_FAILURE(status)) { 314 if (ACPI_FAILURE(status)) {
315 acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
309 printk(KERN_ERR 316 printk(KERN_ERR
310 "%s: acpi_get_parent() failed (0x%x) for: ", 317 "%s: acpi_get_parent() failed (0x%x) for: %s\n",
311 __func__, status); 318 __func__, status, (char *)name_buffer.pointer);
312 acpi_ns_print_node_pathname(handle, NULL); 319 kfree(name_buffer.pointer);
313 printk("\n");
314 return AE_OK; 320 return AE_OK;
315 } 321 }
316 status = acpi_evaluate_integer(parent, METHOD_NAME__BBN, 322 status = acpi_evaluate_integer(parent, METHOD_NAME__BBN,
317 NULL, &bbn); 323 NULL, &bbn);
318 if (ACPI_FAILURE(status)) { 324 if (ACPI_FAILURE(status)) {
325 acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
319 printk(KERN_ERR 326 printk(KERN_ERR
320 "%s: Failed to find _BBN in parent of: ", 327 "%s: Failed to find _BBN in parent of: %s\n",
321 __func__); 328 __func__, (char *)name_buffer.pointer);
322 acpi_ns_print_node_pathname(handle, NULL); 329 kfree(name_buffer.pointer);
323 printk("\n");
324 return AE_OK; 330 return AE_OK;
325 } 331 }
326 332
@@ -350,24 +356,27 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
350 acpi_handle rootbus_handle; 356 acpi_handle rootbus_handle;
351 unsigned long long segment; 357 unsigned long long segment;
352 acpi_status status; 358 acpi_status status;
359 struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
353 360
354 rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle; 361 rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle;
355 status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL, 362 status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL,
356 &segment); 363 &segment);
357 if (ACPI_SUCCESS(status)) { 364 if (ACPI_SUCCESS(status)) {
358 if (segment != pci_domain_nr(dev)) { 365 if (segment != pci_domain_nr(dev)) {
366 acpi_get_name(rootbus_handle, ACPI_FULL_PATHNAME,
367 &name_buffer);
359 printk(KERN_ERR 368 printk(KERN_ERR
360 "%s: Segment number mismatch, 0x%llx vs 0x%x for: ", 369 "%s: Segment number mismatch, 0x%llx vs 0x%x for: %s\n",
361 __func__, segment, pci_domain_nr(dev)); 370 __func__, segment, pci_domain_nr(dev),
362 acpi_ns_print_node_pathname(rootbus_handle, NULL); 371 (char *)name_buffer.pointer);
363 printk("\n"); 372 kfree(name_buffer.pointer);
364 return 1; 373 return 1;
365 } 374 }
366 } else { 375 } else {
367 printk(KERN_ERR "%s: Unable to get __SEG from: ", 376 acpi_get_name(rootbus_handle, ACPI_FULL_PATHNAME, &name_buffer);
368 __func__); 377 printk(KERN_ERR "%s: Unable to get __SEG from: %s\n",
369 acpi_ns_print_node_pathname(rootbus_handle, NULL); 378 __func__, (char *)name_buffer.pointer);
370 printk("\n"); 379 kfree(name_buffer.pointer);
371 return 1; 380 return 1;
372 } 381 }
373 382
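Every hunk in this file makes the same substitution: the ACPICA-internal acpi_ns_print_node_pathname() (which forced the driver to reach into private ACPICA headers) is replaced by the exported acpi_get_name(), which writes the pathname into a caller-owned ACPI_ALLOCATE_BUFFER. A minimal kernel-context sketch of the resulting pattern — the function name is illustrative, and a production caller would also check acpi_get_name()'s return status:

    #include <linux/acpi.h>
    #include <linux/slab.h>

    static void report_acpi_failure(acpi_handle handle, acpi_status status)
    {
            struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };

            /* ACPICA allocates name_buffer.pointer on our behalf. */
            acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
            printk(KERN_ERR "%s: failed (0x%x) for: %s\n",
                   __func__, status, (char *)name_buffer.pointer);
            /* The caller owns the allocation, hence the kfree() in each hunk. */
            kfree(name_buffer.pointer);
    }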
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 8a924a5661dd..0d4ffa4da1da 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -26,7 +26,6 @@
26#include <linux/acpi.h> 26#include <linux/acpi.h>
27#include <asm/sn/sn2/sn_hwperf.h> 27#include <asm/sn/sn2/sn_hwperf.h>
28#include <asm/sn/acpi.h> 28#include <asm/sn/acpi.h>
29#include "acpi/acglobal.h"
30 29
31extern void sn_init_cpei_timer(void); 30extern void sn_init_cpei_timer(void);
32extern void register_sn_procfs(void); 31extern void register_sn_procfs(void);
@@ -473,7 +472,7 @@ sn_io_early_init(void)
473 { 472 {
474 struct acpi_table_header *header = NULL; 473 struct acpi_table_header *header = NULL;
475 474
476 acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header); 475 acpi_get_table(ACPI_SIG_DSDT, 1, &header);
477 BUG_ON(header == NULL); 476 BUG_ON(header == NULL);
478 sn_acpi_rev = header->oem_revision; 477 sn_acpi_rev = header->oem_revision;
479 } 478 }
@@ -505,7 +504,7 @@ sn_io_early_init(void)
505 504
506 { 505 {
507 struct acpi_table_header *header; 506 struct acpi_table_header *header;
508 (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header); 507 (void)acpi_get_table(ACPI_SIG_DSDT, 1, &header);
509 printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n", 508 printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
510 header->oem_revision); 509 header->oem_revision);
511 } 510 }
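The acpi_get_table_by_index()/ACPI_TABLE_INDEX_DSDT pair was an ACPICA-private interface, which is why the "acpi/acglobal.h" include can be dropped; acpi_get_table() looks the table up by signature and is the exported API. A hedged sketch of the lookup — the zero fallback is illustrative, not from the patch:

    #include <linux/acpi.h>

    static u32 dsdt_oem_revision(void)
    {
            struct acpi_table_header *header = NULL;

            /* ACPI_SIG_DSDT is "DSDT"; instance 1 means the first match. */
            if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_DSDT, 1, &header)) || !header)
                    return 0;
            return header->oem_revision;
    }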
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 5ddad7bd60ac..0d428278356d 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -77,7 +77,7 @@ libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
77 77
78drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ 78drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
79 79
80PALO := $(shell if which palo; then : ; \ 80PALO := $(shell if (which palo 2>&1); then : ; \
81 elif [ -x /sbin/palo ]; then echo /sbin/palo; \ 81 elif [ -x /sbin/palo ]; then echo /sbin/palo; \
82 fi) 82 fi)
83 83
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index f88b252e419c..2121d99f8364 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,3 +1,4 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2 2
3unifdef-y += pdc.h 3unifdef-y += pdc.h
4unifdef-y += swab.h
diff --git a/arch/parisc/include/asm/byteorder.h b/arch/parisc/include/asm/byteorder.h
index db148313de5d..da66029c4cb2 100644
--- a/arch/parisc/include/asm/byteorder.h
+++ b/arch/parisc/include/asm/byteorder.h
@@ -1,82 +1,7 @@
1#ifndef _PARISC_BYTEORDER_H 1#ifndef _PARISC_BYTEORDER_H
2#define _PARISC_BYTEORDER_H 2#define _PARISC_BYTEORDER_H
3 3
4#include <asm/types.h> 4#include <asm/swab.h>
5#include <linux/compiler.h>
6
7#ifdef __GNUC__
8
9static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
10{
11 __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */
12 "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */
13 : "=r" (x)
14 : "0" (x));
15 return x;
16}
17
18static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x)
19{
20 __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */
21 "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */
22 "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */
23 : "=r" (x)
24 : "0" (x));
25 return x;
26}
27
28static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
29{
30 unsigned int temp;
31 __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */
32 "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */
33 "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */
34 : "=r" (x), "=&r" (temp)
35 : "0" (x));
36 return x;
37}
38
39
40#if BITS_PER_LONG > 32
41/*
42** From "PA-RISC 2.0 Architecture", HP Professional Books.
43** See Appendix I page 8 , "Endian Byte Swapping".
44**
45** Pretty cool algorithm: (* == zero'd bits)
46** PERMH 01234567 -> 67452301 into %0
47** HSHL 67452301 -> 7*5*3*1* into %1
48** HSHR 67452301 -> *6*4*2*0 into %0
49** OR %0 | %1 -> 76543210 into %0 (all done!)
50*/
51static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) {
52 __u64 temp;
53 __asm__("permh,3210 %0, %0\n\t"
54 "hshl %0, 8, %1\n\t"
55 "hshr,u %0, 8, %0\n\t"
56 "or %1, %0, %0"
57 : "=r" (x), "=&r" (temp)
58 : "0" (x));
59 return x;
60}
61#define __arch__swab64(x) ___arch__swab64(x)
62#define __BYTEORDER_HAS_U64__
63#elif !defined(__STRICT_ANSI__)
64static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
65{
66 __u32 t1 = ___arch__swab32((__u32) x);
67 __u32 t2 = ___arch__swab32((__u32) (x >> 32));
68 return (((__u64) t1 << 32) | t2);
69}
70#define __arch__swab64(x) ___arch__swab64(x)
71#define __BYTEORDER_HAS_U64__
72#endif
73
74#define __arch__swab16(x) ___arch__swab16(x)
75#define __arch__swab24(x) ___arch__swab24(x)
76#define __arch__swab32(x) ___arch__swab32(x)
77
78#endif /* __GNUC__ */
79
80#include <linux/byteorder/big_endian.h> 5#include <linux/byteorder/big_endian.h>
81 6
82#endif /* _PARISC_BYTEORDER_H */ 7#endif /* _PARISC_BYTEORDER_H */
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
index e9639ccc3fce..c84b2fcb18a9 100644
--- a/arch/parisc/include/asm/checksum.h
+++ b/arch/parisc/include/asm/checksum.h
@@ -182,7 +182,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
182#endif 182#endif
183 : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len) 183 : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
184 : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto) 184 : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
185 : "r19", "r20", "r21", "r22"); 185 : "r19", "r20", "r21", "r22", "memory");
186 return csum_fold(sum); 186 return csum_fold(sum);
187} 187}
188 188
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 55ddb1842107..d3031d1f9d03 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -4,12 +4,6 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/pgtable.h> 5#include <asm/pgtable.h>
6 6
7extern unsigned long parisc_vmerge_boundary;
8extern unsigned long parisc_vmerge_max_size;
9
10#define BIO_VMERGE_BOUNDARY parisc_vmerge_boundary
11#define BIO_VMERGE_MAX_SIZE parisc_vmerge_max_size
12
13#define virt_to_phys(a) ((unsigned long)__pa(a)) 7#define virt_to_phys(a) ((unsigned long)__pa(a))
14#define phys_to_virt(a) __va(a) 8#define phys_to_virt(a) __va(a)
15#define virt_to_bus virt_to_phys 9#define virt_to_bus virt_to_phys
@@ -182,9 +176,9 @@ static inline void __raw_writeq(unsigned long long b, volatile void __iomem *add
182 176
183/* readb can never be const, so use __fswab instead of le*_to_cpu */ 177/* readb can never be const, so use __fswab instead of le*_to_cpu */
184#define readb(addr) __raw_readb(addr) 178#define readb(addr) __raw_readb(addr)
185#define readw(addr) __fswab16(__raw_readw(addr)) 179#define readw(addr) le16_to_cpu(__raw_readw(addr))
186#define readl(addr) __fswab32(__raw_readl(addr)) 180#define readl(addr) le32_to_cpu(__raw_readl(addr))
187#define readq(addr) __fswab64(__raw_readq(addr)) 181#define readq(addr) le64_to_cpu(__raw_readq(addr))
188#define writeb(b, addr) __raw_writeb(b, addr) 182#define writeb(b, addr) __raw_writeb(b, addr)
189#define writew(b, addr) __raw_writew(cpu_to_le16(b), addr) 183#define writew(b, addr) __raw_writew(cpu_to_le16(b), addr)
190#define writel(b, addr) __raw_writel(cpu_to_le32(b), addr) 184#define writel(b, addr) __raw_writel(cpu_to_le32(b), addr)
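parisc is big-endian but readw()/readl()/readq() consume little-endian bus data, so a byte swap is required; spelling it le16_to_cpu() instead of the raw __fswab16() states the intent and keeps working as the generic byteorder headers are reorganised (which the swab.h split later in this series does). A runnable userspace sketch of what le16_to_cpu() means — the helper name is made up:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Interpret two bytes as a little-endian 16-bit value, whatever the
     * host endianness; on a big-endian host this amounts to a byte swap. */
    static uint16_t demo_le16_to_cpu(uint16_t le)
    {
            uint8_t b[2];

            memcpy(b, &le, 2);              /* the bytes as they sit in memory */
            return (uint16_t)(b[0] | (b[1] << 8));
    }

    int main(void)
    {
            uint8_t wire[2] = { 0x34, 0x12 };       /* device register holding 0x1234 */
            uint16_t raw;

            memcpy(&raw, wire, 2);
            printf("0x%04x\n", demo_le16_to_cpu(raw));      /* 0x1234 on any host */
            return 0;
    }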
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 85856c74ad1d..354b2aca990e 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -34,16 +34,21 @@ destroy_context(struct mm_struct *mm)
34 mm->context = 0; 34 mm->context = 0;
35} 35}
36 36
37static inline void load_context(mm_context_t context) 37static inline unsigned long __space_to_prot(mm_context_t context)
38{ 38{
39 mtsp(context, 3);
40#if SPACEID_SHIFT == 0 39#if SPACEID_SHIFT == 0
41 mtctl(context << 1,8); 40 return context << 1;
42#else 41#else
43 mtctl(context >> (SPACEID_SHIFT - 1),8); 42 return context >> (SPACEID_SHIFT - 1);
44#endif 43#endif
45} 44}
46 45
46static inline void load_context(mm_context_t context)
47{
48 mtsp(context, 3);
49 mtctl(__space_to_prot(context), 8);
50}
51
47static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) 52static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
48{ 53{
49 54
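Pulling the shift out of load_context() into __space_to_prot() lets other code convert a space id into the protection-id register format without also loading the space register. A runnable sketch of the extracted calculation; DEMO_SPACEID_SHIFT is an illustrative stand-in for the kernel's SPACEID_SHIFT:

    #include <stdio.h>

    #define DEMO_SPACEID_SHIFT 0    /* assumed value for this sketch */

    static unsigned long space_to_prot(unsigned long context)
    {
    #if DEMO_SPACEID_SHIFT == 0
            return context << 1;
    #else
            return context >> (DEMO_SPACEID_SHIFT - 1);
    #endif
    }

    int main(void)
    {
            printf("%lu\n", space_to_prot(5));      /* 10 with a shift of 0 */
            return 0;
    }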
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 3c9d34844c83..9d64df8754ba 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -17,6 +17,7 @@
17#include <asm/ptrace.h> 17#include <asm/ptrace.h>
18#include <asm/types.h> 18#include <asm/types.h>
19#include <asm/system.h> 19#include <asm/system.h>
20#include <asm/percpu.h>
20#endif /* __ASSEMBLY__ */ 21#endif /* __ASSEMBLY__ */
21 22
22#define KERNEL_STACK_SIZE (4*PAGE_SIZE) 23#define KERNEL_STACK_SIZE (4*PAGE_SIZE)
@@ -109,8 +110,7 @@ struct cpuinfo_parisc {
109}; 110};
110 111
111extern struct system_cpuinfo_parisc boot_cpu_data; 112extern struct system_cpuinfo_parisc boot_cpu_data;
112extern struct cpuinfo_parisc cpu_data[NR_CPUS]; 113DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
113#define current_cpu_data cpu_data[smp_processor_id()]
114 114
115#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF) 115#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
116 116
diff --git a/arch/parisc/include/asm/swab.h b/arch/parisc/include/asm/swab.h
new file mode 100644
index 000000000000..3ff16c5a3358
--- /dev/null
+++ b/arch/parisc/include/asm/swab.h
@@ -0,0 +1,66 @@
1#ifndef _PARISC_SWAB_H
2#define _PARISC_SWAB_H
3
4#include <asm/types.h>
5#include <linux/compiler.h>
6
7#define __SWAB_64_THRU_32__
8
9static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
10{
11 __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */
12 "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */
13 : "=r" (x)
14 : "0" (x));
15 return x;
16}
17#define __arch_swab16 __arch_swab16
18
19static inline __attribute_const__ __u32 __arch_swab24(__u32 x)
20{
21 __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */
22 "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */
23 "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */
24 : "=r" (x)
25 : "0" (x));
26 return x;
27}
28
29static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
30{
31 unsigned int temp;
32 __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */
33 "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */
34 "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */
35 : "=r" (x), "=&r" (temp)
36 : "0" (x));
37 return x;
38}
39#define __arch_swab32 __arch_swab32
40
41#if BITS_PER_LONG > 32
42/*
43** From "PA-RISC 2.0 Architecture", HP Professional Books.
44** See Appendix I page 8 , "Endian Byte Swapping".
45**
46** Pretty cool algorithm: (* == zero'd bits)
47** PERMH 01234567 -> 67452301 into %0
48** HSHL 67452301 -> 7*5*3*1* into %1
49** HSHR 67452301 -> *6*4*2*0 into %0
50** OR %0 | %1 -> 76543210 into %0 (all done!)
51*/
52static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
53{
54 __u64 temp;
55 __asm__("permh,3210 %0, %0\n\t"
56 "hshl %0, 8, %1\n\t"
57 "hshr,u %0, 8, %0\n\t"
58 "or %1, %0, %0"
59 : "=r" (x), "=&r" (temp)
60 : "0" (x));
61 return x;
62}
63#define __arch_swab64 __arch_swab64
64#endif /* BITS_PER_LONG > 32 */
65
66#endif /* _PARISC_SWAB_H */
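The new header supplies __arch_swab16/32/64 in the form the generic linux/swab.h machinery expects, and __SWAB_64_THRU_32__ tells the generic code to synthesize the 64-bit swap from two 32-bit swaps on 32-bit kernels, replacing the old open-coded ___arch__swab64 fallback. A portable, runnable cross-check of what the dep/shd assembly computes:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t swab16(uint16_t x)
    {
            return (uint16_t)((x << 8) | (x >> 8));
    }

    static uint32_t swab32(uint32_t x)
    {
            return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
                   ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
    }

    int main(void)
    {
            /* expect: 3412 78563412 */
            printf("%04x %08x\n", swab16(0x1234), swab32(0x12345678u));
            return 0;
    }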
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4878b9501f24..1c6dbb6f6e56 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -241,4 +241,6 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo
241#define __copy_to_user_inatomic __copy_to_user 241#define __copy_to_user_inatomic __copy_to_user
242#define __copy_from_user_inatomic __copy_from_user 242#define __copy_from_user_inatomic __copy_from_user
243 243
244int fixup_exception(struct pt_regs *regs);
245
244#endif /* __PARISC_UACCESS_H */ 246#endif /* __PARISC_UACCESS_H */
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 884b7ce16a3b..994bcd980909 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -549,6 +549,38 @@ static int parisc_generic_match(struct device *dev, struct device_driver *drv)
549 return match_device(to_parisc_driver(drv), to_parisc_device(dev)); 549 return match_device(to_parisc_driver(drv), to_parisc_device(dev));
550} 550}
551 551
552static ssize_t make_modalias(struct device *dev, char *buf)
553{
554 const struct parisc_device *padev = to_parisc_device(dev);
555 const struct parisc_device_id *id = &padev->id;
556
557 return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
558 (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
559 (u32)id->sversion);
560}
561
562static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env)
563{
564 const struct parisc_device *padev;
565 char modalias[40];
566
567 if (!dev)
568 return -ENODEV;
569
570 padev = to_parisc_device(dev);
571 if (!padev)
572 return -ENODEV;
573
574 if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
575 return -ENOMEM;
576
577 make_modalias(dev, modalias);
578 if (add_uevent_var(env, "MODALIAS=%s", modalias))
579 return -ENOMEM;
580
581 return 0;
582}
583
552#define pa_dev_attr(name, field, format_string) \ 584#define pa_dev_attr(name, field, format_string) \
553static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \ 585static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
554{ \ 586{ \
@@ -566,12 +598,7 @@ pa_dev_attr_id(sversion, "0x%05x\n");
566 598
567static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) 599static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
568{ 600{
569 struct parisc_device *padev = to_parisc_device(dev); 601 return make_modalias(dev, buf);
570 struct parisc_device_id *id = &padev->id;
571
572 return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
573 (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
574 (u32)id->sversion);
575} 602}
576 603
577static struct device_attribute parisc_device_attrs[] = { 604static struct device_attribute parisc_device_attrs[] = {
@@ -587,6 +614,7 @@ static struct device_attribute parisc_device_attrs[] = {
587struct bus_type parisc_bus_type = { 614struct bus_type parisc_bus_type = {
588 .name = "parisc", 615 .name = "parisc",
589 .match = parisc_generic_match, 616 .match = parisc_generic_match,
617 .uevent = parisc_uevent,
590 .dev_attrs = parisc_device_attrs, 618 .dev_attrs = parisc_device_attrs,
591 .probe = parisc_driver_probe, 619 .probe = parisc_driver_probe,
592 .remove = parisc_driver_remove, 620 .remove = parisc_driver_remove,
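Factoring make_modalias() out of the sysfs show routine lets the new uevent callback emit the identical string as MODALIAS, which is what allows udev/modprobe to autoload parisc drivers from their device-id tables. A runnable sketch of the string format with invented id values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t hw_type = 0x00, hversion_rev = 0x01;    /* invented ids */
            uint16_t hversion = 0x0481;
            uint32_t sversion = 0x00000081;
            char buf[40];

            snprintf(buf, sizeof(buf), "parisc:t%02Xhv%04Xrev%02Xsv%08X",
                     (unsigned)hw_type, (unsigned)hversion,
                     (unsigned)hversion_rev, (unsigned)sversion);
            printf("%s\n", buf);    /* parisc:t00hv0481rev01sv00000081 */
            return 0;
    }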
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 2cbf13b3ef11..5595a2f31181 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -80,6 +80,7 @@ END(hpmc_pim_data)
80 80
81 .import intr_save, code 81 .import intr_save, code
82ENTRY(os_hpmc) 82ENTRY(os_hpmc)
83.os_hpmc:
83 84
84 /* 85 /*
85 * registers modified: 86 * registers modified:
@@ -295,5 +296,10 @@ os_hpmc_6:
295 b . 296 b .
296 nop 297 nop
297ENDPROC(os_hpmc) 298ENDPROC(os_hpmc)
298ENTRY(os_hpmc_end) /* this label used to compute os_hpmc checksum */ 299.os_hpmc_end:
299 nop 300 nop
301.data
302.align 4
303 .export os_hpmc_size
304os_hpmc_size:
305 .word .os_hpmc_end-.os_hpmc
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 4cea935e2f99..ac2c822928c7 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -298,7 +298,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu)
298 irq_desc[irq].affinity = cpumask_of_cpu(cpu); 298 irq_desc[irq].affinity = cpumask_of_cpu(cpu);
299#endif 299#endif
300 300
301 return cpu_data[cpu].txn_addr; 301 return per_cpu(cpu_data, cpu).txn_addr;
302} 302}
303 303
304 304
@@ -309,8 +309,9 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
309 next_cpu++; /* assign to "next" CPU we want this bugger on */ 309 next_cpu++; /* assign to "next" CPU we want this bugger on */
310 310
311 /* validate entry */ 311 /* validate entry */
312 while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr || 312 while ((next_cpu < NR_CPUS) &&
313 !cpu_online(next_cpu))) 313 (!per_cpu(cpu_data, next_cpu).txn_addr ||
314 !cpu_online(next_cpu)))
314 next_cpu++; 315 next_cpu++;
315 316
316 if (next_cpu >= NR_CPUS) 317 if (next_cpu >= NR_CPUS)
@@ -359,7 +360,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
359 printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n", 360 printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
360 irq, smp_processor_id(), cpu); 361 irq, smp_processor_id(), cpu);
361 gsc_writel(irq + CPU_IRQ_BASE, 362 gsc_writel(irq + CPU_IRQ_BASE,
362 cpu_data[cpu].hpa); 363 per_cpu(cpu_data, cpu).hpa);
363 goto set_out; 364 goto set_out;
364 } 365 }
365#endif 366#endif
@@ -421,5 +422,5 @@ void __init init_IRQ(void)
421 422
422void ack_bad_irq(unsigned int irq) 423void ack_bad_irq(unsigned int irq)
423{ 424{
424 printk("unexpected IRQ %d\n", irq); 425 printk(KERN_WARNING "unexpected IRQ %d\n", irq);
425} 426}
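cpu_data moves from an NR_CPUS-sized global array to DEFINE_PER_CPU, so each CPU's cpuinfo_parisc lives in that CPU's per-cpu area and the per_cpu(cpu_data, cpu) accessor replaces direct indexing throughout the rest of this series. A plain-C userspace analogue of the access pattern — it mirrors only the syntax, not the kernel's per-cpu placement:

    #include <stdio.h>

    #define DEMO_NR_CPUS 4

    struct demo_cpuinfo {
            unsigned long txn_addr;
    };

    static struct demo_cpuinfo demo_cpu_data[DEMO_NR_CPUS];

    /* stand-in for the kernel's per_cpu(cpu_data, cpu) lookup */
    #define demo_per_cpu(var, cpu) ((var)[(cpu)])

    int main(void)
    {
            demo_per_cpu(demo_cpu_data, 2).txn_addr = 0xf000;
            printf("%#lx\n", demo_per_cpu(demo_cpu_data, 2).txn_addr);
            return 0;
    }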
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index ccb68090781e..1ff366cb9685 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -52,7 +52,7 @@
52#include <linux/tty.h> 52#include <linux/tty.h>
53#include <asm/pdc.h> /* for iodc_call() proto and friends */ 53#include <asm/pdc.h> /* for iodc_call() proto and friends */
54 54
55static spinlock_t pdc_console_lock = SPIN_LOCK_UNLOCKED; 55static DEFINE_SPINLOCK(pdc_console_lock);
56 56
57static void pdc_console_write(struct console *co, const char *s, unsigned count) 57static void pdc_console_write(struct console *co, const char *s, unsigned count)
58{ 58{
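SPIN_LOCK_UNLOCKED as a static initializer is deprecated because it cannot give each lock its own lockdep class; DEFINE_SPINLOCK() is the sanctioned static form. A kernel-context sketch of the pattern, with illustrative lock and function names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_critical_section(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            /* console-style critical section goes here */
            spin_unlock_irqrestore(&demo_lock, flags);
    }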
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index f696f57faa15..75099efb3bf3 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -541,9 +541,9 @@ static int __init perf_init(void)
541 spin_lock_init(&perf_lock); 541 spin_lock_init(&perf_lock);
542 542
543 /* TODO: this only lets us access the first cpu.. what to do for SMP? */ 543 /* TODO: this only lets us access the first cpu.. what to do for SMP? */
544 cpu_device = cpu_data[0].dev; 544 cpu_device = per_cpu(cpu_data, 0).dev;
545 printk("Performance monitoring counters enabled for %s\n", 545 printk("Performance monitoring counters enabled for %s\n",
546 cpu_data[0].dev->name); 546 per_cpu(cpu_data, 0).dev->name);
547 547
548 return 0; 548 return 0;
549} 549}
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 370086fb8333..ecb609342feb 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -3,7 +3,7 @@
3 * Initial setup-routines for HP 9000 based hardware. 3 * Initial setup-routines for HP 9000 based hardware.
4 * 4 *
5 * Copyright (C) 1991, 1992, 1995 Linus Torvalds 5 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
6 * Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de> 6 * Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
7 * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf) 7 * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
8 * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net> 8 * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
9 * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org> 9 * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
@@ -46,7 +46,7 @@
46struct system_cpuinfo_parisc boot_cpu_data __read_mostly; 46struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
47EXPORT_SYMBOL(boot_cpu_data); 47EXPORT_SYMBOL(boot_cpu_data);
48 48
49struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly; 49DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
50 50
51extern int update_cr16_clocksource(void); /* from time.c */ 51extern int update_cr16_clocksource(void); /* from time.c */
52 52
@@ -69,6 +69,23 @@ extern int update_cr16_clocksource(void); /* from time.c */
69*/ 69*/
70 70
71/** 71/**
72 * init_cpu_profiler - enable/setup per cpu profiling hooks.
73 * @cpunum: The processor instance.
74 *
75 * FIXME: doesn't do much yet...
76 */
77static void __cpuinit
78init_percpu_prof(unsigned long cpunum)
79{
80 struct cpuinfo_parisc *p;
81
82 p = &per_cpu(cpu_data, cpunum);
83 p->prof_counter = 1;
84 p->prof_multiplier = 1;
85}
86
87
88/**
72 * processor_probe - Determine if processor driver should claim this device. 89 * processor_probe - Determine if processor driver should claim this device.
73 * @dev: The device which has been found. 90 * @dev: The device which has been found.
74 * 91 *
@@ -147,7 +164,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
147 } 164 }
148#endif 165#endif
149 166
150 p = &cpu_data[cpuid]; 167 p = &per_cpu(cpu_data, cpuid);
151 boot_cpu_data.cpu_count++; 168 boot_cpu_data.cpu_count++;
152 169
153 /* initialize counters - CPU 0 gets it_value set in time_init() */ 170 /* initialize counters - CPU 0 gets it_value set in time_init() */
@@ -162,12 +179,9 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
162#ifdef CONFIG_SMP 179#ifdef CONFIG_SMP
163 /* 180 /*
164 ** FIXME: review if any other initialization is clobbered 181 ** FIXME: review if any other initialization is clobbered
165 ** for boot_cpu by the above memset(). 182 ** for boot_cpu by the above memset().
166 */ 183 */
167 184 init_percpu_prof(cpuid);
168 /* stolen from init_percpu_prof() */
169 cpu_data[cpuid].prof_counter = 1;
170 cpu_data[cpuid].prof_multiplier = 1;
171#endif 185#endif
172 186
173 /* 187 /*
@@ -261,19 +275,6 @@ void __init collect_boot_cpu_data(void)
261} 275}
262 276
263 277
264/**
265 * init_cpu_profiler - enable/setup per cpu profiling hooks.
266 * @cpunum: The processor instance.
267 *
268 * FIXME: doesn't do much yet...
269 */
270static inline void __init
271init_percpu_prof(int cpunum)
272{
273 cpu_data[cpunum].prof_counter = 1;
274 cpu_data[cpunum].prof_multiplier = 1;
275}
276
277 278
278/** 279/**
279 * init_per_cpu - Handle individual processor initializations. 280 * init_per_cpu - Handle individual processor initializations.
@@ -293,7 +294,7 @@ init_percpu_prof(int cpunum)
293 * 294 *
294 * o Enable CPU profiling hooks. 295 * o Enable CPU profiling hooks.
295 */ 296 */
296int __init init_per_cpu(int cpunum) 297int __cpuinit init_per_cpu(int cpunum)
297{ 298{
298 int ret; 299 int ret;
299 struct pdc_coproc_cfg coproc_cfg; 300 struct pdc_coproc_cfg coproc_cfg;
@@ -307,8 +308,8 @@ int __init init_per_cpu(int cpunum)
307 /* FWIW, FP rev/model is a more accurate way to determine 308 /* FWIW, FP rev/model is a more accurate way to determine
308 ** CPU type. CPU rev/model has some ambiguous cases. 309 ** CPU type. CPU rev/model has some ambiguous cases.
309 */ 310 */
310 cpu_data[cpunum].fp_rev = coproc_cfg.revision; 311 per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
311 cpu_data[cpunum].fp_model = coproc_cfg.model; 312 per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
312 313
313 printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", 314 printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
314 cpunum, coproc_cfg.revision, coproc_cfg.model); 315 cpunum, coproc_cfg.revision, coproc_cfg.model);
@@ -344,16 +345,17 @@ int __init init_per_cpu(int cpunum)
344int 345int
345show_cpuinfo (struct seq_file *m, void *v) 346show_cpuinfo (struct seq_file *m, void *v)
346{ 347{
347 int n; 348 unsigned long cpu;
348 349
349 for(n=0; n<boot_cpu_data.cpu_count; n++) { 350 for_each_online_cpu(cpu) {
351 const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
350#ifdef CONFIG_SMP 352#ifdef CONFIG_SMP
351 if (0 == cpu_data[n].hpa) 353 if (0 == cpuinfo->hpa)
352 continue; 354 continue;
353#endif 355#endif
354 seq_printf(m, "processor\t: %d\n" 356 seq_printf(m, "processor\t: %lu\n"
355 "cpu family\t: PA-RISC %s\n", 357 "cpu family\t: PA-RISC %s\n",
356 n, boot_cpu_data.family_name); 358 cpu, boot_cpu_data.family_name);
357 359
358 seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name ); 360 seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name );
359 361
@@ -365,8 +367,8 @@ show_cpuinfo (struct seq_file *m, void *v)
365 seq_printf(m, "model\t\t: %s\n" 367 seq_printf(m, "model\t\t: %s\n"
366 "model name\t: %s\n", 368 "model name\t: %s\n",
367 boot_cpu_data.pdc.sys_model_name, 369 boot_cpu_data.pdc.sys_model_name,
368 cpu_data[n].dev ? 370 cpuinfo->dev ?
369 cpu_data[n].dev->name : "Unknown" ); 371 cpuinfo->dev->name : "Unknown");
370 372
371 seq_printf(m, "hversion\t: 0x%08x\n" 373 seq_printf(m, "hversion\t: 0x%08x\n"
372 "sversion\t: 0x%08x\n", 374 "sversion\t: 0x%08x\n",
@@ -377,8 +379,8 @@ show_cpuinfo (struct seq_file *m, void *v)
377 show_cache_info(m); 379 show_cache_info(m);
378 380
379 seq_printf(m, "bogomips\t: %lu.%02lu\n", 381 seq_printf(m, "bogomips\t: %lu.%02lu\n",
380 cpu_data[n].loops_per_jiffy / (500000 / HZ), 382 cpuinfo->loops_per_jiffy / (500000 / HZ),
381 (cpu_data[n].loops_per_jiffy / (5000 / HZ)) % 100); 383 (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);
382 384
383 seq_printf(m, "software id\t: %ld\n\n", 385 seq_printf(m, "software id\t: %ld\n\n",
384 boot_cpu_data.pdc.model.sw_id); 386 boot_cpu_data.pdc.model.sw_id);
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 7d27853ff8c8..82131ca8e05c 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -58,11 +58,6 @@ int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */
58EXPORT_SYMBOL(parisc_bus_is_phys); 58EXPORT_SYMBOL(parisc_bus_is_phys);
59#endif 59#endif
60 60
61/* This sets the vmerge boundary and size, it's here because it has to
62 * be available on all platforms (zero means no-virtual merging) */
63unsigned long parisc_vmerge_boundary = 0;
64unsigned long parisc_vmerge_max_size = 0;
65
66void __init setup_cmdline(char **cmdline_p) 61void __init setup_cmdline(char **cmdline_p)
67{ 62{
68 extern unsigned int boot_args[]; 63 extern unsigned int boot_args[];
@@ -321,7 +316,7 @@ static int __init parisc_init(void)
321 316
322 processor_init(); 317 processor_init();
323 printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n", 318 printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n",
324 boot_cpu_data.cpu_count, 319 num_present_cpus(),
325 boot_cpu_data.cpu_name, 320 boot_cpu_data.cpu_name,
326 boot_cpu_data.cpu_hz / 1000000, 321 boot_cpu_data.cpu_hz / 1000000,
327 boot_cpu_data.cpu_hz % 1000000 ); 322 boot_cpu_data.cpu_hz % 1000000 );
@@ -387,8 +382,8 @@ void start_parisc(void)
387 if (ret >= 0 && coproc_cfg.ccr_functional) { 382 if (ret >= 0 && coproc_cfg.ccr_functional) {
388 mtctl(coproc_cfg.ccr_functional, 10); 383 mtctl(coproc_cfg.ccr_functional, 10);
389 384
390 cpu_data[cpunum].fp_rev = coproc_cfg.revision; 385 per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
391 cpu_data[cpunum].fp_model = coproc_cfg.model; 386 per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
392 387
393 asm volatile ("fstd %fr0,8(%sp)"); 388 asm volatile ("fstd %fr0,8(%sp)");
394 } else { 389 } else {
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 80bc000523fa..9995d7ed5819 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -56,16 +56,17 @@ static int smp_debug_lvl = 0;
56 if (lvl >= smp_debug_lvl) \ 56 if (lvl >= smp_debug_lvl) \
57 printk(printargs); 57 printk(printargs);
58#else 58#else
59#define smp_debug(lvl, ...) 59#define smp_debug(lvl, ...) do { } while(0)
60#endif /* DEBUG_SMP */ 60#endif /* DEBUG_SMP */
61 61
62DEFINE_SPINLOCK(smp_lock); 62DEFINE_SPINLOCK(smp_lock);
63 63
64volatile struct task_struct *smp_init_current_idle_task; 64volatile struct task_struct *smp_init_current_idle_task;
65 65
66static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */ 66/* track which CPU is booting */
67static volatile int cpu_now_booting __cpuinitdata;
67 68
68static int parisc_max_cpus __read_mostly = 1; 69static int parisc_max_cpus __cpuinitdata = 1;
69 70
70DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; 71DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
71 72
@@ -123,7 +124,7 @@ irqreturn_t
123ipi_interrupt(int irq, void *dev_id) 124ipi_interrupt(int irq, void *dev_id)
124{ 125{
125 int this_cpu = smp_processor_id(); 126 int this_cpu = smp_processor_id();
126 struct cpuinfo_parisc *p = &cpu_data[this_cpu]; 127 struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
127 unsigned long ops; 128 unsigned long ops;
128 unsigned long flags; 129 unsigned long flags;
129 130
@@ -202,13 +203,13 @@ ipi_interrupt(int irq, void *dev_id)
202static inline void 203static inline void
203ipi_send(int cpu, enum ipi_message_type op) 204ipi_send(int cpu, enum ipi_message_type op)
204{ 205{
205 struct cpuinfo_parisc *p = &cpu_data[cpu]; 206 struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
206 spinlock_t *lock = &per_cpu(ipi_lock, cpu); 207 spinlock_t *lock = &per_cpu(ipi_lock, cpu);
207 unsigned long flags; 208 unsigned long flags;
208 209
209 spin_lock_irqsave(lock, flags); 210 spin_lock_irqsave(lock, flags);
210 p->pending_ipi |= 1 << op; 211 p->pending_ipi |= 1 << op;
211 gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa); 212 gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
212 spin_unlock_irqrestore(lock, flags); 213 spin_unlock_irqrestore(lock, flags);
213} 214}
214 215
@@ -224,10 +225,7 @@ send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
224static inline void 225static inline void
225send_IPI_single(int dest_cpu, enum ipi_message_type op) 226send_IPI_single(int dest_cpu, enum ipi_message_type op)
226{ 227{
227 if (dest_cpu == NO_PROC_ID) { 228 BUG_ON(dest_cpu == NO_PROC_ID);
228 BUG();
229 return;
230 }
231 229
232 ipi_send(dest_cpu, op); 230 ipi_send(dest_cpu, op);
233} 231}
@@ -309,8 +307,7 @@ smp_cpu_init(int cpunum)
309 /* Initialise the idle task for this CPU */ 307 /* Initialise the idle task for this CPU */
310 atomic_inc(&init_mm.mm_count); 308 atomic_inc(&init_mm.mm_count);
311 current->active_mm = &init_mm; 309 current->active_mm = &init_mm;
312 if(current->mm) 310 BUG_ON(current->mm);
313 BUG();
314 enter_lazy_tlb(&init_mm, current); 311 enter_lazy_tlb(&init_mm, current);
315 312
316 init_IRQ(); /* make sure no IRQs are enabled or pending */ 313 init_IRQ(); /* make sure no IRQs are enabled or pending */
@@ -345,6 +342,7 @@ void __init smp_callin(void)
345 */ 342 */
346int __cpuinit smp_boot_one_cpu(int cpuid) 343int __cpuinit smp_boot_one_cpu(int cpuid)
347{ 344{
345 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
348 struct task_struct *idle; 346 struct task_struct *idle;
349 long timeout; 347 long timeout;
350 348
@@ -376,7 +374,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
376 smp_init_current_idle_task = idle ; 374 smp_init_current_idle_task = idle ;
377 mb(); 375 mb();
378 376
379 printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa); 377 printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
380 378
381 /* 379 /*
382 ** This gets PDC to release the CPU from a very tight loop. 380 ** This gets PDC to release the CPU from a very tight loop.
@@ -387,7 +385,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
387 ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the 385 ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
388 ** contents of memory are valid." 386 ** contents of memory are valid."
389 */ 387 */
390 gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa); 388 gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
391 mb(); 389 mb();
392 390
393 /* 391 /*
@@ -419,12 +417,12 @@ alive:
419 return 0; 417 return 0;
420} 418}
421 419
422void __devinit smp_prepare_boot_cpu(void) 420void __init smp_prepare_boot_cpu(void)
423{ 421{
424 int bootstrap_processor=cpu_data[0].cpuid; /* CPU ID of BSP */ 422 int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
425 423
426 /* Setup BSP mappings */ 424 /* Setup BSP mappings */
427 printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor); 425 printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
428 426
429 cpu_set(bootstrap_processor, cpu_online_map); 427 cpu_set(bootstrap_processor, cpu_online_map);
430 cpu_set(bootstrap_processor, cpu_present_map); 428 cpu_set(bootstrap_processor, cpu_present_map);
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 4d09203bc693..9d46c43a4152 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -60,7 +60,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
60 unsigned long cycles_elapsed, ticks_elapsed; 60 unsigned long cycles_elapsed, ticks_elapsed;
61 unsigned long cycles_remainder; 61 unsigned long cycles_remainder;
62 unsigned int cpu = smp_processor_id(); 62 unsigned int cpu = smp_processor_id();
63 struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu]; 63 struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
64 64
65 /* gcc can optimize for "read-only" case with a local clocktick */ 65 /* gcc can optimize for "read-only" case with a local clocktick */
66 unsigned long cpt = clocktick; 66 unsigned long cpt = clocktick;
@@ -213,7 +213,7 @@ void __init start_cpu_itimer(void)
213 213
214 mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */ 214 mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
215 215
216 cpu_data[cpu].it_value = next_tick; 216 per_cpu(cpu_data, cpu).it_value = next_tick;
217} 217}
218 218
219struct platform_device rtc_parisc_dev = { 219struct platform_device rtc_parisc_dev = {
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index d71cb018a21e..f5159381fdd6 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -22,14 +22,14 @@
22#include <linux/cpu.h> 22#include <linux/cpu.h>
23#include <linux/cache.h> 23#include <linux/cache.h>
24 24
25static struct cpu cpu_devices[NR_CPUS] __read_mostly; 25static DEFINE_PER_CPU(struct cpu, cpu_devices);
26 26
27static int __init topology_init(void) 27static int __init topology_init(void)
28{ 28{
29 int num; 29 int num;
30 30
31 for_each_present_cpu(num) { 31 for_each_present_cpu(num) {
32 register_cpu(&cpu_devices[num], num); 32 register_cpu(&per_cpu(cpu_devices, num), num);
33 } 33 }
34 return 0; 34 return 0;
35} 35}
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 4c771cd580ec..ba658d2086f7 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -745,6 +745,10 @@ void handle_interruption(int code, struct pt_regs *regs)
745 /* Fall Through */ 745 /* Fall Through */
746 case 27: 746 case 27:
747 /* Data memory protection ID trap */ 747 /* Data memory protection ID trap */
748 if (code == 27 && !user_mode(regs) &&
749 fixup_exception(regs))
750 return;
751
748 die_if_kernel("Protection id trap", regs, code); 752 die_if_kernel("Protection id trap", regs, code);
749 si.si_code = SEGV_MAPERR; 753 si.si_code = SEGV_MAPERR;
750 si.si_signo = SIGSEGV; 754 si.si_signo = SIGSEGV;
@@ -821,8 +825,8 @@ void handle_interruption(int code, struct pt_regs *regs)
821 825
822int __init check_ivt(void *iva) 826int __init check_ivt(void *iva)
823{ 827{
828 extern u32 os_hpmc_size;
824 extern const u32 os_hpmc[]; 829 extern const u32 os_hpmc[];
825 extern const u32 os_hpmc_end[];
826 830
827 int i; 831 int i;
828 u32 check = 0; 832 u32 check = 0;
@@ -839,8 +843,7 @@ int __init check_ivt(void *iva)
839 *ivap++ = 0; 843 *ivap++ = 0;
840 844
841 /* Compute Checksum for HPMC handler */ 845 /* Compute Checksum for HPMC handler */
842 846 length = os_hpmc_size;
843 length = os_hpmc_end - os_hpmc;
844 ivap[7] = length; 847 ivap[7] = length;
845 848
846 hpmcp = (u32 *)os_hpmc; 849 hpmcp = (u32 *)os_hpmc;
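With the os_hpmc_end label gone, the handler's length reaches C code as the assembler-computed os_hpmc_size exported from hpmc.S, and check_ivt() still just sums the handler into the IVA checksum. A runnable sketch of the summing step; the region contents and the word-count unit are assumptions of this sketch, not taken from the patch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t word_checksum(const uint32_t *p, uint32_t nwords)
    {
            uint32_t check = 0;

            while (nwords--)
                    check += *p++;
            return check;
    }

    int main(void)
    {
            uint32_t region[4] = { 1, 2, 3, 4 };    /* invented contents */

            printf("%u\n", word_checksum(region, 4));       /* 10 */
            return 0;
    }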
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 6773c582e457..69dad5a850a8 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -372,7 +372,7 @@ void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct
372 struct pt_regs *r = &t->thread.regs; 372 struct pt_regs *r = &t->thread.regs;
373 struct pt_regs *r2; 373 struct pt_regs *r2;
374 374
375 r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL); 375 r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
376 if (!r2) 376 if (!r2)
377 return; 377 return;
378 *r2 = *r; 378 *r2 = *r;
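Switching the kmalloc() to GFP_ATOMIC suggests unwind_frame_init_from_blocked_task() can be reached from contexts that must not sleep: GFP_KERNEL may block for memory reclaim, while GFP_ATOMIC never sleeps, at the cost of a higher failure rate — which the existing NULL check already tolerates. A kernel-context sketch of the idiom with an illustrative function:

    #include <linux/ptrace.h>
    #include <linux/slab.h>

    static struct pt_regs *copy_regs_nosleep(const struct pt_regs *r)
    {
            struct pt_regs *r2;

            r2 = kmalloc(sizeof(*r2), GFP_ATOMIC); /* safe where sleeping is not */
            if (!r2)
                    return NULL;                   /* caller must tolerate failure */
            *r2 = *r;
            return r2;
    }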
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index 9abed07db7fc..5069e8b2ca71 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -261,7 +261,7 @@ static const struct iomap_ops iomem_ops = {
261 iomem_write32r, 261 iomem_write32r,
262}; 262};
263 263
264const struct iomap_ops *iomap_ops[8] = { 264static const struct iomap_ops *iomap_ops[8] = {
265 [0] = &ioport_ops, 265 [0] = &ioport_ops,
266 [7] = &iomem_ops 266 [7] = &iomem_ops
267}; 267};
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index 2d68431fc22e..bbda909c866e 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -275,7 +275,7 @@ handle_store_error:
275 275
276 276
277/* Returns 0 for success, otherwise, returns number of bytes not transferred. */ 277/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
278unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) 278static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
279{ 279{
280 register unsigned long src, dst, t1, t2, t3; 280 register unsigned long src, dst, t1, t2, t3;
281 register unsigned char *pcs, *pcd; 281 register unsigned char *pcs, *pcd;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index b2e3e9a8cece..92c7fa4ecc3f 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -139,13 +139,41 @@ parisc_acctyp(unsigned long code, unsigned int inst)
139 } 139 }
140#endif 140#endif
141 141
142int fixup_exception(struct pt_regs *regs)
143{
144 const struct exception_table_entry *fix;
145
146 fix = search_exception_tables(regs->iaoq[0]);
147 if (fix) {
148 struct exception_data *d;
149 d = &__get_cpu_var(exception_data);
150 d->fault_ip = regs->iaoq[0];
151 d->fault_space = regs->isr;
152 d->fault_addr = regs->ior;
153
154 regs->iaoq[0] = ((fix->fixup) & ~3);
155 /*
156 * NOTE: In some cases the faulting instruction
157 * may be in the delay slot of a branch. We
158 * don't want to take the branch, so we don't
159 * increment iaoq[1], instead we set it to be
160 * iaoq[0]+4, and clear the B bit in the PSW
161 */
162 regs->iaoq[1] = regs->iaoq[0] + 4;
163 regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
164
165 return 1;
166 }
167
168 return 0;
169}
170
142void do_page_fault(struct pt_regs *regs, unsigned long code, 171void do_page_fault(struct pt_regs *regs, unsigned long code,
143 unsigned long address) 172 unsigned long address)
144{ 173{
145 struct vm_area_struct *vma, *prev_vma; 174 struct vm_area_struct *vma, *prev_vma;
146 struct task_struct *tsk = current; 175 struct task_struct *tsk = current;
147 struct mm_struct *mm = tsk->mm; 176 struct mm_struct *mm = tsk->mm;
148 const struct exception_table_entry *fix;
149 unsigned long acc_type; 177 unsigned long acc_type;
150 int fault; 178 int fault;
151 179
@@ -229,32 +257,8 @@ bad_area:
229 257
230no_context: 258no_context:
231 259
232 if (!user_mode(regs)) { 260 if (!user_mode(regs) && fixup_exception(regs)) {
233 fix = search_exception_tables(regs->iaoq[0]); 261 return;
234
235 if (fix) {
236 struct exception_data *d;
237
238 d = &__get_cpu_var(exception_data);
239 d->fault_ip = regs->iaoq[0];
240 d->fault_space = regs->isr;
241 d->fault_addr = regs->ior;
242
243 regs->iaoq[0] = ((fix->fixup) & ~3);
244
245 /*
246 * NOTE: In some cases the faulting instruction
247 * may be in the delay slot of a branch. We
248 * don't want to take the branch, so we don't
249 * increment iaoq[1], instead we set it to be
250 * iaoq[0]+4, and clear the B bit in the PSW
251 */
252
253 regs->iaoq[1] = regs->iaoq[0] + 4;
254 regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
255
256 return;
257 }
258 } 262 }
259 263
260 parisc_terminate("Bad Address (null pointer deref?)", regs, code, address); 264 parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 79f25cef32df..84b861316ce7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -108,6 +108,8 @@ config ARCH_NO_VIRT_TO_BUS
108config PPC 108config PPC
109 bool 109 bool
110 default y 110 default y
111 select HAVE_FTRACE_MCOUNT_RECORD
112 select HAVE_DYNAMIC_FTRACE
111 select HAVE_FUNCTION_TRACER 113 select HAVE_FUNCTION_TRACER
112 select ARCH_WANT_OPTIONAL_GPIOLIB 114 select ARCH_WANT_OPTIONAL_GPIOLIB
113 select HAVE_IDE 115 select HAVE_IDE
@@ -326,7 +328,8 @@ config KEXEC
326 328
327config CRASH_DUMP 329config CRASH_DUMP
328 bool "Build a kdump crash kernel" 330 bool "Build a kdump crash kernel"
329 depends on (PPC64 && RELOCATABLE) || 6xx 331 depends on PPC64 || 6xx
332 select RELOCATABLE if PPC64
330 help 333 help
331 Build a kernel suitable for use as a kdump capture kernel. 334 Build a kernel suitable for use as a kdump capture kernel.
332 The same kernel binary can be used as production kernel and dump 335 The same kernel binary can be used as production kernel and dump
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index ab6dda372438..e84df338ea29 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -356,7 +356,7 @@ $(obj)/zImage.initrd: $(addprefix $(obj)/, $(initrd-y))
356 @rm -f $@; ln $< $@ 356 @rm -f $@; ln $< $@
357 357
358install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y)) 358install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
359 sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $< 359 sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $^
360 360
361# anything not in $(targets) 361# anything not in $(targets)
362clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \ 362clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 14534d04e4db..6e34f170fa62 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -69,8 +69,18 @@
69 }; 69 };
70 70
71 bcsr@1,0 { 71 bcsr@1,0 {
72 #address-cells = <1>;
73 #size-cells = <1>;
72 compatible = "fsl,mpc8360mds-bcsr"; 74 compatible = "fsl,mpc8360mds-bcsr";
73 reg = <1 0 0x8000>; 75 reg = <1 0 0x8000>;
76 ranges = <0 1 0 0x8000>;
77
78 bcsr13: gpio-controller@d {
79 #gpio-cells = <2>;
80 compatible = "fsl,mpc8360mds-bcsr-gpio";
81 reg = <0xd 1>;
82 gpio-controller;
83 };
74 }; 84 };
75 }; 85 };
76 86
@@ -195,10 +205,21 @@
195 }; 205 };
196 206
197 par_io@1400 { 207 par_io@1400 {
208 #address-cells = <1>;
209 #size-cells = <1>;
198 reg = <0x1400 0x100>; 210 reg = <0x1400 0x100>;
211 ranges = <0 0x1400 0x100>;
199 device_type = "par_io"; 212 device_type = "par_io";
200 num-ports = <7>; 213 num-ports = <7>;
201 214
215 qe_pio_b: gpio-controller@18 {
216 #gpio-cells = <2>;
217 compatible = "fsl,mpc8360-qe-pario-bank",
218 "fsl,mpc8323-qe-pario-bank";
219 reg = <0x18 0x18>;
220 gpio-controller;
221 };
222
202 pio1: ucc_pin@01 { 223 pio1: ucc_pin@01 {
203 pio-map = < 224 pio-map = <
204 /* port pin dir open_drain assignment has_irq */ 225 /* port pin dir open_drain assignment has_irq */
@@ -282,6 +303,15 @@
282 }; 303 };
283 }; 304 };
284 305
306 timer@440 {
307 compatible = "fsl,mpc8360-qe-gtm",
308 "fsl,qe-gtm", "fsl,gtm";
309 reg = <0x440 0x40>;
310 clock-frequency = <132000000>;
311 interrupts = <12 13 14 15>;
312 interrupt-parent = <&qeic>;
313 };
314
285 spi@4c0 { 315 spi@4c0 {
286 cell-index = <0>; 316 cell-index = <0>;
287 compatible = "fsl,spi"; 317 compatible = "fsl,spi";
@@ -301,11 +331,20 @@
301 }; 331 };
302 332
303 usb@6c0 { 333 usb@6c0 {
304 compatible = "qe_udc"; 334 compatible = "fsl,mpc8360-qe-usb",
335 "fsl,mpc8323-qe-usb";
305 reg = <0x6c0 0x40 0x8b00 0x100>; 336 reg = <0x6c0 0x40 0x8b00 0x100>;
306 interrupts = <11>; 337 interrupts = <11>;
307 interrupt-parent = <&qeic>; 338 interrupt-parent = <&qeic>;
308 mode = "slave"; 339 fsl,fullspeed-clock = "clk21";
340 fsl,lowspeed-clock = "brg9";
341 gpios = <&qe_pio_b 2 0 /* USBOE */
342 &qe_pio_b 3 0 /* USBTP */
343 &qe_pio_b 8 0 /* USBTN */
344 &qe_pio_b 9 0 /* USBRP */
345 &qe_pio_b 11 0 /* USBRN */
346 &bcsr13 5 0 /* SPEED */
347 &bcsr13 4 1>; /* POWER */
309 }; 348 };
310 349
311 enet0: ucc@2000 { 350 enet0: ucc@2000 {
diff --git a/arch/powerpc/boot/dts/mpc836x_rdk.dts b/arch/powerpc/boot/dts/mpc836x_rdk.dts
index decadf3d9e98..37b789510d68 100644
--- a/arch/powerpc/boot/dts/mpc836x_rdk.dts
+++ b/arch/powerpc/boot/dts/mpc836x_rdk.dts
@@ -218,8 +218,23 @@
218 reg = <0x440 0x40>; 218 reg = <0x440 0x40>;
219 interrupts = <12 13 14 15>; 219 interrupts = <12 13 14 15>;
220 interrupt-parent = <&qeic>; 220 interrupt-parent = <&qeic>;
221 /* filled by u-boot */ 221 clock-frequency = <166666666>;
222 clock-frequency = <0>; 222 };
223
224 usb@6c0 {
225 compatible = "fsl,mpc8360-qe-usb",
226 "fsl,mpc8323-qe-usb";
227 reg = <0x6c0 0x40 0x8b00 0x100>;
228 interrupts = <11>;
229 interrupt-parent = <&qeic>;
230 fsl,fullspeed-clock = "clk21";
231 gpios = <&qe_pio_b 2 0 /* USBOE */
232 &qe_pio_b 3 0 /* USBTP */
233 &qe_pio_b 8 0 /* USBTN */
234 &qe_pio_b 9 0 /* USBRP */
235 &qe_pio_b 11 0 /* USBRN */
236 &qe_pio_e 20 0 /* SPEED */
237 &qe_pio_e 21 1 /* POWER */>;
223 }; 238 };
224 239
225 spi@4c0 { 240 spi@4c0 {
diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
index 35d5e248ccd7..4481532cbe77 100644
--- a/arch/powerpc/boot/dts/mpc8641_hpcn.dts
+++ b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
@@ -26,7 +26,13 @@
26 serial1 = &serial1; 26 serial1 = &serial1;
27 pci0 = &pci0; 27 pci0 = &pci0;
28 pci1 = &pci1; 28 pci1 = &pci1;
29 rapidio0 = &rapidio0; 29/*
30 * Only one of Rapid IO or PCI can be present due to HW limitations and
31 * due to the fact that the 2 now share address space in the new memory
32 * map. The most likely case is that we have PCI, so comment out the
33 * rapidio node. Leave it here for reference.
34 */
35 /* rapidio0 = &rapidio0; */
30 }; 36 };
31 37
32 cpus { 38 cpus {
@@ -62,18 +68,17 @@
62 reg = <0x00000000 0x40000000>; // 1G at 0x0 68 reg = <0x00000000 0x40000000>; // 1G at 0x0
63 }; 69 };
64 70
65 localbus@f8005000 { 71 localbus@ffe05000 {
66 #address-cells = <2>; 72 #address-cells = <2>;
67 #size-cells = <1>; 73 #size-cells = <1>;
68 compatible = "fsl,mpc8641-localbus", "simple-bus"; 74 compatible = "fsl,mpc8641-localbus", "simple-bus";
69 reg = <0xf8005000 0x1000>; 75 reg = <0xffe05000 0x1000>;
70 interrupts = <19 2>; 76 interrupts = <19 2>;
71 interrupt-parent = <&mpic>; 77 interrupt-parent = <&mpic>;
72 78
73 ranges = <0 0 0xff800000 0x00800000 79 ranges = <0 0 0xef800000 0x00800000
74 1 0 0xfe000000 0x01000000 80 2 0 0xffdf8000 0x00008000
75 2 0 0xf8200000 0x00100000 81 3 0 0xffdf0000 0x00008000>;
76 3 0 0xf8100000 0x00100000>;
77 82
78 flash@0,0 { 83 flash@0,0 {
79 compatible = "cfi-flash"; 84 compatible = "cfi-flash";
@@ -103,13 +108,13 @@
103 }; 108 };
104 }; 109 };
105 110
106 soc8641@f8000000 { 111 soc8641@ffe00000 {
107 #address-cells = <1>; 112 #address-cells = <1>;
108 #size-cells = <1>; 113 #size-cells = <1>;
109 device_type = "soc"; 114 device_type = "soc";
110 compatible = "simple-bus"; 115 compatible = "simple-bus";
111 ranges = <0x00000000 0xf8000000 0x00100000>; 116 ranges = <0x00000000 0xffe00000 0x00100000>;
112 reg = <0xf8000000 0x00001000>; // CCSRBAR 117 reg = <0xffe00000 0x00001000>; // CCSRBAR
113 bus-frequency = <0>; 118 bus-frequency = <0>;
114 119
115 i2c@3000 { 120 i2c@3000 {
@@ -340,17 +345,17 @@
340 }; 345 };
341 }; 346 };
342 347
343 pci0: pcie@f8008000 { 348 pci0: pcie@ffe08000 {
344 cell-index = <0>; 349 cell-index = <0>;
345 compatible = "fsl,mpc8641-pcie"; 350 compatible = "fsl,mpc8641-pcie";
346 device_type = "pci"; 351 device_type = "pci";
347 #interrupt-cells = <1>; 352 #interrupt-cells = <1>;
348 #size-cells = <2>; 353 #size-cells = <2>;
349 #address-cells = <3>; 354 #address-cells = <3>;
350 reg = <0xf8008000 0x1000>; 355 reg = <0xffe08000 0x1000>;
351 bus-range = <0x0 0xff>; 356 bus-range = <0x0 0xff>;
352 ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x20000000 357 ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x20000000
353 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>; 358 0x01000000 0x0 0x00000000 0xffc00000 0x0 0x00010000>;
354 clock-frequency = <33333333>; 359 clock-frequency = <33333333>;
355 interrupt-parent = <&mpic>; 360 interrupt-parent = <&mpic>;
356 interrupts = <24 2>; 361 interrupts = <24 2>;
@@ -481,7 +486,7 @@
481 486
482 0x01000000 0x0 0x00000000 487 0x01000000 0x0 0x00000000
483 0x01000000 0x0 0x00000000 488 0x01000000 0x0 0x00000000
484 0x0 0x00100000>; 489 0x0 0x00010000>;
485 uli1575@0 { 490 uli1575@0 {
486 reg = <0 0 0 0 0>; 491 reg = <0 0 0 0 0>;
487 #size-cells = <2>; 492 #size-cells = <2>;
@@ -491,7 +496,7 @@
491 0x0 0x20000000 496 0x0 0x20000000
492 0x01000000 0x0 0x00000000 497 0x01000000 0x0 0x00000000
493 0x01000000 0x0 0x00000000 498 0x01000000 0x0 0x00000000
494 0x0 0x00100000>; 499 0x0 0x00010000>;
495 isa@1e { 500 isa@1e {
496 device_type = "isa"; 501 device_type = "isa";
497 #interrupt-cells = <2>; 502 #interrupt-cells = <2>;
@@ -549,17 +554,17 @@
549 554
550 }; 555 };
551 556
552 pci1: pcie@f8009000 { 557 pci1: pcie@ffe09000 {
553 cell-index = <1>; 558 cell-index = <1>;
554 compatible = "fsl,mpc8641-pcie"; 559 compatible = "fsl,mpc8641-pcie";
555 device_type = "pci"; 560 device_type = "pci";
556 #interrupt-cells = <1>; 561 #interrupt-cells = <1>;
557 #size-cells = <2>; 562 #size-cells = <2>;
558 #address-cells = <3>; 563 #address-cells = <3>;
559 reg = <0xf8009000 0x1000>; 564 reg = <0xffe09000 0x1000>;
560 bus-range = <0 0xff>; 565 bus-range = <0 0xff>;
561 ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000 566 ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
562 0x01000000 0x0 0x00000000 0xe3000000 0x0 0x00100000>; 567 0x01000000 0x0 0x00000000 0xffc10000 0x0 0x00010000>;
563 clock-frequency = <33333333>; 568 clock-frequency = <33333333>;
564 interrupt-parent = <&mpic>; 569 interrupt-parent = <&mpic>;
565 interrupts = <25 2>; 570 interrupts = <25 2>;
@@ -582,18 +587,21 @@
582 587
583 0x01000000 0x0 0x00000000 588 0x01000000 0x0 0x00000000
584 0x01000000 0x0 0x00000000 589 0x01000000 0x0 0x00000000
585 0x0 0x00100000>; 590 0x0 0x00010000>;
586 }; 591 };
587 }; 592 };
588 rapidio0: rapidio@f80c0000 { 593/*
594 rapidio0: rapidio@ffec0000 {
589 #address-cells = <2>; 595 #address-cells = <2>;
590 #size-cells = <2>; 596 #size-cells = <2>;
591 compatible = "fsl,rapidio-delta"; 597 compatible = "fsl,rapidio-delta";
592 reg = <0xf80c0000 0x20000>; 598 reg = <0xffec0000 0x20000>;
593 ranges = <0 0 0xc0000000 0 0x20000000>; 599 ranges = <0 0 0x80000000 0 0x20000000>;
594 interrupt-parent = <&mpic>; 600 interrupt-parent = <&mpic>;
595 /* err_irq bell_outb_irq bell_inb_irq 601 // err_irq bell_outb_irq bell_inb_irq
596 msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq */ 602 // msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq
597 interrupts = <48 2 49 2 50 2 53 2 54 2 55 2 56 2>; 603 interrupts = <48 2 49 2 50 2 53 2 54 2 55 2 56 2>;
598 }; 604 };
605*/
606
599}; 607};
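
The hunks above move CCSRBAR and the PCIe/RapidIO register windows up to 0xffe00000/0xffe08000/0xffe09000/0xffec0000 and shrink each PCIe I/O window from 1 MiB to 64 KiB (now at 0xffc00000 and 0xffc10000). As a reading aid, here is a minimal, illustrative C decoder for one 6-cell "ranges" entry, assuming the standard PCI binding layout these nodes use (3 child address cells, 1 parent address cell, 2 size cells); it is not code from the patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct pci_range {
	uint32_t space;     /* bits 25:24 of phys.hi: 1 = I/O, 2 = 32-bit MMIO */
	uint64_t pci_addr;  /* address in PCI space */
	uint32_t cpu_addr;  /* address in the parent (CPU) space */
	uint64_t size;      /* window size */
};

static struct pci_range decode_range(const uint32_t c[6])
{
	struct pci_range r = {
		.space    = (c[0] >> 24) & 0x3,
		.pci_addr = ((uint64_t)c[1] << 32) | c[2],
		.cpu_addr = c[3],
		.size     = ((uint64_t)c[4] << 32) | c[5],
	};
	return r;
}

int main(void)
{
	/* The new pci0 I/O window: 64 KiB at CPU address 0xffc00000 */
	const uint32_t io[6] = { 0x01000000, 0x0, 0x00000000,
				 0xffc00000, 0x0, 0x00010000 };
	struct pci_range r = decode_range(io);

	printf("space %" PRIu32 " pci 0x%" PRIx64 " cpu 0x%" PRIx32
	       " size 0x%" PRIx64 "\n",
	       r.space, r.pci_addr, r.cpu_addr, r.size);
	return 0;
}

Run against the I/O entry above, this prints space 1 (I/O), PCI address 0, CPU address 0xffc00000, size 0x10000.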
diff --git a/arch/powerpc/boot/install.sh b/arch/powerpc/boot/install.sh
index b002bfd56786..51b2387bdba0 100644
--- a/arch/powerpc/boot/install.sh
+++ b/arch/powerpc/boot/install.sh
@@ -15,7 +15,7 @@
15# $2 - kernel image file 15# $2 - kernel image file
16# $3 - kernel map file 16# $3 - kernel map file
17# $4 - default install path (blank if root directory) 17# $4 - default install path (blank if root directory)
18# $5 - kernel boot file, the zImage 18# $5 and more - kernel boot files; zImage*, uImage, cuImage.*, etc.
19# 19#
20 20
21# User may have a custom install script 21# User may have a custom install script
@@ -38,3 +38,15 @@ fi
38 38
39cat $2 > $4/$image_name 39cat $2 > $4/$image_name
40cp $3 $4/System.map 40cp $3 $4/System.map
41
42# Copy all the bootable image files
43path=$4
44shift 4
45while [ $# -ne 0 ]; do
46 image_name=`basename $1`
47 if [ -f $path/$image_name ]; then
48 mv $path/$image_name $path/$image_name.old
49 fi
50 cat $1 > $path/$image_name
51 shift
52done;
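
With this change the script's first four arguments keep their old meaning, while "shift 4" turns every remaining argument into a boot image to install; an existing copy is first renamed to <name>.old, the same backup convention the script already applies to the main kernel image. A hypothetical invocation (paths and image names for illustration only) would be: install.sh <version> vmlinux System.map /boot zImage uImage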
diff --git a/arch/powerpc/configs/85xx/mpc8572_ds_defconfig b/arch/powerpc/configs/85xx/mpc8572_ds_defconfig
index 635588319e0d..32aeb79216f7 100644
--- a/arch/powerpc/configs/85xx/mpc8572_ds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8572_ds_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28-rc3 3# Linux kernel version: 2.6.28-rc8
4# Sat Nov 8 12:40:13 2008 4# Tue Dec 30 11:17:46 2008
5# 5#
6# CONFIG_PPC64 is not set 6# CONFIG_PPC64 is not set
7 7
@@ -21,7 +21,10 @@ CONFIG_FSL_BOOKE=y
21CONFIG_FSL_EMB_PERFMON=y 21CONFIG_FSL_EMB_PERFMON=y
22# CONFIG_PHYS_64BIT is not set 22# CONFIG_PHYS_64BIT is not set
23CONFIG_SPE=y 23CONFIG_SPE=y
24CONFIG_PPC_MMU_NOHASH=y
24# CONFIG_PPC_MM_SLICES is not set 25# CONFIG_PPC_MM_SLICES is not set
26CONFIG_SMP=y
27CONFIG_NR_CPUS=2
25CONFIG_PPC32=y 28CONFIG_PPC32=y
26CONFIG_WORD_SIZE=32 29CONFIG_WORD_SIZE=32
27# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set 30# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
@@ -50,7 +53,7 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
50CONFIG_PPC_OF=y 53CONFIG_PPC_OF=y
51CONFIG_OF=y 54CONFIG_OF=y
52CONFIG_PPC_UDBG_16550=y 55CONFIG_PPC_UDBG_16550=y
53# CONFIG_GENERIC_TBSYNC is not set 56CONFIG_GENERIC_TBSYNC=y
54CONFIG_AUDIT_ARCH=y 57CONFIG_AUDIT_ARCH=y
55CONFIG_GENERIC_BUG=y 58CONFIG_GENERIC_BUG=y
56CONFIG_DEFAULT_UIMAGE=y 59CONFIG_DEFAULT_UIMAGE=y
@@ -62,7 +65,7 @@ CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
62# General setup 65# General setup
63# 66#
64CONFIG_EXPERIMENTAL=y 67CONFIG_EXPERIMENTAL=y
65CONFIG_BROKEN_ON_SMP=y 68CONFIG_LOCK_KERNEL=y
66CONFIG_INIT_ENV_ARG_LIMIT=32 69CONFIG_INIT_ENV_ARG_LIMIT=32
67CONFIG_LOCALVERSION="" 70CONFIG_LOCALVERSION=""
68CONFIG_LOCALVERSION_AUTO=y 71CONFIG_LOCALVERSION_AUTO=y
@@ -126,6 +129,7 @@ CONFIG_HAVE_IOREMAP_PROT=y
126CONFIG_HAVE_KPROBES=y 129CONFIG_HAVE_KPROBES=y
127CONFIG_HAVE_KRETPROBES=y 130CONFIG_HAVE_KRETPROBES=y
128CONFIG_HAVE_ARCH_TRACEHOOK=y 131CONFIG_HAVE_ARCH_TRACEHOOK=y
132CONFIG_USE_GENERIC_SMP_HELPERS=y
129# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 133# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
130CONFIG_SLABINFO=y 134CONFIG_SLABINFO=y
131CONFIG_RT_MUTEXES=y 135CONFIG_RT_MUTEXES=y
@@ -138,6 +142,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
138CONFIG_MODVERSIONS=y 142CONFIG_MODVERSIONS=y
139# CONFIG_MODULE_SRCVERSION_ALL is not set 143# CONFIG_MODULE_SRCVERSION_ALL is not set
140CONFIG_KMOD=y 144CONFIG_KMOD=y
145CONFIG_STOP_MACHINE=y
141CONFIG_BLOCK=y 146CONFIG_BLOCK=y
142CONFIG_LBD=y 147CONFIG_LBD=y
143# CONFIG_BLK_DEV_IO_TRACE is not set 148# CONFIG_BLK_DEV_IO_TRACE is not set
@@ -197,6 +202,7 @@ CONFIG_PPC_I8259=y
197# CONFIG_CPM2 is not set 202# CONFIG_CPM2 is not set
198CONFIG_FSL_ULI1575=y 203CONFIG_FSL_ULI1575=y
199# CONFIG_MPC8xxx_GPIO is not set 204# CONFIG_MPC8xxx_GPIO is not set
205# CONFIG_SIMPLE_GPIO is not set
200 206
201# 207#
202# Kernel options 208# Kernel options
@@ -224,6 +230,7 @@ CONFIG_MATH_EMULATION=y
224CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y 230CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
225CONFIG_ARCH_HAS_WALK_MEMORY=y 231CONFIG_ARCH_HAS_WALK_MEMORY=y
226CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y 232CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
233# CONFIG_IRQ_ALL_CPUS is not set
227CONFIG_ARCH_FLATMEM_ENABLE=y 234CONFIG_ARCH_FLATMEM_ENABLE=y
228CONFIG_ARCH_POPULATES_NODE_MAP=y 235CONFIG_ARCH_POPULATES_NODE_MAP=y
229CONFIG_SELECT_MEMORY_MODEL=y 236CONFIG_SELECT_MEMORY_MODEL=y
@@ -241,6 +248,9 @@ CONFIG_ZONE_DMA_FLAG=1
241CONFIG_BOUNCE=y 248CONFIG_BOUNCE=y
242CONFIG_VIRT_TO_BUS=y 249CONFIG_VIRT_TO_BUS=y
243CONFIG_UNEVICTABLE_LRU=y 250CONFIG_UNEVICTABLE_LRU=y
251CONFIG_PPC_4K_PAGES=y
252# CONFIG_PPC_16K_PAGES is not set
253# CONFIG_PPC_64K_PAGES is not set
244CONFIG_FORCE_MAX_ZONEORDER=11 254CONFIG_FORCE_MAX_ZONEORDER=11
245CONFIG_PROC_DEVICETREE=y 255CONFIG_PROC_DEVICETREE=y
246# CONFIG_CMDLINE_BOOL is not set 256# CONFIG_CMDLINE_BOOL is not set
@@ -443,8 +453,10 @@ CONFIG_MISC_DEVICES=y
443# CONFIG_EEPROM_93CX6 is not set 453# CONFIG_EEPROM_93CX6 is not set
444# CONFIG_SGI_IOC4 is not set 454# CONFIG_SGI_IOC4 is not set
445# CONFIG_TIFM_CORE is not set 455# CONFIG_TIFM_CORE is not set
456# CONFIG_ICS932S401 is not set
446# CONFIG_ENCLOSURE_SERVICES is not set 457# CONFIG_ENCLOSURE_SERVICES is not set
447# CONFIG_HP_ILO is not set 458# CONFIG_HP_ILO is not set
459# CONFIG_C2PORT is not set
448CONFIG_HAVE_IDE=y 460CONFIG_HAVE_IDE=y
449# CONFIG_IDE is not set 461# CONFIG_IDE is not set
450 462
@@ -784,6 +796,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
784CONFIG_UNIX98_PTYS=y 796CONFIG_UNIX98_PTYS=y
785CONFIG_LEGACY_PTYS=y 797CONFIG_LEGACY_PTYS=y
786CONFIG_LEGACY_PTY_COUNT=256 798CONFIG_LEGACY_PTY_COUNT=256
799# CONFIG_HVC_UDBG is not set
787# CONFIG_IPMI_HANDLER is not set 800# CONFIG_IPMI_HANDLER is not set
788CONFIG_HW_RANDOM=y 801CONFIG_HW_RANDOM=y
789CONFIG_NVRAM=y 802CONFIG_NVRAM=y
@@ -869,11 +882,11 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
869# CONFIG_THERMAL is not set 882# CONFIG_THERMAL is not set
870# CONFIG_THERMAL_HWMON is not set 883# CONFIG_THERMAL_HWMON is not set
871# CONFIG_WATCHDOG is not set 884# CONFIG_WATCHDOG is not set
885CONFIG_SSB_POSSIBLE=y
872 886
873# 887#
874# Sonics Silicon Backplane 888# Sonics Silicon Backplane
875# 889#
876CONFIG_SSB_POSSIBLE=y
877# CONFIG_SSB is not set 890# CONFIG_SSB is not set
878 891
879# 892#
@@ -886,14 +899,7 @@ CONFIG_SSB_POSSIBLE=y
886# CONFIG_PMIC_DA903X is not set 899# CONFIG_PMIC_DA903X is not set
887# CONFIG_MFD_WM8400 is not set 900# CONFIG_MFD_WM8400 is not set
888# CONFIG_MFD_WM8350_I2C is not set 901# CONFIG_MFD_WM8350_I2C is not set
889
890#
891# Voltage and Current regulators
892#
893# CONFIG_REGULATOR is not set 902# CONFIG_REGULATOR is not set
894# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
895# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
896# CONFIG_REGULATOR_BQ24022 is not set
897 903
898# 904#
899# Multimedia devices 905# Multimedia devices
@@ -1252,11 +1258,11 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1252# CONFIG_USB_TMC is not set 1258# CONFIG_USB_TMC is not set
1253 1259
1254# 1260#
1255# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 1261# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
1256# 1262#
1257 1263
1258# 1264#
1259# may also be needed; see USB_STORAGE Help for more information 1265# see USB_STORAGE Help for more information
1260# 1266#
1261CONFIG_USB_STORAGE=y 1267CONFIG_USB_STORAGE=y
1262# CONFIG_USB_STORAGE_DEBUG is not set 1268# CONFIG_USB_STORAGE_DEBUG is not set
@@ -1348,6 +1354,7 @@ CONFIG_RTC_INTF_DEV=y
1348# CONFIG_RTC_DRV_M41T80 is not set 1354# CONFIG_RTC_DRV_M41T80 is not set
1349# CONFIG_RTC_DRV_S35390A is not set 1355# CONFIG_RTC_DRV_S35390A is not set
1350# CONFIG_RTC_DRV_FM3130 is not set 1356# CONFIG_RTC_DRV_FM3130 is not set
1357# CONFIG_RTC_DRV_RX8581 is not set
1351 1358
1352# 1359#
1353# SPI RTC drivers 1360# SPI RTC drivers
@@ -1624,6 +1631,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y
1624# CONFIG_SAMPLES is not set 1631# CONFIG_SAMPLES is not set
1625CONFIG_HAVE_ARCH_KGDB=y 1632CONFIG_HAVE_ARCH_KGDB=y
1626# CONFIG_KGDB is not set 1633# CONFIG_KGDB is not set
1634CONFIG_PRINT_STACK_DEPTH=64
1627# CONFIG_DEBUG_STACKOVERFLOW is not set 1635# CONFIG_DEBUG_STACKOVERFLOW is not set
1628# CONFIG_DEBUG_STACK_USAGE is not set 1636# CONFIG_DEBUG_STACK_USAGE is not set
1629# CONFIG_DEBUG_PAGEALLOC is not set 1637# CONFIG_DEBUG_PAGEALLOC is not set
@@ -1649,11 +1657,16 @@ CONFIG_CRYPTO=y
1649# 1657#
1650# CONFIG_CRYPTO_FIPS is not set 1658# CONFIG_CRYPTO_FIPS is not set
1651CONFIG_CRYPTO_ALGAPI=y 1659CONFIG_CRYPTO_ALGAPI=y
1660CONFIG_CRYPTO_ALGAPI2=y
1652CONFIG_CRYPTO_AEAD=y 1661CONFIG_CRYPTO_AEAD=y
1662CONFIG_CRYPTO_AEAD2=y
1653CONFIG_CRYPTO_BLKCIPHER=y 1663CONFIG_CRYPTO_BLKCIPHER=y
1664CONFIG_CRYPTO_BLKCIPHER2=y
1654CONFIG_CRYPTO_HASH=y 1665CONFIG_CRYPTO_HASH=y
1655CONFIG_CRYPTO_RNG=y 1666CONFIG_CRYPTO_HASH2=y
1667CONFIG_CRYPTO_RNG2=y
1656CONFIG_CRYPTO_MANAGER=y 1668CONFIG_CRYPTO_MANAGER=y
1669CONFIG_CRYPTO_MANAGER2=y
1657# CONFIG_CRYPTO_GF128MUL is not set 1670# CONFIG_CRYPTO_GF128MUL is not set
1658# CONFIG_CRYPTO_NULL is not set 1671# CONFIG_CRYPTO_NULL is not set
1659# CONFIG_CRYPTO_CRYPTD is not set 1672# CONFIG_CRYPTO_CRYPTD is not set
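
In short, the regenerated defconfig switches the MPC8572DS board to SMP (CONFIG_SMP=y with NR_CPUS=2), which in turn pulls in the usual SMP plumbing (GENERIC_TBSYNC, LOCK_KERNEL replacing BROKEN_ON_SMP, USE_GENERIC_SMP_HELPERS, STOP_MACHINE); the remaining churn is routine option reshuffling from refreshing against 2.6.28-rc8.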
diff --git a/arch/powerpc/include/asm/cell-pmu.h b/arch/powerpc/include/asm/cell-pmu.h
index 8066eede3a0c..b4b7338ad79e 100644
--- a/arch/powerpc/include/asm/cell-pmu.h
+++ b/arch/powerpc/include/asm/cell-pmu.h
@@ -37,9 +37,11 @@
37#define CBE_PM_STOP_AT_MAX 0x40000000 37#define CBE_PM_STOP_AT_MAX 0x40000000
38#define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3) 38#define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3)
39#define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28) 39#define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28)
40#define CBE_PM_TRACE_BUF_OVFLW(bit) (((bit) & 0x1) << 17)
40#define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18) 41#define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18)
41#define CBE_PM_FREEZE_ALL_CTRS 0x00100000 42#define CBE_PM_FREEZE_ALL_CTRS 0x00100000
42#define CBE_PM_ENABLE_EXT_TRACE 0x00008000 43#define CBE_PM_ENABLE_EXT_TRACE 0x00008000
44#define CBE_PM_SPU_ADDR_TRACE_SET(msk) (((msk) & 0x3) << 9)
43 45
44/* Macros for the trace_address register. */ 46/* Macros for the trace_address register. */
45#define CBE_PM_TRACE_BUF_FULL 0x00000800 47#define CBE_PM_TRACE_BUF_FULL 0x00000800
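
The two new macros are plain shift-and-mask helpers; a standalone sanity check (the expected constants below are just arithmetic, not values taken from the patch) behaves as follows:

#include <assert.h>

#define CBE_PM_TRACE_BUF_OVFLW(bit)    (((bit) & 0x1) << 17)
#define CBE_PM_SPU_ADDR_TRACE_SET(msk) (((msk) & 0x3) << 9)

int main(void)
{
	assert(CBE_PM_TRACE_BUF_OVFLW(1) == 0x00020000);       /* bit 17 */
	assert(CBE_PM_SPU_ADDR_TRACE_SET(0x3) == 0x00000600);  /* bits 10:9 */
	return 0;
}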
diff --git a/arch/powerpc/include/asm/ioctls.h b/arch/powerpc/include/asm/ioctls.h
index 279a6229584b..1842186d872c 100644
--- a/arch/powerpc/include/asm/ioctls.h
+++ b/arch/powerpc/include/asm/ioctls.h
@@ -89,6 +89,8 @@
89#define TIOCSBRK 0x5427 /* BSD compatibility */ 89#define TIOCSBRK 0x5427 /* BSD compatibility */
90#define TIOCCBRK 0x5428 /* BSD compatibility */ 90#define TIOCCBRK 0x5428 /* BSD compatibility */
91#define TIOCGSID 0x5429 /* Return the session ID of FD */ 91#define TIOCGSID 0x5429 /* Return the session ID of FD */
92#define TIOCGRS485 0x542e
93#define TIOCSRS485 0x542f
92#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ 94#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
93#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ 95#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
94 96
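
These two numbers wire up the RS-485 ioctls for powerpc. A minimal userspace sketch of their intended use (assuming kernel headers that provide struct serial_rs485 and SER_RS485_ENABLED, and a device path chosen for illustration):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TIOCGRS485, &rs485) == 0) {   /* read current state */
		rs485.flags |= SER_RS485_ENABLED;   /* enable RS-485 mode */
		if (ioctl(fd, TIOCSRS485, &rs485))  /* write it back */
			perror("TIOCSRS485");
	}
	close(fd);
	return 0;
}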
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 6dbffc981702..7e06b43720d3 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -48,63 +48,8 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
48{ 48{
49 if (oldregs) 49 if (oldregs)
50 memcpy(newregs, oldregs, sizeof(*newregs)); 50 memcpy(newregs, oldregs, sizeof(*newregs));
51#ifdef __powerpc64__
52 else {
53 /* FIXME Merge this with xmon_save_regs ?? */
54 unsigned long tmp1, tmp2;
55 __asm__ __volatile__ (
56 "std 0,0(%2)\n"
57 "std 1,8(%2)\n"
58 "std 2,16(%2)\n"
59 "std 3,24(%2)\n"
60 "std 4,32(%2)\n"
61 "std 5,40(%2)\n"
62 "std 6,48(%2)\n"
63 "std 7,56(%2)\n"
64 "std 8,64(%2)\n"
65 "std 9,72(%2)\n"
66 "std 10,80(%2)\n"
67 "std 11,88(%2)\n"
68 "std 12,96(%2)\n"
69 "std 13,104(%2)\n"
70 "std 14,112(%2)\n"
71 "std 15,120(%2)\n"
72 "std 16,128(%2)\n"
73 "std 17,136(%2)\n"
74 "std 18,144(%2)\n"
75 "std 19,152(%2)\n"
76 "std 20,160(%2)\n"
77 "std 21,168(%2)\n"
78 "std 22,176(%2)\n"
79 "std 23,184(%2)\n"
80 "std 24,192(%2)\n"
81 "std 25,200(%2)\n"
82 "std 26,208(%2)\n"
83 "std 27,216(%2)\n"
84 "std 28,224(%2)\n"
85 "std 29,232(%2)\n"
86 "std 30,240(%2)\n"
87 "std 31,248(%2)\n"
88 "mfmsr %0\n"
89 "std %0, 264(%2)\n"
90 "mfctr %0\n"
91 "std %0, 280(%2)\n"
92 "mflr %0\n"
93 "std %0, 288(%2)\n"
94 "bl 1f\n"
95 "1: mflr %1\n"
96 "std %1, 256(%2)\n"
97 "mtlr %0\n"
98 "mfxer %0\n"
99 "std %0, 296(%2)\n"
100 : "=&r" (tmp1), "=&r" (tmp2)
101 : "b" (newregs)
102 : "memory");
103 }
104#else
105 else 51 else
106 ppc_save_regs(newregs); 52 ppc_save_regs(newregs);
107#endif /* __powerpc64__ */
108} 53}
109 54
110extern void kexec_smp_wait(void); /* get and clear naca physid, wait for 55extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
index 95035c602ba6..639dc96077ab 100644
--- a/arch/powerpc/include/asm/oprofile_impl.h
+++ b/arch/powerpc/include/asm/oprofile_impl.h
@@ -32,6 +32,12 @@ struct op_system_config {
32 unsigned long mmcr0; 32 unsigned long mmcr0;
33 unsigned long mmcr1; 33 unsigned long mmcr1;
34 unsigned long mmcra; 34 unsigned long mmcra;
35#ifdef CONFIG_OPROFILE_CELL
 36 /* Register for the oprofile user tool to check cell kernel profiling
 37 * support.
38 */
39 unsigned long cell_support;
40#endif
35#endif 41#endif
36 unsigned long enable_kernel; 42 unsigned long enable_kernel;
37 unsigned long enable_user; 43 unsigned long enable_user;
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index cff30c0ef1ff..eead5c67197a 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -320,6 +320,7 @@ enum ps3_match_id {
320 320
321enum ps3_match_sub_id { 321enum ps3_match_sub_id {
322 PS3_MATCH_SUB_ID_GPU_FB = 1, 322 PS3_MATCH_SUB_ID_GPU_FB = 1,
323 PS3_MATCH_SUB_ID_GPU_RAMDISK = 2,
323}; 324};
324 325
325#define PS3_MODULE_ALIAS_EHCI "ps3:1:0" 326#define PS3_MODULE_ALIAS_EHCI "ps3:1:0"
@@ -332,6 +333,7 @@ enum ps3_match_sub_id {
332#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8:0" 333#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8:0"
333#define PS3_MODULE_ALIAS_SOUND "ps3:9:0" 334#define PS3_MODULE_ALIAS_SOUND "ps3:9:0"
334#define PS3_MODULE_ALIAS_GPU_FB "ps3:10:1" 335#define PS3_MODULE_ALIAS_GPU_FB "ps3:10:1"
336#define PS3_MODULE_ALIAS_GPU_RAMDISK "ps3:10:2"
335#define PS3_MODULE_ALIAS_LPM "ps3:11:0" 337#define PS3_MODULE_ALIAS_LPM "ps3:11:0"
336 338
337enum ps3_system_bus_device_type { 339enum ps3_system_bus_device_type {
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index edee15d269ea..a0a15311d0d8 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -17,6 +17,8 @@
17#ifdef __KERNEL__ 17#ifdef __KERNEL__
18 18
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/errno.h>
21#include <linux/err.h>
20#include <asm/cpm.h> 22#include <asm/cpm.h>
21#include <asm/immap_qe.h> 23#include <asm/immap_qe.h>
22 24
@@ -84,7 +86,11 @@ static inline bool qe_clock_is_brg(enum qe_clock clk)
84extern spinlock_t cmxgcr_lock; 86extern spinlock_t cmxgcr_lock;
85 87
86/* Export QE common operations */ 88/* Export QE common operations */
89#ifdef CONFIG_QUICC_ENGINE
87extern void __init qe_reset(void); 90extern void __init qe_reset(void);
91#else
92static inline void qe_reset(void) {}
93#endif
88 94
89/* QE PIO */ 95/* QE PIO */
90#define QE_PIO_PINS 32 96#define QE_PIO_PINS 32
@@ -101,16 +107,43 @@ struct qe_pio_regs {
101#endif 107#endif
102}; 108};
103 109
104extern int par_io_init(struct device_node *np);
105extern int par_io_of_config(struct device_node *np);
106#define QE_PIO_DIR_IN 2 110#define QE_PIO_DIR_IN 2
107#define QE_PIO_DIR_OUT 1 111#define QE_PIO_DIR_OUT 1
108extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, 112extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin,
109 int dir, int open_drain, int assignment, 113 int dir, int open_drain, int assignment,
110 int has_irq); 114 int has_irq);
115#ifdef CONFIG_QUICC_ENGINE
116extern int par_io_init(struct device_node *np);
117extern int par_io_of_config(struct device_node *np);
111extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, 118extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
112 int assignment, int has_irq); 119 int assignment, int has_irq);
113extern int par_io_data_set(u8 port, u8 pin, u8 val); 120extern int par_io_data_set(u8 port, u8 pin, u8 val);
121#else
122static inline int par_io_init(struct device_node *np) { return -ENOSYS; }
123static inline int par_io_of_config(struct device_node *np) { return -ENOSYS; }
124static inline int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
125 int assignment, int has_irq) { return -ENOSYS; }
126static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; }
127#endif /* CONFIG_QUICC_ENGINE */
128
129/*
130 * Pin multiplexing functions.
131 */
132struct qe_pin;
133#ifdef CONFIG_QE_GPIO
134extern struct qe_pin *qe_pin_request(struct device_node *np, int index);
135extern void qe_pin_free(struct qe_pin *qe_pin);
136extern void qe_pin_set_gpio(struct qe_pin *qe_pin);
137extern void qe_pin_set_dedicated(struct qe_pin *pin);
138#else
139static inline struct qe_pin *qe_pin_request(struct device_node *np, int index)
140{
141 return ERR_PTR(-ENOSYS);
142}
143static inline void qe_pin_free(struct qe_pin *qe_pin) {}
144static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {}
145static inline void qe_pin_set_dedicated(struct qe_pin *pin) {}
146#endif /* CONFIG_QE_GPIO */
114 147
115/* QE internal API */ 148/* QE internal API */
116int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input); 149int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input);
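
The pattern here (real declarations under CONFIG_QUICC_ENGINE or CONFIG_QE_GPIO, inline stubs returning -ENOSYS or ERR_PTR(-ENOSYS) otherwise) lets shared platform code call these helpers unconditionally instead of sprinkling #ifdefs at every call site. A hypothetical caller fragment, not from the patch, that compiles either way:

/* Hypothetical board-setup fragment: with the stubs above it builds
 * whether or not QE support is configured in. */
static int board_setup_qe_pins(struct device_node *np)
{
	struct qe_pin *pin;
	int ret;

	ret = par_io_of_config(np);   /* -ENOSYS when QE is compiled out */
	if (ret)
		return ret;

	pin = qe_pin_request(np, 0);  /* ERR_PTR(-ENOSYS) without QE_GPIO */
	if (IS_ERR(pin))
		return PTR_ERR(pin);

	qe_pin_set_gpio(pin);         /* use the pin as a GPIO... */
	qe_pin_free(pin);             /* ...then hand it back */
	return 0;
}

IS_ERR() and PTR_ERR() come from linux/err.h, which this same patch adds to the header's includes.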
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
index 56a7745ca343..cf519663a791 100644
--- a/arch/powerpc/include/asm/qe_ic.h
+++ b/arch/powerpc/include/asm/qe_ic.h
@@ -17,6 +17,9 @@
17 17
18#include <linux/irq.h> 18#include <linux/irq.h>
19 19
20struct device_node;
21struct qe_ic;
22
20#define NUM_OF_QE_IC_GROUPS 6 23#define NUM_OF_QE_IC_GROUPS 6
21 24
22/* Flags when we init the QE IC */ 25/* Flags when we init the QE IC */
@@ -54,17 +57,27 @@ enum qe_ic_grp_id {
54 QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */ 57 QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
55}; 58};
56 59
60#ifdef CONFIG_QUICC_ENGINE
57void qe_ic_init(struct device_node *node, unsigned int flags, 61void qe_ic_init(struct device_node *node, unsigned int flags,
58 void (*low_handler)(unsigned int irq, struct irq_desc *desc), 62 void (*low_handler)(unsigned int irq, struct irq_desc *desc),
59 void (*high_handler)(unsigned int irq, struct irq_desc *desc)); 63 void (*high_handler)(unsigned int irq, struct irq_desc *desc));
64unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
65unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
66#else
67static inline void qe_ic_init(struct device_node *node, unsigned int flags,
68 void (*low_handler)(unsigned int irq, struct irq_desc *desc),
69 void (*high_handler)(unsigned int irq, struct irq_desc *desc))
70{}
71static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
72{ return 0; }
73static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
74{ return 0; }
75#endif /* CONFIG_QUICC_ENGINE */
76
60void qe_ic_set_highest_priority(unsigned int virq, int high); 77void qe_ic_set_highest_priority(unsigned int virq, int high);
61int qe_ic_set_priority(unsigned int virq, unsigned int priority); 78int qe_ic_set_priority(unsigned int virq, unsigned int priority);
62int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); 79int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
63 80
64struct qe_ic;
65unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
66unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
67
68static inline void qe_ic_cascade_low_ipic(unsigned int irq, 81static inline void qe_ic_cascade_low_ipic(unsigned int irq,
69 struct irq_desc *desc) 82 struct irq_desc *desc)
70{ 83{
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 8b2eb044270a..0ab8d869e3d6 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -128,7 +128,7 @@ struct spu {
128 int number; 128 int number;
129 unsigned int irqs[3]; 129 unsigned int irqs[3];
130 u32 node; 130 u32 node;
131 u64 flags; 131 unsigned long flags;
132 u64 class_0_pending; 132 u64 class_0_pending;
133 u64 class_0_dar; 133 u64 class_0_dar;
134 u64 class_1_dar; 134 u64 class_1_dar;
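
Retyping flags from u64 to unsigned long is presumably for the generic bitop helpers: set_bit(), test_bit() and friends take an unsigned long *, so handing them the address of a u64 would touch the wrong word on 32-bit big-endian builds.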
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1308a86e9070..8d1a419df35d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -29,7 +29,7 @@ endif
29obj-y := cputable.o ptrace.o syscalls.o \ 29obj-y := cputable.o ptrace.o syscalls.o \
30 irq.o align.o signal_32.o pmc.o vdso.o \ 30 irq.o align.o signal_32.o pmc.o vdso.o \
31 init_task.o process.o systbl.o idle.o \ 31 init_task.o process.o systbl.o idle.o \
32 signal.o sysfs.o 32 signal.o sysfs.o cacheinfo.o
33obj-y += vdso32/ 33obj-y += vdso32/
34obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ 34obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
35 signal_64.o ptrace32.o \ 35 signal_64.o ptrace32.o \
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
new file mode 100644
index 000000000000..b33f0417a4bf
--- /dev/null
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -0,0 +1,837 @@
1/*
2 * Processor cache information made available to userspace via sysfs;
3 * intended to be compatible with x86 intel_cacheinfo implementation.
4 *
5 * Copyright 2008 IBM Corporation
6 * Author: Nathan Lynch
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 */
12
13#include <linux/cpu.h>
14#include <linux/cpumask.h>
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/kobject.h>
18#include <linux/list.h>
19#include <linux/notifier.h>
20#include <linux/of.h>
21#include <linux/percpu.h>
22#include <asm/prom.h>
23
24#include "cacheinfo.h"
25
26/* per-cpu object for tracking:
27 * - a "cache" kobject for the top-level directory
28 * - a list of "index" objects representing the cpu's local cache hierarchy
29 */
30struct cache_dir {
31 struct kobject *kobj; /* bare (not embedded) kobject for cache
32 * directory */
33 struct cache_index_dir *index; /* list of index objects */
34};
35
36/* "index" object: each cpu's cache directory has an index
37 * subdirectory corresponding to a cache object associated with the
38 * cpu. This object's lifetime is managed via the embedded kobject.
39 */
40struct cache_index_dir {
41 struct kobject kobj;
42 struct cache_index_dir *next; /* next index in parent directory */
43 struct cache *cache;
44};
45
46/* Template for determining which OF properties to query for a given
47 * cache type */
48struct cache_type_info {
49 const char *name;
50 const char *size_prop;
51
52 /* Allow for both [di]-cache-line-size and
53 * [di]-cache-block-size properties. According to the PowerPC
54 * Processor binding, -line-size should be provided if it
55 * differs from the cache block size (that which is operated
56 * on by cache instructions), so we look for -line-size first.
57 * See cache_get_line_size(). */
58
59 const char *line_size_props[2];
60 const char *nr_sets_prop;
61};
62
63/* These are used to index the cache_type_info array. */
64#define CACHE_TYPE_UNIFIED 0
65#define CACHE_TYPE_INSTRUCTION 1
66#define CACHE_TYPE_DATA 2
67
68static const struct cache_type_info cache_type_info[] = {
69 {
70 /* PowerPC Processor binding says the [di]-cache-*
71 * must be equal on unified caches, so just use
72 * d-cache properties. */
73 .name = "Unified",
74 .size_prop = "d-cache-size",
75 .line_size_props = { "d-cache-line-size",
76 "d-cache-block-size", },
77 .nr_sets_prop = "d-cache-sets",
78 },
79 {
80 .name = "Instruction",
81 .size_prop = "i-cache-size",
82 .line_size_props = { "i-cache-line-size",
83 "i-cache-block-size", },
84 .nr_sets_prop = "i-cache-sets",
85 },
86 {
87 .name = "Data",
88 .size_prop = "d-cache-size",
89 .line_size_props = { "d-cache-line-size",
90 "d-cache-block-size", },
91 .nr_sets_prop = "d-cache-sets",
92 },
93};
94
95/* Cache object: each instance of this corresponds to a distinct cache
96 * in the system. There are separate objects for Harvard caches: one
97 * each for instruction and data, and each refers to the same OF node.
98 * The refcount of the OF node is elevated for the lifetime of the
99 * cache object. A cache object is released when its shared_cpu_map
100 * is cleared (see cache_cpu_clear).
101 *
102 * A cache object is on two lists: an unsorted global list
103 * (cache_list) of cache objects; and a singly-linked list
104 * representing the local cache hierarchy, which is ordered by level
105 * (e.g. L1d -> L1i -> L2 -> L3).
106 */
107struct cache {
108 struct device_node *ofnode; /* OF node for this cache, may be cpu */
109 struct cpumask shared_cpu_map; /* online CPUs using this cache */
110 int type; /* split cache disambiguation */
111 int level; /* level not explicit in device tree */
112 struct list_head list; /* global list of cache objects */
113 struct cache *next_local; /* next cache of >= level */
114};
115
116static DEFINE_PER_CPU(struct cache_dir *, cache_dir);
117
118/* traversal/modification of this list occurs only at cpu hotplug time;
119 * access is serialized by cpu hotplug locking
120 */
121static LIST_HEAD(cache_list);
122
123static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
124{
125 return container_of(k, struct cache_index_dir, kobj);
126}
127
128static const char *cache_type_string(const struct cache *cache)
129{
130 return cache_type_info[cache->type].name;
131}
132
133static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
134{
135 cache->type = type;
136 cache->level = level;
137 cache->ofnode = of_node_get(ofnode);
138 INIT_LIST_HEAD(&cache->list);
139 list_add(&cache->list, &cache_list);
140}
141
142static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
143{
144 struct cache *cache;
145
146 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
147 if (cache)
148 cache_init(cache, type, level, ofnode);
149
150 return cache;
151}
152
153static void release_cache_debugcheck(struct cache *cache)
154{
155 struct cache *iter;
156
157 list_for_each_entry(iter, &cache_list, list)
158 WARN_ONCE(iter->next_local == cache,
159 "cache for %s(%s) refers to cache for %s(%s)\n",
160 iter->ofnode->full_name,
161 cache_type_string(iter),
162 cache->ofnode->full_name,
163 cache_type_string(cache));
164}
165
166static void release_cache(struct cache *cache)
167{
168 if (!cache)
169 return;
170
171 pr_debug("freeing L%d %s cache for %s\n", cache->level,
172 cache_type_string(cache), cache->ofnode->full_name);
173
174 release_cache_debugcheck(cache);
175 list_del(&cache->list);
176 of_node_put(cache->ofnode);
177 kfree(cache);
178}
179
180static void cache_cpu_set(struct cache *cache, int cpu)
181{
182 struct cache *next = cache;
183
184 while (next) {
185 WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
186 "CPU %i already accounted in %s(%s)\n",
187 cpu, next->ofnode->full_name,
188 cache_type_string(next));
189 cpumask_set_cpu(cpu, &next->shared_cpu_map);
190 next = next->next_local;
191 }
192}
193
194static int cache_size(const struct cache *cache, unsigned int *ret)
195{
196 const char *propname;
197 const u32 *cache_size;
198
199 propname = cache_type_info[cache->type].size_prop;
200
201 cache_size = of_get_property(cache->ofnode, propname, NULL);
202 if (!cache_size)
203 return -ENODEV;
204
205 *ret = *cache_size;
206 return 0;
207}
208
209static int cache_size_kb(const struct cache *cache, unsigned int *ret)
210{
211 unsigned int size;
212
213 if (cache_size(cache, &size))
214 return -ENODEV;
215
216 *ret = size / 1024;
217 return 0;
218}
219
220/* not cache_line_size() because that's a macro in include/linux/cache.h */
221static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
222{
223 const u32 *line_size;
224 int i, lim;
225
226 lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);
227
228 for (i = 0; i < lim; i++) {
229 const char *propname;
230
231 propname = cache_type_info[cache->type].line_size_props[i];
232 line_size = of_get_property(cache->ofnode, propname, NULL);
233 if (line_size)
234 break;
235 }
236
237 if (!line_size)
238 return -ENODEV;
239
240 *ret = *line_size;
241 return 0;
242}
243
244static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
245{
246 const char *propname;
247 const u32 *nr_sets;
248
249 propname = cache_type_info[cache->type].nr_sets_prop;
250
251 nr_sets = of_get_property(cache->ofnode, propname, NULL);
252 if (!nr_sets)
253 return -ENODEV;
254
255 *ret = *nr_sets;
256 return 0;
257}
258
259static int cache_associativity(const struct cache *cache, unsigned int *ret)
260{
261 unsigned int line_size;
262 unsigned int nr_sets;
263 unsigned int size;
264
265 if (cache_nr_sets(cache, &nr_sets))
266 goto err;
267
268 /* If the cache is fully associative, there is no need to
269 * check the other properties.
270 */
271 if (nr_sets == 1) {
272 *ret = 0;
273 return 0;
274 }
275
276 if (cache_get_line_size(cache, &line_size))
277 goto err;
278 if (cache_size(cache, &size))
279 goto err;
280
281 if (!(nr_sets > 0 && size > 0 && line_size > 0))
282 goto err;
283
284 *ret = (size / nr_sets) / line_size;
285 return 0;
286err:
287 return -ENODEV;
288}
289
290/* helper for dealing with split caches */
291static struct cache *cache_find_first_sibling(struct cache *cache)
292{
293 struct cache *iter;
294
295 if (cache->type == CACHE_TYPE_UNIFIED)
296 return cache;
297
298 list_for_each_entry(iter, &cache_list, list)
299 if (iter->ofnode == cache->ofnode && iter->next_local == cache)
300 return iter;
301
302 return cache;
303}
304
305/* return the first cache on a local list matching node */
306static struct cache *cache_lookup_by_node(const struct device_node *node)
307{
308 struct cache *cache = NULL;
309 struct cache *iter;
310
311 list_for_each_entry(iter, &cache_list, list) {
312 if (iter->ofnode != node)
313 continue;
314 cache = cache_find_first_sibling(iter);
315 break;
316 }
317
318 return cache;
319}
320
321static bool cache_node_is_unified(const struct device_node *np)
322{
323 return of_get_property(np, "cache-unified", NULL);
324}
325
326static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
327{
328 struct cache *cache;
329
330 pr_debug("creating L%d ucache for %s\n", level, node->full_name);
331
332 cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
333
334 return cache;
335}
336
337static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
338{
339 struct cache *dcache, *icache;
340
341 pr_debug("creating L%d dcache and icache for %s\n", level,
342 node->full_name);
343
344 dcache = new_cache(CACHE_TYPE_DATA, level, node);
345 icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);
346
347 if (!dcache || !icache)
348 goto err;
349
350 dcache->next_local = icache;
351
352 return dcache;
353err:
354 release_cache(dcache);
355 release_cache(icache);
356 return NULL;
357}
358
359static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
360{
361 struct cache *cache;
362
363 if (cache_node_is_unified(node))
364 cache = cache_do_one_devnode_unified(node, level);
365 else
366 cache = cache_do_one_devnode_split(node, level);
367
368 return cache;
369}
370
371static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
372{
373 struct cache *cache;
374
375 cache = cache_lookup_by_node(node);
376
377 WARN_ONCE(cache && cache->level != level,
378 "cache level mismatch on lookup (got %d, expected %d)\n",
379 cache->level, level);
380
381 if (!cache)
382 cache = cache_do_one_devnode(node, level);
383
384 return cache;
385}
386
387static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
388{
389 while (smaller->next_local) {
390 if (smaller->next_local == bigger)
391 return; /* already linked */
392 smaller = smaller->next_local;
393 }
394
395 smaller->next_local = bigger;
396}
397
398static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
399{
400 WARN_ON_ONCE(cache->level != 1);
401 WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
402}
403
404static void __cpuinit do_subsidiary_caches(struct cache *cache)
405{
406 struct device_node *subcache_node;
407 int level = cache->level;
408
409 do_subsidiary_caches_debugcheck(cache);
410
411 while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
412 struct cache *subcache;
413
414 level++;
415 subcache = cache_lookup_or_instantiate(subcache_node, level);
416 of_node_put(subcache_node);
417 if (!subcache)
418 break;
419
420 link_cache_lists(cache, subcache);
421 cache = subcache;
422 }
423}
424
425static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
426{
427 struct device_node *cpu_node;
428 struct cache *cpu_cache = NULL;
429
430 pr_debug("creating cache object(s) for CPU %i\n", cpu_id);
431
432 cpu_node = of_get_cpu_node(cpu_id, NULL);
433 WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
434 if (!cpu_node)
435 goto out;
436
437 cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
438 if (!cpu_cache)
439 goto out;
440
441 do_subsidiary_caches(cpu_cache);
442
443 cache_cpu_set(cpu_cache, cpu_id);
444out:
445 of_node_put(cpu_node);
446
447 return cpu_cache;
448}
449
450static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
451{
452 struct cache_dir *cache_dir;
453 struct sys_device *sysdev;
454 struct kobject *kobj = NULL;
455
456 sysdev = get_cpu_sysdev(cpu_id);
457 WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id);
458 if (!sysdev)
459 goto err;
460
461 kobj = kobject_create_and_add("cache", &sysdev->kobj);
462 if (!kobj)
463 goto err;
464
465 cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
466 if (!cache_dir)
467 goto err;
468
469 cache_dir->kobj = kobj;
470
471 WARN_ON_ONCE(per_cpu(cache_dir, cpu_id) != NULL);
472
473 per_cpu(cache_dir, cpu_id) = cache_dir;
474
475 return cache_dir;
476err:
477 kobject_put(kobj);
478 return NULL;
479}
480
481static void cache_index_release(struct kobject *kobj)
482{
483 struct cache_index_dir *index;
484
485 index = kobj_to_cache_index_dir(kobj);
486
487 pr_debug("freeing index directory for L%d %s cache\n",
488 index->cache->level, cache_type_string(index->cache));
489
490 kfree(index);
491}
492
493static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
494{
495 struct kobj_attribute *kobj_attr;
496
497 kobj_attr = container_of(attr, struct kobj_attribute, attr);
498
499 return kobj_attr->show(k, kobj_attr, buf);
500}
501
502static struct cache *index_kobj_to_cache(struct kobject *k)
503{
504 struct cache_index_dir *index;
505
506 index = kobj_to_cache_index_dir(k);
507
508 return index->cache;
509}
510
511static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
512{
513 unsigned int size_kb;
514 struct cache *cache;
515
516 cache = index_kobj_to_cache(k);
517
518 if (cache_size_kb(cache, &size_kb))
519 return -ENODEV;
520
521 return sprintf(buf, "%uK\n", size_kb);
522}
523
524static struct kobj_attribute cache_size_attr =
525 __ATTR(size, 0444, size_show, NULL);
526
527
528static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
529{
530 unsigned int line_size;
531 struct cache *cache;
532
533 cache = index_kobj_to_cache(k);
534
535 if (cache_get_line_size(cache, &line_size))
536 return -ENODEV;
537
538 return sprintf(buf, "%u\n", line_size);
539}
540
541static struct kobj_attribute cache_line_size_attr =
542 __ATTR(coherency_line_size, 0444, line_size_show, NULL);
543
544static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
545{
546 unsigned int nr_sets;
547 struct cache *cache;
548
549 cache = index_kobj_to_cache(k);
550
551 if (cache_nr_sets(cache, &nr_sets))
552 return -ENODEV;
553
554 return sprintf(buf, "%u\n", nr_sets);
555}
556
557static struct kobj_attribute cache_nr_sets_attr =
558 __ATTR(number_of_sets, 0444, nr_sets_show, NULL);
559
560static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
561{
562 unsigned int associativity;
563 struct cache *cache;
564
565 cache = index_kobj_to_cache(k);
566
567 if (cache_associativity(cache, &associativity))
568 return -ENODEV;
569
570 return sprintf(buf, "%u\n", associativity);
571}
572
573static struct kobj_attribute cache_assoc_attr =
574 __ATTR(ways_of_associativity, 0444, associativity_show, NULL);
575
576static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
577{
578 struct cache *cache;
579
580 cache = index_kobj_to_cache(k);
581
582 return sprintf(buf, "%s\n", cache_type_string(cache));
583}
584
585static struct kobj_attribute cache_type_attr =
586 __ATTR(type, 0444, type_show, NULL);
587
588static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
589{
590 struct cache_index_dir *index;
591 struct cache *cache;
592
593 index = kobj_to_cache_index_dir(k);
594 cache = index->cache;
595
596 return sprintf(buf, "%d\n", cache->level);
597}
598
599static struct kobj_attribute cache_level_attr =
600 __ATTR(level, 0444, level_show, NULL);
601
602static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
603{
604 struct cache_index_dir *index;
605 struct cache *cache;
606 int len;
607 int n = 0;
608
609 index = kobj_to_cache_index_dir(k);
610 cache = index->cache;
611 len = PAGE_SIZE - 2;
612
613 if (len > 1) {
614 n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
615 buf[n++] = '\n';
616 buf[n] = '\0';
617 }
618 return n;
619}
620
621static struct kobj_attribute cache_shared_cpu_map_attr =
622 __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
623
624/* Attributes which should always be created -- the kobject/sysfs core
625 * does this automatically via kobj_type->default_attrs. This is the
626 * minimum data required to uniquely identify a cache.
627 */
628static struct attribute *cache_index_default_attrs[] = {
629 &cache_type_attr.attr,
630 &cache_level_attr.attr,
631 &cache_shared_cpu_map_attr.attr,
632 NULL,
633};
634
635/* Attributes which should be created if the cache device node has the
636 * right properties -- see cacheinfo_create_index_opt_attrs
637 */
638static struct kobj_attribute *cache_index_opt_attrs[] = {
639 &cache_size_attr,
640 &cache_line_size_attr,
641 &cache_nr_sets_attr,
642 &cache_assoc_attr,
643};
644
645static struct sysfs_ops cache_index_ops = {
646 .show = cache_index_show,
647};
648
649static struct kobj_type cache_index_type = {
650 .release = cache_index_release,
651 .sysfs_ops = &cache_index_ops,
652 .default_attrs = cache_index_default_attrs,
653};
654
655static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
656{
657 const char *cache_name;
658 const char *cache_type;
659 struct cache *cache;
660 char *buf;
661 int i;
662
663 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
664 if (!buf)
665 return;
666
667 cache = dir->cache;
668 cache_name = cache->ofnode->full_name;
669 cache_type = cache_type_string(cache);
670
671 /* We don't want to create an attribute that can't provide a
672 * meaningful value. Check the return value of each optional
673 * attribute's ->show method before registering the
674 * attribute.
675 */
676 for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
677 struct kobj_attribute *attr;
678 ssize_t rc;
679
680 attr = cache_index_opt_attrs[i];
681
682 rc = attr->show(&dir->kobj, attr, buf);
683 if (rc <= 0) {
684 pr_debug("not creating %s attribute for "
685 "%s(%s) (rc = %zd)\n",
686 attr->attr.name, cache_name,
687 cache_type, rc);
688 continue;
689 }
690 if (sysfs_create_file(&dir->kobj, &attr->attr))
691 pr_debug("could not create %s attribute for %s(%s)\n",
692 attr->attr.name, cache_name, cache_type);
693 }
694
695 kfree(buf);
696}
697
698static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
699{
700 struct cache_index_dir *index_dir;
701 int rc;
702
703 index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
704 if (!index_dir)
705 goto err;
706
707 index_dir->cache = cache;
708
709 rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
710 cache_dir->kobj, "index%d", index);
711 if (rc)
712 goto err;
713
714 index_dir->next = cache_dir->index;
715 cache_dir->index = index_dir;
716
717 cacheinfo_create_index_opt_attrs(index_dir);
718
719 return;
720err:
721 kfree(index_dir);
722}
723
724static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
725{
726 struct cache_dir *cache_dir;
727 struct cache *cache;
728 int index = 0;
729
730 cache_dir = cacheinfo_create_cache_dir(cpu_id);
731 if (!cache_dir)
732 return;
733
734 cache = cache_list;
735 while (cache) {
736 cacheinfo_create_index_dir(cache, index, cache_dir);
737 index++;
738 cache = cache->next_local;
739 }
740}
741
742void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
743{
744 struct cache *cache;
745
746 cache = cache_chain_instantiate(cpu_id);
747 if (!cache)
748 return;
749
750 cacheinfo_sysfs_populate(cpu_id, cache);
751}
752
753#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */
754
755static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
756{
757 struct device_node *cpu_node;
758 struct cache *cache;
759
760 cpu_node = of_get_cpu_node(cpu_id, NULL);
761 WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
762 if (!cpu_node)
763 return NULL;
764
765 cache = cache_lookup_by_node(cpu_node);
766 of_node_put(cpu_node);
767
768 return cache;
769}
770
771static void remove_index_dirs(struct cache_dir *cache_dir)
772{
773 struct cache_index_dir *index;
774
775 index = cache_dir->index;
776
777 while (index) {
778 struct cache_index_dir *next;
779
780 next = index->next;
781 kobject_put(&index->kobj);
782 index = next;
783 }
784}
785
786static void remove_cache_dir(struct cache_dir *cache_dir)
787{
788 remove_index_dirs(cache_dir);
789
790 kobject_put(cache_dir->kobj);
791
792 kfree(cache_dir);
793}
794
795static void cache_cpu_clear(struct cache *cache, int cpu)
796{
797 while (cache) {
798 struct cache *next = cache->next_local;
799
800 WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
801 "CPU %i not accounted in %s(%s)\n",
802 cpu, cache->ofnode->full_name,
803 cache_type_string(cache));
804
805 cpumask_clear_cpu(cpu, &cache->shared_cpu_map);
806
807 /* Release the cache object if all the cpus using it
808 * are offline */
809 if (cpumask_empty(&cache->shared_cpu_map))
810 release_cache(cache);
811
812 cache = next;
813 }
814}
815
816void cacheinfo_cpu_offline(unsigned int cpu_id)
817{
818 struct cache_dir *cache_dir;
819 struct cache *cache;
820
821 /* Prevent userspace from seeing inconsistent state - remove
822 * the sysfs hierarchy first */
823 cache_dir = per_cpu(cache_dir, cpu_id);
824
825 /* careful, sysfs population may have failed */
826 if (cache_dir)
827 remove_cache_dir(cache_dir);
828
829 per_cpu(cache_dir, cpu_id) = NULL;
830
831 /* clear the CPU's bit in its cache chain, possibly freeing
832 * cache objects */
833 cache = cache_lookup_by_cpu(cpu_id);
834 if (cache)
835 cache_cpu_clear(cache, cpu_id);
836}
837#endif /* CONFIG_HOTPLUG_CPU */
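
The new file exports one cache/indexN directory per cache in the local hierarchy under each CPU's sysfs node, mirroring the x86 intel_cacheinfo layout. Associativity is derived as (size / nr_sets) / line_size, so for example a 32 KiB cache with 128 sets and 64-byte lines reports 4-way (32768 / 128 / 64 = 4). A small userspace sketch to read one attribute back (the path follows the x86-compatible layout the header comment promises; it is an assumption, not verified against a running kernel here):

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cache/index0/size", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 L1 size: %s", buf);  /* e.g. "32K\n" */
	fclose(f);
	return 0;
}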
diff --git a/arch/powerpc/kernel/cacheinfo.h b/arch/powerpc/kernel/cacheinfo.h
new file mode 100644
index 000000000000..a7b74d36acd7
--- /dev/null
+++ b/arch/powerpc/kernel/cacheinfo.h
@@ -0,0 +1,8 @@
1#ifndef _PPC_CACHEINFO_H
2#define _PPC_CACHEINFO_H
3
4/* These are just hooks for sysfs.c to use. */
5extern void cacheinfo_cpu_online(unsigned int cpu_id);
6extern void cacheinfo_cpu_offline(unsigned int cpu_id);
7
8#endif /* _PPC_CACHEINFO_H */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 2538030954d8..da5a3855a0c4 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -16,7 +16,7 @@
16 * 2 of the License, or (at your option) any later version. 16 * 2 of the License, or (at your option) any later version.
17 */ 17 */
18 18
19#undef DEBUG 19#define DEBUG
20 20
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/pci.h> 22#include <linux/pci.h>
@@ -1356,6 +1356,63 @@ static void __init pcibios_allocate_resources(int pass)
1356 } 1356 }
1357} 1357}
1358 1358
1359static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
1360{
1361 struct pci_controller *hose = pci_bus_to_host(bus);
1362 resource_size_t offset;
1363 struct resource *res, *pres;
1364 int i;
1365
1366 pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));
1367
1368 /* Check for IO */
1369 if (!(hose->io_resource.flags & IORESOURCE_IO))
1370 goto no_io;
1371 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1372 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1373 BUG_ON(res == NULL);
1374 res->name = "Legacy IO";
1375 res->flags = IORESOURCE_IO;
1376 res->start = offset;
1377 res->end = (offset + 0xfff) & 0xfffffffful;
1378 pr_debug("Candidate legacy IO: %pR\n", res);
1379 if (request_resource(&hose->io_resource, res)) {
1380 printk(KERN_DEBUG
1381 "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
1382 pci_domain_nr(bus), bus->number, res);
1383 kfree(res);
1384 }
1385
1386 no_io:
1387 /* Check for memory */
1388 offset = hose->pci_mem_offset;
1389 pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
1390 for (i = 0; i < 3; i++) {
1391 pres = &hose->mem_resources[i];
1392 if (!(pres->flags & IORESOURCE_MEM))
1393 continue;
1394 pr_debug("hose mem res: %pR\n", pres);
1395 if ((pres->start - offset) <= 0xa0000 &&
1396 (pres->end - offset) >= 0xbffff)
1397 break;
1398 }
1399 if (i >= 3)
1400 return;
1401 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1402 BUG_ON(res == NULL);
1403 res->name = "Legacy VGA memory";
1404 res->flags = IORESOURCE_MEM;
1405 res->start = 0xa0000 + offset;
1406 res->end = 0xbffff + offset;
1407 pr_debug("Candidate VGA memory: %pR\n", res);
1408 if (request_resource(pres, res)) {
1409 printk(KERN_DEBUG
1410 "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
1411 pci_domain_nr(bus), bus->number, res);
1412 kfree(res);
1413 }
1414}
1415
1359void __init pcibios_resource_survey(void) 1416void __init pcibios_resource_survey(void)
1360{ 1417{
1361 struct pci_bus *b; 1418 struct pci_bus *b;
@@ -1371,6 +1428,18 @@ void __init pcibios_resource_survey(void)
1371 pcibios_allocate_resources(1); 1428 pcibios_allocate_resources(1);
1372 } 1429 }
1373 1430
 1431 /* Before we start assigning unassigned resources, we try to reserve
 1432 * the low IO area and the VGA memory area if they intersect the
 1433 * bus's available resources, to avoid allocating things on top of them
1434 */
1435 if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
1436 list_for_each_entry(b, &pci_root_buses, node)
1437 pcibios_reserve_legacy_regions(b);
1438 }
1439
 1440 /* Now, if the platform didn't decide to blindly trust the firmware,
 1441 * we proceed to assign things that were left unassigned
1442 */
1374 if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) { 1443 if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
 1375 pr_debug("PCI: Assigning unassigned resources...\n"); 1444 pr_debug("PCI: Assigning unassigned resources...\n");
1376 pci_assign_unassigned_resources(); 1445 pci_assign_unassigned_resources();
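
Ordering matters in the hunk above: the legacy 0x0000-0x0fff I/O window and the 0xa0000-0xbffff VGA aperture are claimed before pci_assign_unassigned_resources() runs, so the allocator can never place a freshly assigned BAR on top of them.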
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 39fadc6e1492..586962f65c2a 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -560,9 +560,14 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
560 * G5 machines... So when something asks for bus 0 io base 560 * G5 machines... So when something asks for bus 0 io base
561 * (bus 0 is HT root), we return the AGP one instead. 561 * (bus 0 is HT root), we return the AGP one instead.
562 */ 562 */
563 if (machine_is_compatible("MacRISC4")) 563 if (in_bus == 0 && machine_is_compatible("MacRISC4")) {
564 if (in_bus == 0) 564 struct device_node *agp;
565
566 agp = of_find_compatible_node(NULL, NULL, "u3-agp");
567 if (agp)
565 in_bus = 0xf0; 568 in_bus = 0xf0;
569 of_node_put(agp);
570 }
566 571
567 /* That syscall isn't quite compatible with PCI domains, but it's 572 /* That syscall isn't quite compatible with PCI domains, but it's
568 * used on pre-domains setup. We return the first match 573 * used on pre-domains setup. We return the first match
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index dcec1325d340..c8b27bb4dbde 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -165,6 +165,7 @@ EXPORT_SYMBOL(timer_interrupt);
165EXPORT_SYMBOL(irq_desc); 165EXPORT_SYMBOL(irq_desc);
166EXPORT_SYMBOL(tb_ticks_per_jiffy); 166EXPORT_SYMBOL(tb_ticks_per_jiffy);
167EXPORT_SYMBOL(cacheable_memcpy); 167EXPORT_SYMBOL(cacheable_memcpy);
168EXPORT_SYMBOL(cacheable_memzero);
168#endif 169#endif
169 170
170#ifdef CONFIG_PPC32 171#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 6f73c739f1e2..c09cffafb6ee 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -824,11 +824,11 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
824#endif 824#endif
825 825
826#ifdef CONFIG_KEXEC 826#ifdef CONFIG_KEXEC
827 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); 827 lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
828 if (lprop) 828 if (lprop)
829 crashk_res.start = *lprop; 829 crashk_res.start = *lprop;
830 830
831 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL); 831 lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
832 if (lprop) 832 if (lprop)
833 crashk_res.end = crashk_res.start + *lprop - 1; 833 crashk_res.end = crashk_res.start + *lprop - 1;
834#endif 834#endif
@@ -893,12 +893,12 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
893 u64 base, size, lmb_size; 893 u64 base, size, lmb_size;
894 unsigned int is_kexec_kdump = 0, rngs; 894 unsigned int is_kexec_kdump = 0, rngs;
895 895
896 ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l); 896 ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
897 if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t)) 897 if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
898 return 0; 898 return 0;
899 lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls); 899 lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
900 900
901 dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l); 901 dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
902 if (dm == NULL || l < sizeof(cell_t)) 902 if (dm == NULL || l < sizeof(cell_t))
903 return 0; 903 return 0;
904 904
@@ -907,7 +907,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
907 return 0; 907 return 0;
908 908
909 /* check if this is a kexec/kdump kernel. */ 909 /* check if this is a kexec/kdump kernel. */
910 usm = (cell_t *)of_get_flat_dt_prop(node, "linux,drconf-usable-memory", 910 usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
911 &l); 911 &l);
912 if (usm != NULL) 912 if (usm != NULL)
913 is_kexec_kdump = 1; 913 is_kexec_kdump = 1;
@@ -981,9 +981,9 @@ static int __init early_init_dt_scan_memory(unsigned long node,
981 } else if (strcmp(type, "memory") != 0) 981 } else if (strcmp(type, "memory") != 0)
982 return 0; 982 return 0;
983 983
984 reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l); 984 reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
985 if (reg == NULL) 985 if (reg == NULL)
986 reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l); 986 reg = of_get_flat_dt_prop(node, "reg", &l);
987 if (reg == NULL) 987 if (reg == NULL)
988 return 0; 988 return 0;
989 989
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 2445945d3761..7f1b33d5e30d 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1210,7 +1210,7 @@ static void __init prom_initialize_tce_table(void)
1210 /* Initialize the table to have a one-to-one mapping 1210 /* Initialize the table to have a one-to-one mapping
1211 * over the allocated size. 1211 * over the allocated size.
1212 */ 1212 */
1213 tce_entryp = (unsigned long *)base; 1213 tce_entryp = (u64 *)base;
1214 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) { 1214 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1215 tce_entry = (i << PAGE_SHIFT); 1215 tce_entry = (i << PAGE_SHIFT);
1216 tce_entry |= 0x3; 1216 tce_entry |= 0x3;
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 0c64f10087b9..4a2ee08af6a7 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -18,6 +18,8 @@
18#include <asm/machdep.h> 18#include <asm/machdep.h>
19#include <asm/smp.h> 19#include <asm/smp.h>
20 20
21#include "cacheinfo.h"
22
21#ifdef CONFIG_PPC64 23#ifdef CONFIG_PPC64
22#include <asm/paca.h> 24#include <asm/paca.h>
23#include <asm/lppaca.h> 25#include <asm/lppaca.h>
@@ -25,8 +27,6 @@
25 27
26static DEFINE_PER_CPU(struct cpu, cpu_devices); 28static DEFINE_PER_CPU(struct cpu, cpu_devices);
27 29
28static DEFINE_PER_CPU(struct kobject *, cache_toplevel);
29
30/* 30/*
31 * SMT snooze delay stuff, 64-bit only for now 31 * SMT snooze delay stuff, 64-bit only for now
32 */ 32 */
@@ -343,283 +343,6 @@ static struct sysdev_attribute pa6t_attrs[] = {
343#endif /* HAS_PPC_PMC_PA6T */ 343#endif /* HAS_PPC_PMC_PA6T */
344#endif /* HAS_PPC_PMC_CLASSIC */ 344#endif /* HAS_PPC_PMC_CLASSIC */
345 345
346struct cache_desc {
347 struct kobject kobj;
348 struct cache_desc *next;
349 const char *type; /* Instruction, Data, or Unified */
350 u32 size; /* total cache size in KB */
351 u32 line_size; /* in bytes */
352 u32 nr_sets; /* number of sets */
353 u32 level; /* e.g. 1, 2, 3... */
354 u32 associativity; /* e.g. 8-way... 0 is fully associative */
355};
356
357DEFINE_PER_CPU(struct cache_desc *, cache_desc);
358
359static struct cache_desc *kobj_to_cache_desc(struct kobject *k)
360{
361 return container_of(k, struct cache_desc, kobj);
362}
363
364static void cache_desc_release(struct kobject *k)
365{
366 struct cache_desc *desc = kobj_to_cache_desc(k);
367
368 pr_debug("%s: releasing %s\n", __func__, kobject_name(k));
369
370 if (desc->next)
371 kobject_put(&desc->next->kobj);
372
373 kfree(kobj_to_cache_desc(k));
374}
375
376static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf)
377{
378 struct kobj_attribute *kobj_attr;
379
380 kobj_attr = container_of(attr, struct kobj_attribute, attr);
381
382 return kobj_attr->show(k, kobj_attr, buf);
383}
384
385static struct sysfs_ops cache_desc_sysfs_ops = {
386 .show = cache_desc_show,
387};
388
389static struct kobj_type cache_desc_type = {
390 .release = cache_desc_release,
391 .sysfs_ops = &cache_desc_sysfs_ops,
392};
393
394static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
395{
396 struct cache_desc *cache = kobj_to_cache_desc(k);
397
398 return sprintf(buf, "%uK\n", cache->size);
399}
400
401static struct kobj_attribute cache_size_attr =
402 __ATTR(size, 0444, cache_size_show, NULL);
403
404static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
405{
406 struct cache_desc *cache = kobj_to_cache_desc(k);
407
408 return sprintf(buf, "%u\n", cache->line_size);
409}
410
411static struct kobj_attribute cache_line_size_attr =
412 __ATTR(coherency_line_size, 0444, cache_line_size_show, NULL);
413
414static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
415{
416 struct cache_desc *cache = kobj_to_cache_desc(k);
417
418 return sprintf(buf, "%u\n", cache->nr_sets);
419}
420
421static struct kobj_attribute cache_nr_sets_attr =
422 __ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL);
423
424static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
425{
426 struct cache_desc *cache = kobj_to_cache_desc(k);
427
428 return sprintf(buf, "%s\n", cache->type);
429}
430
431static struct kobj_attribute cache_type_attr =
432 __ATTR(type, 0444, cache_type_show, NULL);
433
434static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
435{
436 struct cache_desc *cache = kobj_to_cache_desc(k);
437
438 return sprintf(buf, "%u\n", cache->level);
439}
440
441static struct kobj_attribute cache_level_attr =
442 __ATTR(level, 0444, cache_level_show, NULL);
443
444static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
445{
446 struct cache_desc *cache = kobj_to_cache_desc(k);
447
448 return sprintf(buf, "%u\n", cache->associativity);
449}
450
451static struct kobj_attribute cache_assoc_attr =
452 __ATTR(ways_of_associativity, 0444, cache_assoc_show, NULL);
453
454struct cache_desc_info {
455 const char *type;
456 const char *size_prop;
457 const char *line_size_prop;
458 const char *nr_sets_prop;
459};
460
461/* PowerPC Processor binding says the [di]-cache-* must be equal on
462 * unified caches, so just use d-cache properties. */
463static struct cache_desc_info ucache_info = {
464 .type = "Unified",
465 .size_prop = "d-cache-size",
466 .line_size_prop = "d-cache-line-size",
467 .nr_sets_prop = "d-cache-sets",
468};
469
470static struct cache_desc_info dcache_info = {
471 .type = "Data",
472 .size_prop = "d-cache-size",
473 .line_size_prop = "d-cache-line-size",
474 .nr_sets_prop = "d-cache-sets",
475};
476
477static struct cache_desc_info icache_info = {
478 .type = "Instruction",
479 .size_prop = "i-cache-size",
480 .line_size_prop = "i-cache-line-size",
481 .nr_sets_prop = "i-cache-sets",
482};
483
484static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info)
485{
486 const u32 *cache_line_size;
487 struct cache_desc *new;
488 const u32 *cache_size;
489 const u32 *nr_sets;
490 int rc;
491
492 new = kzalloc(sizeof(*new), GFP_KERNEL);
493 if (!new)
494 return NULL;
495
496 rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent,
497 "index%d", index);
498 if (rc)
499 goto err;
500
501 /* type */
502 new->type = info->type;
503 rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr);
504 WARN_ON(rc);
505
506 /* level */
507 new->level = level;
508 rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr);
509 WARN_ON(rc);
510
511 /* size */
512 cache_size = of_get_property(np, info->size_prop, NULL);
513 if (cache_size) {
514 new->size = *cache_size / 1024;
515 rc = sysfs_create_file(&new->kobj,
516 &cache_size_attr.attr);
517 WARN_ON(rc);
518 }
519
520 /* coherency_line_size */
521 cache_line_size = of_get_property(np, info->line_size_prop, NULL);
522 if (cache_line_size) {
523 new->line_size = *cache_line_size;
524 rc = sysfs_create_file(&new->kobj,
525 &cache_line_size_attr.attr);
526 WARN_ON(rc);
527 }
528
529 /* number_of_sets */
530 nr_sets = of_get_property(np, info->nr_sets_prop, NULL);
531 if (nr_sets) {
532 new->nr_sets = *nr_sets;
533 rc = sysfs_create_file(&new->kobj,
534 &cache_nr_sets_attr.attr);
535 WARN_ON(rc);
536 }
537
538 /* ways_of_associativity */
539 if (new->nr_sets == 1) {
540 /* fully associative */
541 new->associativity = 0;
542 goto create_assoc;
543 }
544
545 if (new->nr_sets && new->size && new->line_size) {
546 /* If we have values for all of these we can derive
547 * the associativity. */
548 new->associativity =
549 ((new->size * 1024) / new->nr_sets) / new->line_size;
550create_assoc:
551 rc = sysfs_create_file(&new->kobj,
552 &cache_assoc_attr.attr);
553 WARN_ON(rc);
554 }
555
556 return new;
557err:
558 kfree(new);
559 return NULL;
560}
561
562static bool cache_is_unified(struct device_node *np)
563{
564 return of_get_property(np, "cache-unified", NULL);
565}
566
567static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
568{
569 struct device_node *next_cache;
570 struct cache_desc *new, **end;
571
572 pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index);
573
574 if (cache_is_unified(np)) {
575 new = create_cache_desc(np, parent, index, level,
576 &ucache_info);
577 } else {
578 new = create_cache_desc(np, parent, index, level,
579 &dcache_info);
580 if (new) {
581 index++;
582 new->next = create_cache_desc(np, parent, index, level,
583 &icache_info);
584 }
585 }
586 if (!new)
587 return NULL;
588
589 end = &new->next;
590 while (*end)
591 end = &(*end)->next;
592
593 next_cache = of_find_next_cache_node(np);
594 if (!next_cache)
595 goto out;
596
597 *end = create_cache_index_info(next_cache, parent, ++index, ++level);
598
599 of_node_put(next_cache);
600out:
601 return new;
602}
603
604static void __cpuinit create_cache_info(struct sys_device *sysdev)
605{
606 struct kobject *cache_toplevel;
607 struct device_node *np = NULL;
608 int cpu = sysdev->id;
609
610 cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj);
611 if (!cache_toplevel)
612 return;
613 per_cpu(cache_toplevel, cpu) = cache_toplevel;
614 np = of_get_cpu_node(cpu, NULL);
615 if (np != NULL) {
616 per_cpu(cache_desc, cpu) =
617 create_cache_index_info(np, cache_toplevel, 0, 1);
618 of_node_put(np);
619 }
620 return;
621}
622
623static void __cpuinit register_cpu_online(unsigned int cpu) 346static void __cpuinit register_cpu_online(unsigned int cpu)
624{ 347{
625 struct cpu *c = &per_cpu(cpu_devices, cpu); 348 struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -684,25 +407,10 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
684 sysdev_create_file(s, &attr_dscr); 407 sysdev_create_file(s, &attr_dscr);
685#endif /* CONFIG_PPC64 */ 408#endif /* CONFIG_PPC64 */
686 409
687 create_cache_info(s); 410 cacheinfo_cpu_online(cpu);
688} 411}
689 412
690#ifdef CONFIG_HOTPLUG_CPU 413#ifdef CONFIG_HOTPLUG_CPU
691static void remove_cache_info(struct sys_device *sysdev)
692{
693 struct kobject *cache_toplevel;
694 struct cache_desc *cache_desc;
695 int cpu = sysdev->id;
696
697 cache_desc = per_cpu(cache_desc, cpu);
698 if (cache_desc != NULL)
699 kobject_put(&cache_desc->kobj);
700
701 cache_toplevel = per_cpu(cache_toplevel, cpu);
702 if (cache_toplevel != NULL)
703 kobject_put(cache_toplevel);
704}
705
706static void unregister_cpu_online(unsigned int cpu) 414static void unregister_cpu_online(unsigned int cpu)
707{ 415{
708 struct cpu *c = &per_cpu(cpu_devices, cpu); 416 struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -769,7 +477,7 @@ static void unregister_cpu_online(unsigned int cpu)
769 sysdev_remove_file(s, &attr_dscr); 477 sysdev_remove_file(s, &attr_dscr);
770#endif /* CONFIG_PPC64 */ 478#endif /* CONFIG_PPC64 */
771 479
772 remove_cache_info(s); 480 cacheinfo_cpu_offline(cpu);
773} 481}
774#endif /* CONFIG_HOTPLUG_CPU */ 482#endif /* CONFIG_HOTPLUG_CPU */
775 483
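
The associativity derivation in the removed create_cache_desc() (the logic now lives in cacheinfo.c, reached through cacheinfo_cpu_online()/cacheinfo_cpu_offline()) is compact enough to check on its own: ways = (size in bytes / number of sets) / line size, with a set count of 1 reported as 0 for fully associative. A minimal userspace sketch using made-up device-tree property values:

    #include <stdio.h>

    /* ways_of_associativity as derived above:
     * (cache size in bytes / number of sets) / line size in bytes.
     * nr_sets == 1 is reported as 0, meaning fully associative. */
    static unsigned int derive_assoc(unsigned int size_kb,
                                     unsigned int nr_sets,
                                     unsigned int line_size)
    {
            if (nr_sets == 1)
                    return 0;
            return ((size_kb * 1024) / nr_sets) / line_size;
    }

    int main(void)
    {
            /* hypothetical 32K, 128-set, 128-byte-line cache: 2-way */
            printf("ways = %u\n", derive_assoc(32, 128, 128));
            printf("fully associative -> %u\n", derive_assoc(32, 1, 128));
            return 0;
    }
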
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 4314b39b6faf..ad123bced404 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -30,11 +30,11 @@
30#if defined(CONFIG_40x) || defined(CONFIG_8xx) 30#if defined(CONFIG_40x) || defined(CONFIG_8xx)
31static inline void _tlbil_all(void) 31static inline void _tlbil_all(void)
32{ 32{
33 asm volatile ("sync; tlbia; isync" : : : "memory") 33 asm volatile ("sync; tlbia; isync" : : : "memory");
34} 34}
35static inline void _tlbil_pid(unsigned int pid) 35static inline void _tlbil_pid(unsigned int pid)
36{ 36{
37 asm volatile ("sync; tlbia; isync" : : : "memory") 37 asm volatile ("sync; tlbia; isync" : : : "memory");
38} 38}
39#else /* CONFIG_40x || CONFIG_8xx */ 39#else /* CONFIG_40x || CONFIG_8xx */
40extern void _tlbil_all(void); 40extern void _tlbil_all(void);
@@ -47,7 +47,7 @@ extern void _tlbil_pid(unsigned int pid);
47#ifdef CONFIG_8xx 47#ifdef CONFIG_8xx
48static inline void _tlbil_va(unsigned long address, unsigned int pid) 48static inline void _tlbil_va(unsigned long address, unsigned int pid)
49{ 49{
50 asm volatile ("tlbie %0; sync" : : "r" (address) : "memory") 50 asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
51} 51}
52#else /* CONFIG_8xx */ 52#else /* CONFIG_8xx */
53extern void _tlbil_va(unsigned long address, unsigned int pid); 53extern void _tlbil_va(unsigned long address, unsigned int pid);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index cf81049e1e51..7393bd76d698 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -822,42 +822,50 @@ static void __init dump_numa_memory_topology(void)
822 * required. nid is the preferred node and end is the physical address of 822 * required. nid is the preferred node and end is the physical address of
823 * the highest address in the node. 823 * the highest address in the node.
824 * 824 *
825 * Returns the physical address of the memory. 825 * Returns the virtual address of the memory.
826 */ 826 */
827static void __init *careful_allocation(int nid, unsigned long size, 827static void __init *careful_zallocation(int nid, unsigned long size,
828 unsigned long align, 828 unsigned long align,
829 unsigned long end_pfn) 829 unsigned long end_pfn)
830{ 830{
831 void *ret;
831 int new_nid; 832 int new_nid;
832 unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT); 833 unsigned long ret_paddr;
834
835 ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
833 836
834 /* retry over all memory */ 837 /* retry over all memory */
835 if (!ret) 838 if (!ret_paddr)
836 ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM()); 839 ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
837 840
838 if (!ret) 841 if (!ret_paddr)
839 panic("numa.c: cannot allocate %lu bytes on node %d", 842 panic("numa.c: cannot allocate %lu bytes for node %d",
840 size, nid); 843 size, nid);
841 844
845 ret = __va(ret_paddr);
846
842 /* 847 /*
843 * If the memory came from a previously allocated node, we must 848 * We initialize the nodes in numeric order: 0, 1, 2...
844 * retry with the bootmem allocator. 849 * and hand over control from the LMB allocator to the
850 * bootmem allocator. If this function is called for
851 * node 5, then we know that all nodes <5 are using the
852 * bootmem allocator instead of the LMB allocator.
853 *
854 * So, check the nid from which this allocation came
855 * and double check to see if we need to use bootmem
856 * instead of the LMB. We don't free the LMB memory
857 * since it would be useless.
845 */ 858 */
846 new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT); 859 new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
847 if (new_nid < nid) { 860 if (new_nid < nid) {
848 ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid), 861 ret = __alloc_bootmem_node(NODE_DATA(new_nid),
849 size, align, 0); 862 size, align, 0);
850 863
851 if (!ret) 864 dbg("alloc_bootmem %p %lx\n", ret, size);
852 panic("numa.c: cannot allocate %lu bytes on node %d",
853 size, new_nid);
854
855 ret = __pa(ret);
856
857 dbg("alloc_bootmem %lx %lx\n", ret, size);
858 } 865 }
859 866
860 return (void *)ret; 867 memset(ret, 0, size);
868 return ret;
861} 869}
862 870
863static struct notifier_block __cpuinitdata ppc64_numa_nb = { 871static struct notifier_block __cpuinitdata ppc64_numa_nb = {
@@ -952,7 +960,7 @@ void __init do_init_bootmem(void)
952 960
953 for_each_online_node(nid) { 961 for_each_online_node(nid) {
954 unsigned long start_pfn, end_pfn; 962 unsigned long start_pfn, end_pfn;
955 unsigned long bootmem_paddr; 963 void *bootmem_vaddr;
956 unsigned long bootmap_pages; 964 unsigned long bootmap_pages;
957 965
958 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 966 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
@@ -964,11 +972,9 @@ void __init do_init_bootmem(void)
964 * previous nodes' bootmem to be initialized and have 972 * previous nodes' bootmem to be initialized and have
965 * all reserved areas marked. 973 * all reserved areas marked.
966 */ 974 */
967 NODE_DATA(nid) = careful_allocation(nid, 975 NODE_DATA(nid) = careful_zallocation(nid,
968 sizeof(struct pglist_data), 976 sizeof(struct pglist_data),
969 SMP_CACHE_BYTES, end_pfn); 977 SMP_CACHE_BYTES, end_pfn);
970 NODE_DATA(nid) = __va(NODE_DATA(nid));
971 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
972 978
973 dbg("node %d\n", nid); 979 dbg("node %d\n", nid);
974 dbg("NODE_DATA() = %p\n", NODE_DATA(nid)); 980 dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
@@ -984,20 +990,20 @@ void __init do_init_bootmem(void)
984 dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT); 990 dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
985 991
986 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); 992 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
987 bootmem_paddr = (unsigned long)careful_allocation(nid, 993 bootmem_vaddr = careful_zallocation(nid,
988 bootmap_pages << PAGE_SHIFT, 994 bootmap_pages << PAGE_SHIFT,
989 PAGE_SIZE, end_pfn); 995 PAGE_SIZE, end_pfn);
990 memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);
991 996
992 dbg("bootmap_paddr = %lx\n", bootmem_paddr); 997 dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
993 998
994 init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, 999 init_bootmem_node(NODE_DATA(nid),
1000 __pa(bootmem_vaddr) >> PAGE_SHIFT,
995 start_pfn, end_pfn); 1001 start_pfn, end_pfn);
996 1002
997 free_bootmem_with_active_regions(nid, end_pfn); 1003 free_bootmem_with_active_regions(nid, end_pfn);
998 /* 1004 /*
999 * Be very careful about moving this around. Future 1005 * Be very careful about moving this around. Future
1000 * calls to careful_allocation() depend on this getting 1006 * calls to careful_zallocation() depend on this getting
1001 * done correctly. 1007 * done correctly.
1002 */ 1008 */
1003 mark_reserved_regions_for_nid(nid); 1009 mark_reserved_regions_for_nid(nid);
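
Renaming careful_allocation() to careful_zallocation() also centralizes the pattern: try the LMB allocator, fall back across all memory, switch to bootmem when the block lands on an already-initialized (lower-numbered) node, and hand back a zeroed virtual address. A simplified userspace model of that flow; the toy_* helpers and the forced node mismatch are inventions for the sketch, standing in for __lmb_alloc_base() and __alloc_bootmem_node():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for __lmb_alloc_base(); pretends the block came from a
     * lower-numbered node so the fallback path is exercised. */
    static void *toy_lmb_alloc(size_t size, int want_nid, int *got_nid)
    {
            *got_nid = want_nid - 1;
            return malloc(size);
    }

    /* Stand-in for __alloc_bootmem_node(). */
    static void *toy_bootmem_alloc(int nid, size_t size)
    {
            (void)nid;
            return malloc(size);
    }

    static void *careful_zalloc_model(int nid, size_t size)
    {
            int new_nid;
            void *ret = toy_lmb_alloc(size, nid, &new_nid);

            if (!ret)
                    return NULL;

            /* Nodes are initialized in order, so a lower new_nid means
             * that node has already switched to bootmem; reallocate
             * there.  The LMB block is deliberately not freed,
             * mirroring the kernel's "it would be useless" comment. */
            if (new_nid < nid)
                    ret = toy_bootmem_alloc(new_nid, size);

            if (ret)
                    memset(ret, 0, size);   /* the 'z' in zallocation */
            return ret;
    }

    int main(void)
    {
            void *p = careful_zalloc_model(5, 64);

            printf("node 5 allocation at %p\n", p);
            free(p);
            return 0;
    }
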
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 38ff35f2142a..22972cd83cc9 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -266,7 +266,8 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
266 /* The PTE should never be already set nor present in the 266 /* The PTE should never be already set nor present in the
267 * hash table 267 * hash table
268 */ 268 */
269 BUG_ON(pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)); 269 BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
270 flags);
270 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, 271 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
271 __pgprot(flags))); 272 __pgprot(flags)));
272 } 273 }
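
The relaxed BUG_ON reads best as a truth table: a PTE that is already present or hashed is now only fatal when the caller is actually installing flags; flags == 0 passes. A tiny model of the new condition, with placeholder bit values rather than the real _PAGE_* definitions:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_PRESENT 0x1   /* placeholder, not the real _PAGE_PRESENT */
    #define PAGE_HASHPTE 0x2   /* placeholder, not the real _PAGE_HASHPTE */

    /* Model of the patched check: trip only when the PTE is live AND
     * real flags are being installed. */
    static void check(unsigned long pte, unsigned long flags)
    {
            assert(!((pte & (PAGE_PRESENT | PAGE_HASHPTE)) && flags));
            printf("pte=0x%lx flags=0x%lx ok\n", pte, flags);
    }

    int main(void)
    {
            check(PAGE_PRESENT, 0);   /* allowed after the patch */
            check(0, 0x4);            /* fresh mapping, always fine */
            return 0;
    }
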
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 803a64c02b06..39ac22b13c73 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -189,8 +189,9 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
189 smp_call_function(do_flush_tlb_mm_ipi, NULL, 1); 189 smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
190 _tlbil_pid(0); 190 _tlbil_pid(0);
191 preempt_enable(); 191 preempt_enable();
192#endif 192#else
193 _tlbil_pid(0); 193 _tlbil_pid(0);
194#endif
194} 195}
195EXPORT_SYMBOL(flush_tlb_kernel_range); 196EXPORT_SYMBOL(flush_tlb_kernel_range);
196 197
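
The one-line change above fixes a real hole: without the #else branch, non-SMP builds compiled flush_tlb_kernel_range() down to an empty function and never flushed at all. A compilable model of the corrected shape, with stubs standing in for the kernel primitives:

    #include <stdio.h>

    #define MODEL_SMP 1   /* flip to 0 to model a UP build */

    static void _tlbil_pid(int pid) { printf("flush TLB, pid %d\n", pid); }
    static void notify_other_cpus(void) { printf("IPI other CPUs\n"); }

    static void flush_kernel_range_model(void)
    {
    #if MODEL_SMP
            notify_other_cpus();   /* remote flushes via IPI */
            _tlbil_pid(0);         /* then the local flush */
    #else
            _tlbil_pid(0);         /* UP: the local flush is the whole job */
    #endif
    }

    int main(void)
    {
            flush_kernel_range_model();
            return 0;
    }
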
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
index 628009c01958..964b93974d89 100644
--- a/arch/powerpc/oprofile/cell/pr_util.h
+++ b/arch/powerpc/oprofile/cell/pr_util.h
@@ -30,6 +30,10 @@
30extern struct delayed_work spu_work; 30extern struct delayed_work spu_work;
31extern int spu_prof_running; 31extern int spu_prof_running;
32 32
33#define TRACE_ARRAY_SIZE 1024
34
35extern spinlock_t oprof_spu_smpl_arry_lck;
36
33struct spu_overlay_info { /* map of sections within an SPU overlay */ 37struct spu_overlay_info { /* map of sections within an SPU overlay */
34 unsigned int vma; /* SPU virtual memory address from elf */ 38 unsigned int vma; /* SPU virtual memory address from elf */
35 unsigned int size; /* size of section from elf */ 39 unsigned int size; /* size of section from elf */
@@ -79,7 +83,7 @@ struct spu_buffer {
79 * the vma-to-fileoffset map. 83 * the vma-to-fileoffset map.
80 */ 84 */
81struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu, 85struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu,
82 u64 objectid); 86 unsigned long objectid);
83unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map, 87unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map,
84 unsigned int vma, const struct spu *aSpu, 88 unsigned int vma, const struct spu *aSpu,
85 int *grd_val); 89 int *grd_val);
@@ -89,10 +93,11 @@ void vma_map_free(struct vma_to_fileoffset_map *map);
89 * Entry point for SPU profiling. 93 * Entry point for SPU profiling.
90 * cycles_reset is the SPU_CYCLES count value specified by the user. 94 * cycles_reset is the SPU_CYCLES count value specified by the user.
91 */ 95 */
92int start_spu_profiling(unsigned int cycles_reset); 96int start_spu_profiling_cycles(unsigned int cycles_reset);
93 97void start_spu_profiling_events(void);
94void stop_spu_profiling(void);
95 98
99void stop_spu_profiling_cycles(void);
100void stop_spu_profiling_events(void);
96 101
97/* add the necessary profiling hooks */ 102/* add the necessary profiling hooks */
98int spu_sync_start(void); 103int spu_sync_start(void);
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index 83faa958b9d4..9305ddaac512 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -18,11 +18,21 @@
18#include <asm/cell-pmu.h> 18#include <asm/cell-pmu.h>
19#include "pr_util.h" 19#include "pr_util.h"
20 20
21#define TRACE_ARRAY_SIZE 1024
22#define SCALE_SHIFT 14 21#define SCALE_SHIFT 14
23 22
24static u32 *samples; 23static u32 *samples;
25 24
25/* spu_prof_running is a flag used to indicate if spu profiling is enabled
26 * or not. It is set by the routines start_spu_profiling_cycles() and
27 * start_spu_profiling_events(). The flag is cleared by the routines
28 * stop_spu_profiling_cycles() and stop_spu_profiling_events(). These
29 * routines are called via global_start() and global_stop() which are called in
30 * op_powerpc_start() and op_powerpc_stop(). These routines are called once
31 * per system as a result of the user starting/stopping oprofile. Hence, only
32 * one CPU per user at a time will be changing the value of spu_prof_running.
33 * In general, OProfile does not protect against multiple users trying to run
34 * OProfile at a time.
35 */
26int spu_prof_running; 36int spu_prof_running;
27static unsigned int profiling_interval; 37static unsigned int profiling_interval;
28 38
@@ -31,8 +41,8 @@ static unsigned int profiling_interval;
31 41
32#define SPU_PC_MASK 0xFFFF 42#define SPU_PC_MASK 0xFFFF
33 43
34static DEFINE_SPINLOCK(sample_array_lock); 44DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
35unsigned long sample_array_lock_flags; 45unsigned long oprof_spu_smpl_arry_lck_flags;
36 46
37void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset) 47void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
38{ 48{
@@ -145,13 +155,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
145 * sample array must be loaded and then processed for a given 155 * sample array must be loaded and then processed for a given
146 * cpu. The sample array is not per cpu. 156 * cpu. The sample array is not per cpu.
147 */ 157 */
148 spin_lock_irqsave(&sample_array_lock, 158 spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
149 sample_array_lock_flags); 159 oprof_spu_smpl_arry_lck_flags);
150 num_samples = cell_spu_pc_collection(cpu); 160 num_samples = cell_spu_pc_collection(cpu);
151 161
152 if (num_samples == 0) { 162 if (num_samples == 0) {
153 spin_unlock_irqrestore(&sample_array_lock, 163 spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
154 sample_array_lock_flags); 164 oprof_spu_smpl_arry_lck_flags);
155 continue; 165 continue;
156 } 166 }
157 167
@@ -162,8 +172,8 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
162 num_samples); 172 num_samples);
163 } 173 }
164 174
165 spin_unlock_irqrestore(&sample_array_lock, 175 spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
166 sample_array_lock_flags); 176 oprof_spu_smpl_arry_lck_flags);
167 177
168 } 178 }
169 smp_wmb(); /* insure spu event buffer updates are written */ 179 smp_wmb(); /* insure spu event buffer updates are written */
@@ -182,13 +192,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
182 192
183static struct hrtimer timer; 193static struct hrtimer timer;
184/* 194/*
185 * Entry point for SPU profiling. 195 * Entry point for SPU cycle profiling.
186 * NOTE: SPU profiling is done system-wide, not per-CPU. 196 * NOTE: SPU profiling is done system-wide, not per-CPU.
187 * 197 *
188 * cycles_reset is the count value specified by the user when 198 * cycles_reset is the count value specified by the user when
189 * setting up OProfile to count SPU_CYCLES. 199 * setting up OProfile to count SPU_CYCLES.
190 */ 200 */
191int start_spu_profiling(unsigned int cycles_reset) 201int start_spu_profiling_cycles(unsigned int cycles_reset)
192{ 202{
193 ktime_t kt; 203 ktime_t kt;
194 204
@@ -212,10 +222,30 @@ int start_spu_profiling(unsigned int cycles_reset)
212 return 0; 222 return 0;
213} 223}
214 224
215void stop_spu_profiling(void) 225/*
226 * Entry point for SPU event profiling.
227 * NOTE: SPU profiling is done system-wide, not per-CPU.
228 *
 229 * The SPU event and sample count are configured beforehand in
 230 * cell_reg_setup_spu_events(); this routine only starts collection.
231 */
232void start_spu_profiling_events(void)
233{
234 spu_prof_running = 1;
235 schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
236
237 return;
238}
239
240void stop_spu_profiling_cycles(void)
216{ 241{
217 spu_prof_running = 0; 242 spu_prof_running = 0;
218 hrtimer_cancel(&timer); 243 hrtimer_cancel(&timer);
219 kfree(samples); 244 kfree(samples);
220 pr_debug("SPU_PROF: stop_spu_profiling issued\n"); 245 pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
246}
247
248void stop_spu_profiling_events(void)
249{
250 spu_prof_running = 0;
221} 251}
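
The renamed lock guards the same invariant as before: the sample array is global, not per-CPU, so collection and processing for one CPU must not interleave with another. A minimal pthread model of that serialization; the names are invented for the sketch:

    #include <pthread.h>
    #include <stdio.h>

    /* One lock for one shared array -- the role oprof_spu_smpl_arry_lck
     * plays in profile_spus() above. */
    static pthread_mutex_t smpl_arry_lck = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int samples[16];

    static void collect_for_cpu(unsigned int cpu)
    {
            pthread_mutex_lock(&smpl_arry_lck);
            samples[0] = cpu;   /* stand-in for cell_spu_pc_collection() */
            printf("cpu %u stored sample %u\n", cpu, samples[0]);
            pthread_mutex_unlock(&smpl_arry_lck);
    }

    int main(void)
    {
            collect_for_cpu(0);
            collect_for_cpu(1);
            return 0;
    }
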
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 17807acb05d9..21f16edf6c8d 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -132,6 +132,28 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
132 oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); 132 oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
133 oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); 133 oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
134 oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); 134 oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
135#ifdef CONFIG_OPROFILE_CELL
136 /* create a file the user tool can check to see what level of profiling
 137 * support exists with this kernel. Initialize bit mask to indicate
138 * what support the kernel has:
 139 * bit 0 - Supports SPU event profiling in addition to PPU
 140 * event/cycle profiling and SPU cycle profiling.
141 * bits 1-31 - Currently unused.
142 *
143 * If the file does not exist, then the kernel only supports SPU
144 * cycle profiling, PPU event and cycle profiling.
145 */
146 oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support);
147 sys.cell_support = 0x1; /* Note, the user OProfile tool must check
148 * that this bit is set before attempting to
 149 * use SPU event profiling. Older kernels
150 * will not have this file, hence the user
151 * tool is not allowed to do SPU event
152 * profiling on older kernels. Older kernels
153 * will accept SPU events but collected data
154 * is garbage.
155 */
156#endif
135#endif 157#endif
136 158
137 for (i = 0; i < model->num_counters; ++i) { 159 for (i = 0; i < model->num_counters; ++i) {
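
From the tool side, the check described in the comment could look roughly like the following. The /dev/oprofile mount point and the plain-decimal read are assumptions about the oprofilefs layout, not something this patch specifies:

    #include <stdio.h>

    int main(void)
    {
            unsigned long mask = 0;
            /* assumed mount point for oprofilefs */
            FILE *f = fopen("/dev/oprofile/cell_support", "r");

            if (!f) {
                    /* older kernel: only SPU cycle / PPU profiling */
                    fprintf(stderr, "no cell_support file; "
                            "SPU event profiling unavailable\n");
                    return 1;
            }
            if (fscanf(f, "%lu", &mask) != 1)
                    mask = 0;
            fclose(f);

            printf("SPU event profiling %ssupported\n",
                   (mask & 0x1) ? "" : "not ");
            return 0;
    }
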
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 25a4ec2514a3..ae06c6236d9c 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -40,14 +40,15 @@
40#include "../platforms/cell/interrupt.h" 40#include "../platforms/cell/interrupt.h"
41#include "cell/pr_util.h" 41#include "cell/pr_util.h"
42 42
43static void cell_global_stop_spu(void); 43#define PPU_PROFILING 0
44#define SPU_PROFILING_CYCLES 1
45#define SPU_PROFILING_EVENTS 2
44 46
45/* 47#define SPU_EVENT_NUM_START 4100
46 * spu_cycle_reset is the number of cycles between samples. 48#define SPU_EVENT_NUM_STOP 4399
47 * This variable is used for SPU profiling and should ONLY be set 49#define SPU_PROFILE_EVENT_ADDR 4363 /* spu, address trace, decimal */
48 * at the beginning of cell_reg_setup; otherwise, it's read-only. 50#define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146 /* sub unit set to zero */
49 */ 51#define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186 /* sub unit set to zero */
50static unsigned int spu_cycle_reset;
51 52
52#define NUM_SPUS_PER_NODE 8 53#define NUM_SPUS_PER_NODE 8
53#define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */ 54#define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */
@@ -66,6 +67,21 @@ static unsigned int spu_cycle_reset;
66 67
67#define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */ 68#define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */
68 69
70/* Minimum HW interval timer setting to send a value to the trace buffer is 10 cycles.
 71 * To configure the counter to send a value every N cycles, set it to
72 * 2^32 - 1 - N.
73 */
74#define NUM_INTERVAL_CYC (0xFFFFFFFF - 10)
75
76/*
77 * spu_cycle_reset is the number of cycles between samples.
78 * This variable is used for SPU profiling and should ONLY be set
79 * at the beginning of cell_reg_setup; otherwise, it's read-only.
80 */
81static unsigned int spu_cycle_reset;
82static unsigned int profiling_mode;
83static int spu_evnt_phys_spu_indx;
84
69struct pmc_cntrl_data { 85struct pmc_cntrl_data {
70 unsigned long vcntr; 86 unsigned long vcntr;
71 unsigned long evnts; 87 unsigned long evnts;
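
The NUM_INTERVAL_CYC encoding defined in the hunk above is worth making concrete: the interval timer counts up and triggers on wrap, so programming 2^32 - 1 - N yields one trace-buffer write roughly every N cycles (parenthesizing the constant, as done above, also keeps the subtraction safe inside larger expressions). A self-contained check of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t n = 10;   /* minimum supported interval, per the comment */
            uint32_t reg = 0xFFFFFFFFu - n;

            printf("pm_interval = 0x%08X -> one write every %u cycles\n",
                   reg, 0xFFFFFFFFu - reg);
            return 0;
    }
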
@@ -105,6 +121,8 @@ struct pm_cntrl {
105 u16 trace_mode; 121 u16 trace_mode;
106 u16 freeze; 122 u16 freeze;
107 u16 count_mode; 123 u16 count_mode;
124 u16 spu_addr_trace;
125 u8 trace_buf_ovflw;
108}; 126};
109 127
110static struct { 128static struct {
@@ -122,7 +140,7 @@ static struct {
122#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2) 140#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
123 141
124static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values); 142static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
125 143static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
126static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS]; 144static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
127 145
128/* 146/*
@@ -152,6 +170,7 @@ static u32 hdw_thread;
152 170
153static u32 virt_cntr_inter_mask; 171static u32 virt_cntr_inter_mask;
154static struct timer_list timer_virt_cntr; 172static struct timer_list timer_virt_cntr;
173static struct timer_list timer_spu_event_swap;
155 174
156/* 175/*
157 * pm_signal needs to be global since it is initialized in 176 * pm_signal needs to be global since it is initialized in
@@ -165,7 +184,7 @@ static int spu_rtas_token; /* token for SPU cycle profiling */
165static u32 reset_value[NR_PHYS_CTRS]; 184static u32 reset_value[NR_PHYS_CTRS];
166static int num_counters; 185static int num_counters;
167static int oprofile_running; 186static int oprofile_running;
168static DEFINE_SPINLOCK(virt_cntr_lock); 187static DEFINE_SPINLOCK(cntr_lock);
169 188
170static u32 ctr_enabled; 189static u32 ctr_enabled;
171 190
@@ -336,13 +355,13 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
336 for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) { 355 for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
337 if (bus_word & (1 << i)) { 356 if (bus_word & (1 << i)) {
338 pm_regs.debug_bus_control |= 357 pm_regs.debug_bus_control |=
339 (bus_type << (30 - (2 * i))); 358 (bus_type << (30 - (2 * i)));
340 359
341 for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) { 360 for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
342 if (input_bus[j] == 0xff) { 361 if (input_bus[j] == 0xff) {
343 input_bus[j] = i; 362 input_bus[j] = i;
344 pm_regs.group_control |= 363 pm_regs.group_control |=
345 (i << (30 - (2 * j))); 364 (i << (30 - (2 * j)));
346 365
347 break; 366 break;
348 } 367 }
@@ -367,12 +386,16 @@ static void write_pm_cntrl(int cpu)
367 if (pm_regs.pm_cntrl.stop_at_max == 1) 386 if (pm_regs.pm_cntrl.stop_at_max == 1)
368 val |= CBE_PM_STOP_AT_MAX; 387 val |= CBE_PM_STOP_AT_MAX;
369 388
370 if (pm_regs.pm_cntrl.trace_mode == 1) 389 if (pm_regs.pm_cntrl.trace_mode != 0)
371 val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode); 390 val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);
372 391
392 if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
393 val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
373 if (pm_regs.pm_cntrl.freeze == 1) 394 if (pm_regs.pm_cntrl.freeze == 1)
374 val |= CBE_PM_FREEZE_ALL_CTRS; 395 val |= CBE_PM_FREEZE_ALL_CTRS;
375 396
397 val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);
398
376 /* 399 /*
377 * Routine set_count_mode must be called previously to set 400 * Routine set_count_mode must be called previously to set
378 * the count mode based on the user selection of user and kernel. 401 * the count mode based on the user selection of user and kernel.
@@ -441,7 +464,7 @@ static void cell_virtual_cntr(unsigned long data)
441 * not both playing with the counters on the same node. 464 * not both playing with the counters on the same node.
442 */ 465 */
443 466
444 spin_lock_irqsave(&virt_cntr_lock, flags); 467 spin_lock_irqsave(&cntr_lock, flags);
445 468
446 prev_hdw_thread = hdw_thread; 469 prev_hdw_thread = hdw_thread;
447 470
@@ -480,7 +503,7 @@ static void cell_virtual_cntr(unsigned long data)
480 cbe_disable_pm_interrupts(cpu); 503 cbe_disable_pm_interrupts(cpu);
481 for (i = 0; i < num_counters; i++) { 504 for (i = 0; i < num_counters; i++) {
482 per_cpu(pmc_values, cpu + prev_hdw_thread)[i] 505 per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
483 = cbe_read_ctr(cpu, i); 506 = cbe_read_ctr(cpu, i);
484 507
485 if (per_cpu(pmc_values, cpu + next_hdw_thread)[i] 508 if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
486 == 0xFFFFFFFF) 509 == 0xFFFFFFFF)
@@ -527,7 +550,7 @@ static void cell_virtual_cntr(unsigned long data)
527 cbe_enable_pm(cpu); 550 cbe_enable_pm(cpu);
528 } 551 }
529 552
530 spin_unlock_irqrestore(&virt_cntr_lock, flags); 553 spin_unlock_irqrestore(&cntr_lock, flags);
531 554
532 mod_timer(&timer_virt_cntr, jiffies + HZ / 10); 555 mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
533} 556}
@@ -541,38 +564,146 @@ static void start_virt_cntrs(void)
541 add_timer(&timer_virt_cntr); 564 add_timer(&timer_virt_cntr);
542} 565}
543 566
544/* This function is called once for all cpus combined */ 567static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
545static int cell_reg_setup(struct op_counter_config *ctr,
546 struct op_system_config *sys, int num_ctrs) 568 struct op_system_config *sys, int num_ctrs)
547{ 569{
548 int i, j, cpu; 570 spu_cycle_reset = ctr[0].count;
549 spu_cycle_reset = 0;
550 571
551 if (ctr[0].event == SPU_CYCLES_EVENT_NUM) { 572 /*
552 spu_cycle_reset = ctr[0].count; 573 * Each node will need to make the rtas call to start
574 * and stop SPU profiling. Get the token once and store it.
575 */
576 spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
577
578 if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
579 printk(KERN_ERR
580 "%s: rtas token ibm,cbe-spu-perftools unknown\n",
581 __func__);
582 return -EIO;
583 }
584 return 0;
585}
586
587/* Unfortunately, the hardware will only support event profiling
588 * on one SPU per node at a time. Therefore, we must time slice
589 * the profiling across all SPUs in the node. Note, we do this
590 * in parallel for each node. The following routine is called
 591 * periodically based on a kernel timer to switch which SPU is
 592 * being monitored in a round-robin fashion.
593 */
594static void spu_evnt_swap(unsigned long data)
595{
596 int node;
597 int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
598 unsigned long flags;
599 int cpu;
600 int ret;
601 u32 interrupt_mask;
602
603
604 /* enable interrupts on cntr 0 */
605 interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);
606
607 hdw_thread = 0;
608
609 /* Make sure spu event interrupt handler and spu event swap
610 * don't access the counters simultaneously.
611 */
612 spin_lock_irqsave(&cntr_lock, flags);
613
614 cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;
615
616 if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
617 spu_evnt_phys_spu_indx = 0;
618
619 pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
620 pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
621 pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
622
623 /* switch the SPU being profiled on each node */
624 for_each_online_cpu(cpu) {
625 if (cbe_get_hw_thread_id(cpu))
626 continue;
627
628 node = cbe_cpu_to_node(cpu);
629 cur_phys_spu = (node * NUM_SPUS_PER_NODE)
630 + cur_spu_evnt_phys_spu_indx;
631 nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
632 + spu_evnt_phys_spu_indx;
553 633
554 /* 634 /*
555 * Each node will need to make the rtas call to start 635 * stop counters, save counter values, restore counts
556 * and stop SPU profiling. Get the token once and store it. 636 * for previous physical SPU
557 */ 637 */
558 spu_rtas_token = rtas_token("ibm,cbe-spu-perftools"); 638 cbe_disable_pm(cpu);
639 cbe_disable_pm_interrupts(cpu);
559 640
560 if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) { 641 spu_pm_cnt[cur_phys_spu]
561 printk(KERN_ERR 642 = cbe_read_ctr(cpu, 0);
562 "%s: rtas token ibm,cbe-spu-perftools unknown\n", 643
563 __func__); 644 /* restore previous count for the next spu to sample */
564 return -EIO; 645 /* NOTE, hardware issue, counter will not start if the
565 } 646 * counter value is at max (0xFFFFFFFF).
647 */
648 if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
649 cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
650 else
651 cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);
652
653 pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
654
655 /* setup the debug bus measure the one event and
656 * the two events to route the next SPU's PC on
657 * the debug bus
658 */
659 ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
660 if (ret)
661 printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
662 "SPU event swap\n", __func__);
663
664 /* clear the trace buffer, don't want to take PC for
 665 * previous SPU */
666 cbe_write_pm(cpu, trace_address, 0);
667
668 enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
669
670 /* Enable interrupts on the CPU thread that is starting */
671 cbe_enable_pm_interrupts(cpu, hdw_thread,
672 interrupt_mask);
673 cbe_enable_pm(cpu);
566 } 674 }
567 675
568 pm_rtas_token = rtas_token("ibm,cbe-perftools"); 676 spin_unlock_irqrestore(&cntr_lock, flags);
569 677
 678 /* swap approximately every 40 milliseconds (HZ / 25) */
679 mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
680}
681
682static void start_spu_event_swap(void)
683{
684 init_timer(&timer_spu_event_swap);
685 timer_spu_event_swap.function = spu_evnt_swap;
686 timer_spu_event_swap.data = 0UL;
687 timer_spu_event_swap.expires = jiffies + HZ / 25;
688 add_timer(&timer_spu_event_swap);
689}
690
691static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
692 struct op_system_config *sys, int num_ctrs)
693{
694 int i;
695
696 /* routine is called once for all nodes */
697
698 spu_evnt_phys_spu_indx = 0;
570 /* 699 /*
571 * For all events excetp PPU CYCLEs, each node will need to make 700 * For all events except PPU CYCLEs, each node will need to make
572 * the rtas cbe-perftools call to setup and reset the debug bus. 701 * the rtas cbe-perftools call to setup and reset the debug bus.
573 * Make the token lookup call once and store it in the global 702 * Make the token lookup call once and store it in the global
574 * variable pm_rtas_token. 703 * variable pm_rtas_token.
575 */ 704 */
705 pm_rtas_token = rtas_token("ibm,cbe-perftools");
706
576 if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) { 707 if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
577 printk(KERN_ERR 708 printk(KERN_ERR
578 "%s: rtas token ibm,cbe-perftools unknown\n", 709 "%s: rtas token ibm,cbe-perftools unknown\n",
@@ -580,6 +711,58 @@ static int cell_reg_setup(struct op_counter_config *ctr,
580 return -EIO; 711 return -EIO;
581 } 712 }
582 713
 714 /* setup the pm_control register settings; they
 715 * will be written per node by the
 716 * cell_cpu_setup() function.
717 */
718 pm_regs.pm_cntrl.trace_buf_ovflw = 1;
719
720 /* Use the occurrence trace mode to have SPU PC saved
721 * to the trace buffer. Occurrence data in trace buffer
722 * is not used. Bit 2 must be set to store SPU addresses.
723 */
724 pm_regs.pm_cntrl.trace_mode = 2;
725
726 pm_regs.pm_cntrl.spu_addr_trace = 0x1; /* using debug bus
727 event 2 & 3 */
728
729 /* setup the debug bus event array with the SPU PC routing events.
730 * Note, pm_signal[0] will be filled in by set_pm_event() call below.
731 */
732 pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
733 pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
734 pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
735 pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
736
737 pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
738 pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
739 pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
740 pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
741
742 /* Set the user selected spu event to profile on,
743 * note, only one SPU profiling event is supported
744 */
745 num_counters = 1; /* Only support one SPU event at a time */
746 set_pm_event(0, ctr[0].event, ctr[0].unit_mask);
747
748 reset_value[0] = 0xFFFFFFFF - ctr[0].count;
749
750 /* global, used by cell_cpu_setup */
751 ctr_enabled |= 1;
752
753 /* Initialize the count for each SPU to the reset value */
 754 for (i = 0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
755 spu_pm_cnt[i] = reset_value[0];
756
757 return 0;
758}
759
760static int cell_reg_setup_ppu(struct op_counter_config *ctr,
761 struct op_system_config *sys, int num_ctrs)
762{
763 /* routine is called once for all nodes */
764 int i, j, cpu;
765
583 num_counters = num_ctrs; 766 num_counters = num_ctrs;
584 767
585 if (unlikely(num_ctrs > NR_PHYS_CTRS)) { 768 if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
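
The reset_value assignment in cell_reg_setup_spu_events() is the standard overflow-sampling preload: seed the 32-bit counter with 0xFFFFFFFF - count so the overflow interrupt fires after roughly count events; cell_reg_setup_ppu() relies on the same trick. A quick check with a hypothetical count:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t count = 100000;   /* hypothetical user-chosen period */
            uint32_t reset_value = 0xFFFFFFFFu - count;

            printf("preload 0x%08X -> overflow after %u events\n",
                   reset_value, 0xFFFFFFFFu - reset_value);
            return 0;
    }
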
@@ -589,14 +772,6 @@ static int cell_reg_setup(struct op_counter_config *ctr,
589 __func__); 772 __func__);
590 return -EIO; 773 return -EIO;
591 } 774 }
592 pm_regs.group_control = 0;
593 pm_regs.debug_bus_control = 0;
594
595 /* setup the pm_control register */
596 memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
597 pm_regs.pm_cntrl.stop_at_max = 1;
598 pm_regs.pm_cntrl.trace_mode = 0;
599 pm_regs.pm_cntrl.freeze = 1;
600 775
601 set_count_mode(sys->enable_kernel, sys->enable_user); 776 set_count_mode(sys->enable_kernel, sys->enable_user);
602 777
@@ -665,6 +840,63 @@ static int cell_reg_setup(struct op_counter_config *ctr,
665} 840}
666 841
667 842
843/* This function is called once for all cpus combined */
844static int cell_reg_setup(struct op_counter_config *ctr,
845 struct op_system_config *sys, int num_ctrs)
846{
 847 int ret = 0;
848 spu_cycle_reset = 0;
849
 850 /* initialize the spu_addr_trace value, will be reset if
851 * doing spu event profiling.
852 */
853 pm_regs.group_control = 0;
854 pm_regs.debug_bus_control = 0;
855 pm_regs.pm_cntrl.stop_at_max = 1;
856 pm_regs.pm_cntrl.trace_mode = 0;
857 pm_regs.pm_cntrl.freeze = 1;
858 pm_regs.pm_cntrl.trace_buf_ovflw = 0;
859 pm_regs.pm_cntrl.spu_addr_trace = 0;
860
861 /*
862 * For all events except PPU CYCLEs, each node will need to make
863 * the rtas cbe-perftools call to setup and reset the debug bus.
864 * Make the token lookup call once and store it in the global
865 * variable pm_rtas_token.
866 */
867 pm_rtas_token = rtas_token("ibm,cbe-perftools");
868
869 if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
870 printk(KERN_ERR
871 "%s: rtas token ibm,cbe-perftools unknown\n",
872 __func__);
873 return -EIO;
874 }
875
876 if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
877 profiling_mode = SPU_PROFILING_CYCLES;
878 ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
879 } else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
880 (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
881 profiling_mode = SPU_PROFILING_EVENTS;
882 spu_cycle_reset = ctr[0].count;
883
884 /* for SPU event profiling, need to setup the
885 * pm_signal array with the events to route the
886 * SPU PC before making the FW call. Note, only
887 * one SPU event for profiling can be specified
888 * at a time.
889 */
890 cell_reg_setup_spu_events(ctr, sys, num_ctrs);
891 } else {
892 profiling_mode = PPU_PROFILING;
893 ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
894 }
895
896 return ret;
897}
898
899
668 900
669/* This function is called once for each cpu */ 901/* This function is called once for each cpu */
670static int cell_cpu_setup(struct op_counter_config *cntr) 902static int cell_cpu_setup(struct op_counter_config *cntr)
@@ -672,8 +904,13 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
672 u32 cpu = smp_processor_id(); 904 u32 cpu = smp_processor_id();
673 u32 num_enabled = 0; 905 u32 num_enabled = 0;
674 int i; 906 int i;
907 int ret;
675 908
 676 if (spu_cycle_reset) 909 /* Cycle-based SPU profiling does not use the performance
910 * counters. The trace array is configured to collect
911 * the data.
912 */
913 if (profiling_mode == SPU_PROFILING_CYCLES)
677 return 0; 914 return 0;
678 915
679 /* There is one performance monitor per processor chip (i.e. node), 916 /* There is one performance monitor per processor chip (i.e. node),
@@ -686,7 +923,6 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
686 cbe_disable_pm(cpu); 923 cbe_disable_pm(cpu);
687 cbe_disable_pm_interrupts(cpu); 924 cbe_disable_pm_interrupts(cpu);
688 925
689 cbe_write_pm(cpu, pm_interval, 0);
690 cbe_write_pm(cpu, pm_start_stop, 0); 926 cbe_write_pm(cpu, pm_start_stop, 0);
691 cbe_write_pm(cpu, group_control, pm_regs.group_control); 927 cbe_write_pm(cpu, group_control, pm_regs.group_control);
692 cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control); 928 cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
@@ -703,7 +939,20 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
703 * The pm_rtas_activate_signals will return -EIO if the FW 939 * The pm_rtas_activate_signals will return -EIO if the FW
704 * call failed. 940 * call failed.
705 */ 941 */
706 return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled); 942 if (profiling_mode == SPU_PROFILING_EVENTS) {
943 /* For SPU event profiling also need to setup the
944 * pm interval timer
945 */
946 ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
947 num_enabled+2);
948 /* store PC from debug bus to Trace buffer as often
949 * as possible (every 10 cycles)
950 */
951 cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
952 return ret;
953 } else
954 return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
955 num_enabled);
707} 956}
708 957
709#define ENTRIES 303 958#define ENTRIES 303
@@ -885,7 +1134,122 @@ static struct notifier_block cpu_freq_notifier_block = {
885}; 1134};
886#endif 1135#endif
887 1136
888static int cell_global_start_spu(struct op_counter_config *ctr) 1137/*
1138 * Note the generic OProfile stop calls do not support returning
1139 * an error on stop. Hence, will not return an error if the FW
1140 * calls fail on stop. Failure to reset the debug bus is not an issue.
1141 * Failure to disable the SPU profiling is not an issue. The FW calls
1142 * to enable the performance counters and debug bus will work even if
1143 * the hardware was not cleanly reset.
1144 */
1145static void cell_global_stop_spu_cycles(void)
1146{
1147 int subfunc, rtn_value;
1148 unsigned int lfsr_value;
1149 int cpu;
1150
1151 oprofile_running = 0;
1152 smp_wmb();
1153
1154#ifdef CONFIG_CPU_FREQ
1155 cpufreq_unregister_notifier(&cpu_freq_notifier_block,
1156 CPUFREQ_TRANSITION_NOTIFIER);
1157#endif
1158
1159 for_each_online_cpu(cpu) {
1160 if (cbe_get_hw_thread_id(cpu))
1161 continue;
1162
1163 subfunc = 3; /*
1164 * 2 - activate SPU tracing,
1165 * 3 - deactivate
1166 */
1167 lfsr_value = 0x8f100000;
1168
1169 rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
1170 subfunc, cbe_cpu_to_node(cpu),
1171 lfsr_value);
1172
1173 if (unlikely(rtn_value != 0)) {
1174 printk(KERN_ERR
1175 "%s: rtas call ibm,cbe-spu-perftools " \
1176 "failed, return = %d\n",
1177 __func__, rtn_value);
1178 }
1179
1180 /* Deactivate the signals */
1181 pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
1182 }
1183
1184 stop_spu_profiling_cycles();
1185}
1186
1187static void cell_global_stop_spu_events(void)
1188{
1189 int cpu;
1190 oprofile_running = 0;
1191
1192 stop_spu_profiling_events();
1193 smp_wmb();
1194
1195 for_each_online_cpu(cpu) {
1196 if (cbe_get_hw_thread_id(cpu))
1197 continue;
1198
1199 cbe_sync_irq(cbe_cpu_to_node(cpu));
1200 /* Stop the counters */
1201 cbe_disable_pm(cpu);
1202 cbe_write_pm07_control(cpu, 0, 0);
1203
1204 /* Deactivate the signals */
1205 pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
1206
1207 /* Deactivate interrupts */
1208 cbe_disable_pm_interrupts(cpu);
1209 }
1210 del_timer_sync(&timer_spu_event_swap);
1211}
1212
1213static void cell_global_stop_ppu(void)
1214{
1215 int cpu;
1216
1217 /*
1218 * This routine will be called once for the system.
1219 * There is one performance monitor per node, so we
1220 * only need to perform this function once per node.
1221 */
1222 del_timer_sync(&timer_virt_cntr);
1223 oprofile_running = 0;
1224 smp_wmb();
1225
1226 for_each_online_cpu(cpu) {
1227 if (cbe_get_hw_thread_id(cpu))
1228 continue;
1229
1230 cbe_sync_irq(cbe_cpu_to_node(cpu));
1231 /* Stop the counters */
1232 cbe_disable_pm(cpu);
1233
1234 /* Deactivate the signals */
1235 pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
1236
1237 /* Deactivate interrupts */
1238 cbe_disable_pm_interrupts(cpu);
1239 }
1240}
1241
1242static void cell_global_stop(void)
1243{
1244 if (profiling_mode == PPU_PROFILING)
1245 cell_global_stop_ppu();
1246 else if (profiling_mode == SPU_PROFILING_EVENTS)
1247 cell_global_stop_spu_events();
1248 else
1249 cell_global_stop_spu_cycles();
1250}
1251
1252static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
889{ 1253{
890 int subfunc; 1254 int subfunc;
891 unsigned int lfsr_value; 1255 unsigned int lfsr_value;
@@ -951,18 +1315,18 @@ static int cell_global_start_spu(struct op_counter_config *ctr)
951 1315
952 /* start profiling */ 1316 /* start profiling */
953 ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc, 1317 ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
954 cbe_cpu_to_node(cpu), lfsr_value); 1318 cbe_cpu_to_node(cpu), lfsr_value);
955 1319
956 if (unlikely(ret != 0)) { 1320 if (unlikely(ret != 0)) {
957 printk(KERN_ERR 1321 printk(KERN_ERR
958 "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n", 1322 "%s: rtas call ibm,cbe-spu-perftools failed, " \
959 __func__, ret); 1323 "return = %d\n", __func__, ret);
960 rtas_error = -EIO; 1324 rtas_error = -EIO;
961 goto out; 1325 goto out;
962 } 1326 }
963 } 1327 }
964 1328
965 rtas_error = start_spu_profiling(spu_cycle_reset); 1329 rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
966 if (rtas_error) 1330 if (rtas_error)
967 goto out_stop; 1331 goto out_stop;
968 1332
@@ -970,11 +1334,74 @@ static int cell_global_start_spu(struct op_counter_config *ctr)
970 return 0; 1334 return 0;
971 1335
972out_stop: 1336out_stop:
973 cell_global_stop_spu(); /* clean up the PMU/debug bus */ 1337 cell_global_stop_spu_cycles(); /* clean up the PMU/debug bus */
974out: 1338out:
975 return rtas_error; 1339 return rtas_error;
976} 1340}
977 1341
1342static int cell_global_start_spu_events(struct op_counter_config *ctr)
1343{
1344 int cpu;
1345 u32 interrupt_mask = 0;
1346 int rtn = 0;
1347
1348 hdw_thread = 0;
1349
 1350 /* spu event profiling uses the performance counters to generate
1351 * an interrupt. The hardware is setup to store the SPU program
1352 * counter into the trace array. The occurrence mode is used to
1353 * enable storing data to the trace buffer. The bits are set
1354 * to send/store the SPU address in the trace buffer. The debug
1355 * bus must be setup to route the SPU program counter onto the
1356 * debug bus. The occurrence data in the trace buffer is not used.
1357 */
1358
1359 /* This routine gets called once for the system.
1360 * There is one performance monitor per node, so we
1361 * only need to perform this function once per node.
1362 */
1363
1364 for_each_online_cpu(cpu) {
1365 if (cbe_get_hw_thread_id(cpu))
1366 continue;
1367
1368 /*
1369 * Setup SPU event-based profiling.
1370 * Set perf_mon_control bit 0 to a zero before
1371 * enabling spu collection hardware.
1372 *
1373 * Only support one SPU event on one SPU per node.
1374 */
1375 if (ctr_enabled & 1) {
1376 cbe_write_ctr(cpu, 0, reset_value[0]);
1377 enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
1378 interrupt_mask |=
1379 CBE_PM_CTR_OVERFLOW_INTR(0);
1380 } else {
1381 /* Disable counter */
1382 cbe_write_pm07_control(cpu, 0, 0);
1383 }
1384
1385 cbe_get_and_clear_pm_interrupts(cpu);
1386 cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
1387 cbe_enable_pm(cpu);
1388
1389 /* clear the trace buffer */
1390 cbe_write_pm(cpu, trace_address, 0);
1391 }
1392
1393 /* Start the timer to time slice collecting the event profile
1394 * on each of the SPUs. Note, can collect profile on one SPU
1395 * per node at a time.
1396 */
1397 start_spu_event_swap();
1398 start_spu_profiling_events();
1399 oprofile_running = 1;
1400 smp_wmb();
1401
1402 return rtn;
1403}
1404
978static int cell_global_start_ppu(struct op_counter_config *ctr) 1405static int cell_global_start_ppu(struct op_counter_config *ctr)
979{ 1406{
980 u32 cpu, i; 1407 u32 cpu, i;
@@ -994,8 +1421,7 @@ static int cell_global_start_ppu(struct op_counter_config *ctr)
994 if (ctr_enabled & (1 << i)) { 1421 if (ctr_enabled & (1 << i)) {
995 cbe_write_ctr(cpu, i, reset_value[i]); 1422 cbe_write_ctr(cpu, i, reset_value[i]);
996 enable_ctr(cpu, i, pm_regs.pm07_cntrl); 1423 enable_ctr(cpu, i, pm_regs.pm07_cntrl);
997 interrupt_mask |= 1424 interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
998 CBE_PM_CTR_OVERFLOW_INTR(i);
999 } else { 1425 } else {
1000 /* Disable counter */ 1426 /* Disable counter */
1001 cbe_write_pm07_control(cpu, i, 0); 1427 cbe_write_pm07_control(cpu, i, 0);
@@ -1024,99 +1450,162 @@ static int cell_global_start_ppu(struct op_counter_config *ctr)
1024 1450
1025static int cell_global_start(struct op_counter_config *ctr) 1451static int cell_global_start(struct op_counter_config *ctr)
1026{ 1452{
1027 if (spu_cycle_reset) 1453 if (profiling_mode == SPU_PROFILING_CYCLES)
1028 return cell_global_start_spu(ctr); 1454 return cell_global_start_spu_cycles(ctr);
1455 else if (profiling_mode == SPU_PROFILING_EVENTS)
1456 return cell_global_start_spu_events(ctr);
1029 else 1457 else
1030 return cell_global_start_ppu(ctr); 1458 return cell_global_start_ppu(ctr);
1031} 1459}
1032 1460
1033/* 1461
1034 * Note the generic OProfile stop calls do not support returning 1462/* The SPU interrupt handler
1035 * an error on stop. Hence, will not return an error if the FW 1463 *
1036 * calls fail on stop. Failure to reset the debug bus is not an issue. 1464 * SPU event profiling works as follows:
1037 * Failure to disable the SPU profiling is not an issue. The FW calls 1465 * The pm_signal[0] holds the one SPU event to be measured. It is routed on
1038 * to enable the performance counters and debug bus will work even if 1466 * the debug bus using word 0 or 1. The value of pm_signal[1] and
1039 * the hardware was not cleanly reset. 1467 * pm_signal[2] contain the necessary events to route the SPU program
1468 * counter for the selected SPU onto the debug bus using words 2 and 3.
1469 * The pm_interval register is setup to write the SPU PC value into the
1470 * trace buffer at the maximum rate possible. The trace buffer is configured
1471 * to store the PCs, wrapping when it is full. The performance counter is
 1472 * initialized to the max hardware count minus the number of events, N, between
 1473 * samples. Once the N events have occurred, a HW counter overflow occurs
1474 * causing the generation of a HW counter interrupt which also stops the
1475 * writing of the SPU PC values to the trace buffer. Hence the last PC
1476 * written to the trace buffer is the SPU PC that we want. Unfortunately,
1477 * we have to read from the beginning of the trace buffer to get to the
 1478 * last value written. We just hope the PPU has nothing better to do than
 1479 * service this interrupt. The PC for the specific SPU being profiled is
 1480 * extracted from the trace buffer, processed and stored. The trace buffer
1481 * is cleared, interrupts are cleared, the counter is reset to max - N.
1482 * A kernel timer is used to periodically call the routine spu_evnt_swap()
 1483 * to switch to the next physical SPU in the node to profile in round-robin
 1484 * order. This way data is collected for all SPUs on the node. It does mean
 1485 * that we need to use a relatively small value of N to ensure enough samples
 1486 * are collected on each SPU, since each SPU is profiled only 1/8 of the time.
1487 * It may also be necessary to use a longer sample collection period.
1040 */ 1488 */
1041static void cell_global_stop_spu(void) 1489static void cell_handle_interrupt_spu(struct pt_regs *regs,
1490 struct op_counter_config *ctr)
1042{ 1491{
1043 int subfunc, rtn_value; 1492 u32 cpu, cpu_tmp;
1044 unsigned int lfsr_value; 1493 u64 trace_entry;
1045 int cpu; 1494 u32 interrupt_mask;
1495 u64 trace_buffer[2];
1496 u64 last_trace_buffer;
1497 u32 sample;
1498 u32 trace_addr;
1499 unsigned long sample_array_lock_flags;
1500 int spu_num;
1501 unsigned long flags;
1046 1502
1047 oprofile_running = 0; 1503 /* Make sure spu event interrupt handler and spu event swap
1504 * don't access the counters simultaneously.
1505 */
1506 cpu = smp_processor_id();
1507 spin_lock_irqsave(&cntr_lock, flags);
1048 1508
1049#ifdef CONFIG_CPU_FREQ 1509 cpu_tmp = cpu;
1050 cpufreq_unregister_notifier(&cpu_freq_notifier_block, 1510 cbe_disable_pm(cpu);
1051 CPUFREQ_TRANSITION_NOTIFIER);
1052#endif
1053 1511
1054 for_each_online_cpu(cpu) { 1512 interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
1055 if (cbe_get_hw_thread_id(cpu))
1056 continue;
1057 1513
1058 subfunc = 3; /* 1514 sample = 0xABCDEF;
1059 * 2 - activate SPU tracing, 1515 trace_entry = 0xfedcba;
1060 * 3 - deactivate 1516 last_trace_buffer = 0xdeadbeaf;
1061 */
1062 lfsr_value = 0x8f100000;
1063 1517
1064 rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL, 1518 if ((oprofile_running == 1) && (interrupt_mask != 0)) {
1065 subfunc, cbe_cpu_to_node(cpu), 1519 /* disable writes to trace buff */
1066 lfsr_value); 1520 cbe_write_pm(cpu, pm_interval, 0);
1067 1521
1068 if (unlikely(rtn_value != 0)) { 1522 /* only have one perf cntr being used, cntr 0 */
1069 printk(KERN_ERR 1523 if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
1070 "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n", 1524 && ctr[0].enabled)
1071 __func__, rtn_value); 1525 /* The SPU PC values will be read
1526 * from the trace buffer, reset counter
1527 */
1528
1529 cbe_write_ctr(cpu, 0, reset_value[0]);
1530
1531 trace_addr = cbe_read_pm(cpu, trace_address);
1532
1533 while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
1534 /* There is data in the trace buffer to process
1535 * Read the buffer until you get to the last
1536 * entry. This is the value we want.
1537 */
1538
1539 cbe_read_trace_buffer(cpu, trace_buffer);
1540 trace_addr = cbe_read_pm(cpu, trace_address);
1072 } 1541 }
1073 1542
1074 /* Deactivate the signals */ 1543 /* SPU Address 16 bit count format for 128 bit
1075 pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); 1544 * HW trace buffer is used for the SPU PC storage
1076 } 1545 * HDR bits 0:15
1546 * SPU Addr 0 bits 16:31
1547 * SPU Addr 1 bits 32:47
1548 * unused bits 48:127
1549 *
1550 * HDR: bit4 = 1 SPU Address 0 valid
1551 * HDR: bit5 = 1 SPU Address 1 valid
1552 * - unfortunately, the valid bits don't seem to work
1553 *
1554 * Note trace_buffer[0] holds bits 0:63 of the HW
1555 * trace buffer, trace_buffer[1] holds bits 64:127
1556 */
1077 1557
1078 stop_spu_profiling(); 1558 trace_entry = trace_buffer[0]
1079} 1559 & 0x00000000FFFF0000;
1080 1560
 1081static void cell_global_stop_ppu(void) 1561 /* only the top 16 bits of the 18-bit SPU PC
 1082{ 1562 * address are stored in the trace buffer, hence
 1083 int cpu; 1563 * shift right by 16 - 2 bits */
1564 sample = trace_entry >> 14;
1565 last_trace_buffer = trace_buffer[0];
1084 1566
1085 /* 1567 spu_num = spu_evnt_phys_spu_indx
1086 * This routine will be called once for the system. 1568 + (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
1087 * There is one performance monitor per node, so we
1088 * only need to perform this function once per node.
1089 */
1090 del_timer_sync(&timer_virt_cntr);
1091 oprofile_running = 0;
1092 smp_wmb();
1093 1569
1094 for_each_online_cpu(cpu) { 1570 /* make sure only one process at a time is calling
1095 if (cbe_get_hw_thread_id(cpu)) 1571 * spu_sync_buffer()
1096 continue; 1572 */
1573 spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
1574 sample_array_lock_flags);
1575 spu_sync_buffer(spu_num, &sample, 1);
1576 spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
1577 sample_array_lock_flags);
1097 1578
 1098 cbe_sync_irq(cbe_cpu_to_node(cpu)); 1579 smp_wmb(); /* ensure spu event buffer updates are written
1099 /* Stop the counters */ 1580 * don't want events intermingled... */
1100 cbe_disable_pm(cpu);
1101 1581
1102 /* Deactivate the signals */ 1582 /* The counters were frozen by the interrupt.
1103 pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); 1583 * Reenable the interrupt and restart the counters.
1584 */
1585 cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
1586 cbe_enable_pm_interrupts(cpu, hdw_thread,
1587 virt_cntr_inter_mask);
1104 1588
1105 /* Deactivate interrupts */ 1589 /* clear the trace buffer, re-enable writes to trace buff */
1106 cbe_disable_pm_interrupts(cpu); 1590 cbe_write_pm(cpu, trace_address, 0);
1107 } 1591 cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
1108}
1109 1592
 1110static void cell_global_stop(void) 1593 /* The writes to the various performance counters only write
1111{ 1594 * to a latch. The new values (interrupt setting bits, reset
1112 if (spu_cycle_reset) 1595 * counter value etc.) are not copied to the actual registers
1113 cell_global_stop_spu(); 1596 * until the performance monitor is enabled. In order to get
 1114 else 1597 * this to work as desired, the performance monitor needs to
1115 cell_global_stop_ppu(); 1598 * be disabled while writing to the latches. This is a
1599 * HW design issue.
1600 */
1601 write_pm_cntrl(cpu);
1602 cbe_enable_pm(cpu);
1603 }
1604 spin_unlock_irqrestore(&cntr_lock, flags);
1116} 1605}
1117 1606
1118static void cell_handle_interrupt(struct pt_regs *regs, 1607static void cell_handle_interrupt_ppu(struct pt_regs *regs,
1119 struct op_counter_config *ctr) 1608 struct op_counter_config *ctr)
1120{ 1609{
1121 u32 cpu; 1610 u32 cpu;
1122 u64 pc; 1611 u64 pc;
@@ -1132,7 +1621,7 @@ static void cell_handle_interrupt(struct pt_regs *regs,
1132 * routine are not running at the same time. See the 1621 * routine are not running at the same time. See the
1133 * cell_virtual_cntr() routine for additional comments. 1622 * cell_virtual_cntr() routine for additional comments.
1134 */ 1623 */
1135 spin_lock_irqsave(&virt_cntr_lock, flags); 1624 spin_lock_irqsave(&cntr_lock, flags);
1136 1625
1137 /* 1626 /*
1138 * Need to disable and reenable the performance counters 1627 * Need to disable and reenable the performance counters
@@ -1185,7 +1674,16 @@ static void cell_handle_interrupt(struct pt_regs *regs,
1185 */ 1674 */
1186 cbe_enable_pm(cpu); 1675 cbe_enable_pm(cpu);
1187 } 1676 }
1188 spin_unlock_irqrestore(&virt_cntr_lock, flags); 1677 spin_unlock_irqrestore(&cntr_lock, flags);
1678}
1679
1680static void cell_handle_interrupt(struct pt_regs *regs,
1681 struct op_counter_config *ctr)
1682{
1683 if (profiling_mode == PPU_PROFILING)
1684 cell_handle_interrupt_ppu(regs, ctr);
1685 else
1686 cell_handle_interrupt_spu(regs, ctr);
1189} 1687}
1190 1688
1191/* 1689/*
@@ -1195,7 +1693,8 @@ static void cell_handle_interrupt(struct pt_regs *regs,
1195 */ 1693 */
1196static int cell_sync_start(void) 1694static int cell_sync_start(void)
1197{ 1695{
1198 if (spu_cycle_reset) 1696 if ((profiling_mode == SPU_PROFILING_CYCLES) ||
1697 (profiling_mode == SPU_PROFILING_EVENTS))
1199 return spu_sync_start(); 1698 return spu_sync_start();
1200 else 1699 else
1201 return DO_GENERIC_SYNC; 1700 return DO_GENERIC_SYNC;
@@ -1203,7 +1702,8 @@ static int cell_sync_start(void)
1203 1702
1204static int cell_sync_stop(void) 1703static int cell_sync_stop(void)
1205{ 1704{
1206 if (spu_cycle_reset) 1705 if ((profiling_mode == SPU_PROFILING_CYCLES) ||
1706 (profiling_mode == SPU_PROFILING_EVENTS))
1207 return spu_sync_stop(); 1707 return spu_sync_stop();
1208 else 1708 else
1209 return 1; 1709 return 1;
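
A minimal sketch (not part of the patch) of the trace-entry decode that
cell_handle_interrupt_spu() performs above, using the very same mask and
shift; it assumes, as the comment block states, that trace_buffer[0]
holds bits 0:63 of the 128-bit hardware trace entry:

	/* hypothetical helper mirroring the handler's arithmetic */
	static u32 spu_pc_from_trace(u64 word0)
	{
		/* SPU Addr 0 field, same mask as in the handler */
		u64 addr = word0 & 0x00000000FFFF0000ULL;

		/* only the top 16 bits of the 18-bit PC are stored, so
		 * shift by 16 - 2; the low two PC bits read back as 0 */
		return (u32)(addr >> 14);
	}
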
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index ae7c34f37e1c..98367a0255f3 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -42,7 +42,7 @@ static struct of_device_id mpc52xx_bus_ids[] __initdata = {
42 * from interrupt context while node mapping (which calls ioremap()) 42 * from interrupt context while node mapping (which calls ioremap())
43 * cannot be used at such point. 43 * cannot be used at such point.
44 */ 44 */
45static spinlock_t mpc52xx_lock = SPIN_LOCK_UNLOCKED; 45static DEFINE_SPINLOCK(mpc52xx_lock);
46static struct mpc52xx_gpt __iomem *mpc52xx_wdt; 46static struct mpc52xx_gpt __iomem *mpc52xx_wdt;
47static struct mpc52xx_cdm __iomem *mpc52xx_cdm; 47static struct mpc52xx_cdm __iomem *mpc52xx_cdm;
48 48
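
The SPIN_LOCK_UNLOCKED initializer is deprecated because one shared
initializer defeats the per-lock tracking that spinlock debugging and
lockdep rely on; DEFINE_SPINLOCK() declares and initializes the lock in
a single step. A sketch of the two idiomatic forms (names illustrative):

	static DEFINE_SPINLOCK(my_static_lock);	/* file-scope locks */

	struct foo {
		spinlock_t lock;
	};

	static void foo_init(struct foo *f)
	{
		spin_lock_init(&f->lock);	/* locks embedded in objects */
	}
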
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index a428f8d1ac80..5177bdd2c62a 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -42,7 +42,7 @@ static void __init mpc831x_rdb_setup_arch(void)
42 mpc831x_usb_cfg(); 42 mpc831x_usb_cfg();
43} 43}
44 44
45void __init mpc831x_rdb_init_IRQ(void) 45static void __init mpc831x_rdb_init_IRQ(void)
46{ 46{
47 struct device_node *np; 47 struct device_node *np;
48 48
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index ec43477caa63..ec0b401bc9cf 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -49,8 +49,6 @@
49#define DBG(fmt...) 49#define DBG(fmt...)
50#endif 50#endif
51 51
52static u8 *bcsr_regs = NULL;
53
54/* ************************************************************************ 52/* ************************************************************************
55 * 53 *
56 * Setup the architecture 54 * Setup the architecture
@@ -59,13 +57,14 @@ static u8 *bcsr_regs = NULL;
59static void __init mpc832x_sys_setup_arch(void) 57static void __init mpc832x_sys_setup_arch(void)
60{ 58{
61 struct device_node *np; 59 struct device_node *np;
60 u8 __iomem *bcsr_regs = NULL;
62 61
63 if (ppc_md.progress) 62 if (ppc_md.progress)
64 ppc_md.progress("mpc832x_sys_setup_arch()", 0); 63 ppc_md.progress("mpc832x_sys_setup_arch()", 0);
65 64
66 /* Map BCSR area */ 65 /* Map BCSR area */
67 np = of_find_node_by_name(NULL, "bcsr"); 66 np = of_find_node_by_name(NULL, "bcsr");
68 if (np != 0) { 67 if (np) {
69 struct resource res; 68 struct resource res;
70 69
71 of_address_to_resource(np, 0, &res); 70 of_address_to_resource(np, 0, &res);
@@ -93,9 +92,9 @@ static void __init mpc832x_sys_setup_arch(void)
93 != NULL){ 92 != NULL){
94 /* Reset the Ethernet PHYs */ 93 /* Reset the Ethernet PHYs */
95#define BCSR8_FETH_RST 0x50 94#define BCSR8_FETH_RST 0x50
96 bcsr_regs[8] &= ~BCSR8_FETH_RST; 95 clrbits8(&bcsr_regs[8], BCSR8_FETH_RST);
97 udelay(1000); 96 udelay(1000);
98 bcsr_regs[8] |= BCSR8_FETH_RST; 97 setbits8(&bcsr_regs[8], BCSR8_FETH_RST);
99 iounmap(bcsr_regs); 98 iounmap(bcsr_regs);
100 of_node_put(np); 99 of_node_put(np);
101 } 100 }
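
The switch from open-coded read-modify-write on the ioremapped BCSR to
the powerpc byte accessors is roughly equivalent to the following
(a sketch ignoring the macros' exact argument handling); going through
the accessors adds the ordering guarantees MMIO accesses need:

	/* setbits8(a, m)        ~ out_8(a, in_8(a) | m)        */
	/* clrbits8(a, m)        ~ out_8(a, in_8(a) & ~m)       */
	/* clrsetbits_8(a, c, s) ~ out_8(a, (in_8(a) & ~c) | s) */
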
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 0300268ce5b8..2a1295f19832 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -38,6 +38,7 @@
38#define DBG(fmt...) 38#define DBG(fmt...)
39#endif 39#endif
40 40
41#ifdef CONFIG_QUICC_ENGINE
41static void mpc83xx_spi_activate_cs(u8 cs, u8 polarity) 42static void mpc83xx_spi_activate_cs(u8 cs, u8 polarity)
42{ 43{
43 pr_debug("%s %d %d\n", __func__, cs, polarity); 44 pr_debug("%s %d %d\n", __func__, cs, polarity);
@@ -77,8 +78,8 @@ static int __init mpc832x_spi_init(void)
77 mpc83xx_spi_activate_cs, 78 mpc83xx_spi_activate_cs,
78 mpc83xx_spi_deactivate_cs); 79 mpc83xx_spi_deactivate_cs);
79} 80}
80
81machine_device_initcall(mpc832x_rdb, mpc832x_spi_init); 81machine_device_initcall(mpc832x_rdb, mpc832x_spi_init);
82#endif /* CONFIG_QUICC_ENGINE */
82 83
83/* ************************************************************************ 84/* ************************************************************************
84 * 85 *
@@ -130,7 +131,7 @@ static int __init mpc832x_declare_of_platform_devices(void)
130} 131}
131machine_device_initcall(mpc832x_rdb, mpc832x_declare_of_platform_devices); 132machine_device_initcall(mpc832x_rdb, mpc832x_declare_of_platform_devices);
132 133
133void __init mpc832x_rdb_init_IRQ(void) 134static void __init mpc832x_rdb_init_IRQ(void)
134{ 135{
135 136
136 struct device_node *np; 137 struct device_node *np;
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 9d46e5bdd101..09e9d6fb7411 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/stddef.h> 19#include <linux/stddef.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/compiler.h>
21#include <linux/init.h> 22#include <linux/init.h>
22#include <linux/errno.h> 23#include <linux/errno.h>
23#include <linux/reboot.h> 24#include <linux/reboot.h>
@@ -43,6 +44,7 @@
43#include <asm/udbg.h> 44#include <asm/udbg.h>
44#include <sysdev/fsl_soc.h> 45#include <sysdev/fsl_soc.h>
45#include <sysdev/fsl_pci.h> 46#include <sysdev/fsl_pci.h>
47#include <sysdev/simple_gpio.h>
46#include <asm/qe.h> 48#include <asm/qe.h>
47#include <asm/qe_ic.h> 49#include <asm/qe_ic.h>
48 50
@@ -55,8 +57,6 @@
55#define DBG(fmt...) 57#define DBG(fmt...)
56#endif 58#endif
57 59
58static u8 *bcsr_regs = NULL;
59
60/* ************************************************************************ 60/* ************************************************************************
61 * 61 *
62 * Setup the architecture 62 * Setup the architecture
@@ -65,13 +65,14 @@ static u8 *bcsr_regs = NULL;
65static void __init mpc836x_mds_setup_arch(void) 65static void __init mpc836x_mds_setup_arch(void)
66{ 66{
67 struct device_node *np; 67 struct device_node *np;
68 u8 __iomem *bcsr_regs = NULL;
68 69
69 if (ppc_md.progress) 70 if (ppc_md.progress)
70 ppc_md.progress("mpc836x_mds_setup_arch()", 0); 71 ppc_md.progress("mpc836x_mds_setup_arch()", 0);
71 72
72 /* Map BCSR area */ 73 /* Map BCSR area */
73 np = of_find_node_by_name(NULL, "bcsr"); 74 np = of_find_node_by_name(NULL, "bcsr");
74 if (np != 0) { 75 if (np) {
75 struct resource res; 76 struct resource res;
76 77
77 of_address_to_resource(np, 0, &res); 78 of_address_to_resource(np, 0, &res);
@@ -93,6 +94,16 @@ static void __init mpc836x_mds_setup_arch(void)
93 94
94 for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;) 95 for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;)
95 par_io_of_config(np); 96 par_io_of_config(np);
97#ifdef CONFIG_QE_USB
98 /* Must fixup Par IO before QE GPIO chips are registered. */
99 par_io_config_pin(1, 2, 1, 0, 3, 0); /* USBOE */
100 par_io_config_pin(1, 3, 1, 0, 3, 0); /* USBTP */
101 par_io_config_pin(1, 8, 1, 0, 1, 0); /* USBTN */
102 par_io_config_pin(1, 10, 2, 0, 3, 0); /* USBRXD */
103 par_io_config_pin(1, 9, 2, 1, 3, 0); /* USBRP */
104 par_io_config_pin(1, 11, 2, 1, 3, 0); /* USBRN */
105 par_io_config_pin(2, 20, 2, 0, 1, 0); /* CLK21 */
106#endif /* CONFIG_QE_USB */
96 } 107 }
97 108
98 if ((np = of_find_compatible_node(NULL, "network", "ucc_geth")) 109 if ((np = of_find_compatible_node(NULL, "network", "ucc_geth"))
@@ -151,6 +162,70 @@ static int __init mpc836x_declare_of_platform_devices(void)
151} 162}
152machine_device_initcall(mpc836x_mds, mpc836x_declare_of_platform_devices); 163machine_device_initcall(mpc836x_mds, mpc836x_declare_of_platform_devices);
153 164
165#ifdef CONFIG_QE_USB
166static int __init mpc836x_usb_cfg(void)
167{
168 u8 __iomem *bcsr;
169 struct device_node *np;
170 const char *mode;
171 int ret = 0;
172
173 np = of_find_compatible_node(NULL, NULL, "fsl,mpc8360mds-bcsr");
174 if (!np)
175 return -ENODEV;
176
177 bcsr = of_iomap(np, 0);
178 of_node_put(np);
179 if (!bcsr)
180 return -ENOMEM;
181
182 np = of_find_compatible_node(NULL, NULL, "fsl,mpc8323-qe-usb");
183 if (!np) {
184 ret = -ENODEV;
185 goto err;
186 }
187
188#define BCSR8_TSEC1M_MASK (0x3 << 6)
189#define BCSR8_TSEC1M_RGMII (0x0 << 6)
190#define BCSR8_TSEC2M_MASK (0x3 << 4)
191#define BCSR8_TSEC2M_RGMII (0x0 << 4)
192 /*
193 * Default is GMII (2), but we should set it to RGMII (0) if we use
194 * USB (Eth PHY is in RGMII mode anyway).
195 */
196 clrsetbits_8(&bcsr[8], BCSR8_TSEC1M_MASK | BCSR8_TSEC2M_MASK,
197 BCSR8_TSEC1M_RGMII | BCSR8_TSEC2M_RGMII);
198
199#define BCSR13_USBMASK 0x0f
200#define BCSR13_nUSBEN 0x08 /* 1 - Disable, 0 - Enable */
201#define BCSR13_USBSPEED 0x04 /* 1 - Full, 0 - Low */
202#define BCSR13_USBMODE 0x02 /* 1 - Host, 0 - Function */
203#define BCSR13_nUSBVCC 0x01 /* 1 - gets VBUS, 0 - supplies VBUS */
204
205 clrsetbits_8(&bcsr[13], BCSR13_USBMASK, BCSR13_USBSPEED);
206
207 mode = of_get_property(np, "mode", NULL);
208 if (mode && !strcmp(mode, "peripheral")) {
209 setbits8(&bcsr[13], BCSR13_nUSBVCC);
210 qe_usb_clock_set(QE_CLK21, 48000000);
211 } else {
212 setbits8(&bcsr[13], BCSR13_USBMODE);
213 /*
214 * The BCSR GPIOs are used to control power and
215 * speed of the USB transceiver. This is needed for
216 * the USB Host only.
217 */
218 simple_gpiochip_init("fsl,mpc8360mds-bcsr-gpio");
219 }
220
221 of_node_put(np);
222err:
223 iounmap(bcsr);
224 return ret;
225}
226machine_arch_initcall(mpc836x_mds, mpc836x_usb_cfg);
227#endif /* CONFIG_QE_USB */
228
154static void __init mpc836x_mds_init_IRQ(void) 229static void __init mpc836x_mds_init_IRQ(void)
155{ 230{
156 struct device_node *np; 231 struct device_node *np;
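
A worked example of the BCSR13 sequence in mpc836x_usb_cfg() above,
tracking only the low nibble covered by the BCSR13_* masks:

	/* clrsetbits_8(&bcsr[13], BCSR13_USBMASK, BCSR13_USBSPEED)
	 *   -> xxxx0100: nUSBEN=0 (enabled), USBSPEED=1 (full speed),
	 *      USBMODE=0 (function), nUSBVCC=0 (supplies VBUS)
	 *
	 * peripheral: setbits8(&bcsr[13], BCSR13_nUSBVCC)
	 *   -> xxxx0101: the board now gets VBUS from the host
	 *
	 * host: setbits8(&bcsr[13], BCSR13_USBMODE)
	 *   -> xxxx0110: host mode; the BCSR GPIO chip registered next
	 *      then controls transceiver power and speed
	 */
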
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index a5273bb28e1b..b0090aac9642 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -51,8 +51,9 @@ static void __init mpc836x_rdk_setup_arch(void)
51 for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") 51 for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
52 mpc83xx_add_bridge(np); 52 mpc83xx_add_bridge(np);
53#endif 53#endif
54 54#ifdef CONFIG_QUICC_ENGINE
55 qe_reset(); 55 qe_reset();
56#endif
56} 57}
57 58
58static void __init mpc836x_rdk_init_IRQ(void) 59static void __init mpc836x_rdk_init_IRQ(void)
@@ -71,13 +72,14 @@ static void __init mpc836x_rdk_init_IRQ(void)
71 */ 72 */
72 ipic_set_default_priority(); 73 ipic_set_default_priority();
73 of_node_put(np); 74 of_node_put(np);
74 75#ifdef CONFIG_QUICC_ENGINE
75 np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); 76 np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
76 if (!np) 77 if (!np)
77 return; 78 return;
78 79
79 qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic); 80 qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
80 of_node_put(np); 81 of_node_put(np);
82#endif
81} 83}
82 84
83/* 85/*
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 8bb13c807142..530ef990ca7c 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -26,7 +26,6 @@
26#define BCSR12_USB_SER_MASK 0x8a 26#define BCSR12_USB_SER_MASK 0x8a
27#define BCSR12_USB_SER_PIN 0x80 27#define BCSR12_USB_SER_PIN 0x80
28#define BCSR12_USB_SER_DEVICE 0x02 28#define BCSR12_USB_SER_DEVICE 0x02
29extern int mpc837x_usb_cfg(void);
30 29
31static int mpc837xmds_usb_cfg(void) 30static int mpc837xmds_usb_cfg(void)
32{ 31{
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index da030afa2e2c..1d096545322b 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -21,8 +21,6 @@
21 21
22#include "mpc83xx.h" 22#include "mpc83xx.h"
23 23
24extern int mpc837x_usb_cfg(void);
25
26/* ************************************************************************ 24/* ************************************************************************
27 * 25 *
28 * Setup the architecture 26 * Setup the architecture
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 2a7cbabb410a..83cfe51526ec 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -61,6 +61,7 @@
61 61
62extern void mpc83xx_restart(char *cmd); 62extern void mpc83xx_restart(char *cmd);
63extern long mpc83xx_time_init(void); 63extern long mpc83xx_time_init(void);
64extern int mpc837x_usb_cfg(void);
64extern int mpc834x_usb_cfg(void); 65extern int mpc834x_usb_cfg(void);
65extern int mpc831x_usb_cfg(void); 66extern int mpc831x_usb_cfg(void);
66 67
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index a8301c8ad537..7326d904202c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -148,6 +148,9 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
148/* 148/*
149 * Setup the architecture 149 * Setup the architecture
150 */ 150 */
151#ifdef CONFIG_SMP
152extern void __init mpc85xx_smp_init(void);
153#endif
151static void __init mpc85xx_ds_setup_arch(void) 154static void __init mpc85xx_ds_setup_arch(void)
152{ 155{
153#ifdef CONFIG_PCI 156#ifdef CONFIG_PCI
@@ -173,6 +176,10 @@ static void __init mpc85xx_ds_setup_arch(void)
173 ppc_md.pci_exclude_device = mpc85xx_exclude_device; 176 ppc_md.pci_exclude_device = mpc85xx_exclude_device;
174#endif 177#endif
175 178
179#ifdef CONFIG_SMP
180 mpc85xx_smp_init();
181#endif
182
176 printk("MPC85xx DS board from Freescale Semiconductor\n"); 183 printk("MPC85xx DS board from Freescale Semiconductor\n");
177} 184}
178 185
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index d652c713f496..79a0df17078b 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -58,6 +58,7 @@ smp_85xx_kick_cpu(int nr)
58 58
59 if (cpu_rel_addr == NULL) { 59 if (cpu_rel_addr == NULL) {
60 printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr); 60 printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
61 local_irq_restore(flags);
61 return; 62 return;
62 } 63 }
63 64
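
The one-line fix above closes an error path that returned with
interrupts still disabled. The general shape, assuming the branch is
reached after a local_irq_save(flags) earlier in the function (which
the unchanged context implies):

	local_irq_save(flags);
	...
	if (cpu_rel_addr == NULL) {
		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
		local_irq_restore(flags);	/* every early return must undo the save */
		return;
	}
	...
	local_irq_restore(flags);
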
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 47e956c871fe..47fe2bea9865 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -312,4 +312,15 @@ config MPC8xxx_GPIO
312 Say Y here if you're going to use hardware that connects to the 312 Say Y here if you're going to use hardware that connects to the
313 MPC831x/834x/837x/8572/8610 GPIOs. 313 MPC831x/834x/837x/8572/8610 GPIOs.
314 314
315config SIMPLE_GPIO
316 bool "Support for simple, memory-mapped GPIO controllers"
317 depends on PPC
318 select GENERIC_GPIO
319 select ARCH_REQUIRE_GPIOLIB
320 help
321 Say Y here to support simple, memory-mapped GPIO controllers.
 322 These are usually BCSRs used to control a board's switches, LEDs,
 323 chip selects, Ethernet/USB PHY power and various other small
324 on-board peripherals.
325
315endmenu 326endmenu
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 3d0c776f888d..e868b5c50723 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -231,7 +231,7 @@ config VIRT_CPU_ACCOUNTING
231 If in doubt, say Y here. 231 If in doubt, say Y here.
232 232
233config SMP 233config SMP
234 depends on PPC_STD_MMU 234 depends on PPC_STD_MMU || FSL_BOOKE
235 bool "Symmetric multi-processing support" 235 bool "Symmetric multi-processing support"
236 ---help--- 236 ---help---
237 This enables support for systems with more than one CPU. If you have 237 This enables support for systems with more than one CPU. If you have
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 2e67bd840e01..35b1ec492715 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -44,8 +44,8 @@ static DEFINE_SPINLOCK(beat_htab_lock);
44 44
45static inline unsigned int beat_read_mask(unsigned hpte_group) 45static inline unsigned int beat_read_mask(unsigned hpte_group)
46{ 46{
47 unsigned long hpte_v[5];
48 unsigned long rmask = 0; 47 unsigned long rmask = 0;
48 u64 hpte_v[5];
49 49
50 beat_read_htab_entries(0, hpte_group + 0, hpte_v); 50 beat_read_htab_entries(0, hpte_group + 0, hpte_v);
51 if (!(hpte_v[0] & HPTE_V_BOLTED)) 51 if (!(hpte_v[0] & HPTE_V_BOLTED))
@@ -93,8 +93,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
93 int psize, int ssize) 93 int psize, int ssize)
94{ 94{
95 unsigned long lpar_rc; 95 unsigned long lpar_rc;
96 unsigned long slot; 96 u64 hpte_v, hpte_r, slot;
97 unsigned long hpte_v, hpte_r;
98 97
99 /* same as iseries */ 98 /* same as iseries */
100 if (vflags & HPTE_V_SECONDARY) 99 if (vflags & HPTE_V_SECONDARY)
@@ -153,8 +152,9 @@ static long beat_lpar_hpte_remove(unsigned long hpte_group)
153 152
154static unsigned long beat_lpar_hpte_getword0(unsigned long slot) 153static unsigned long beat_lpar_hpte_getword0(unsigned long slot)
155{ 154{
156 unsigned long dword0, dword[5]; 155 unsigned long dword0;
157 unsigned long lpar_rc; 156 unsigned long lpar_rc;
157 u64 dword[5];
158 158
159 lpar_rc = beat_read_htab_entries(0, slot & ~3UL, dword); 159 lpar_rc = beat_read_htab_entries(0, slot & ~3UL, dword);
160 160
@@ -170,7 +170,7 @@ static void beat_lpar_hptab_clear(void)
170 unsigned long size_bytes = 1UL << ppc64_pft_size; 170 unsigned long size_bytes = 1UL << ppc64_pft_size;
171 unsigned long hpte_count = size_bytes >> 4; 171 unsigned long hpte_count = size_bytes >> 4;
172 int i; 172 int i;
173 unsigned long dummy0, dummy1; 173 u64 dummy0, dummy1;
174 174
175 /* TODO: Use bulk call */ 175 /* TODO: Use bulk call */
176 for (i = 0; i < hpte_count; i++) 176 for (i = 0; i < hpte_count; i++)
@@ -189,7 +189,8 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
189 int psize, int ssize, int local) 189 int psize, int ssize, int local)
190{ 190{
191 unsigned long lpar_rc; 191 unsigned long lpar_rc;
192 unsigned long dummy0, dummy1, want_v; 192 u64 dummy0, dummy1;
193 unsigned long want_v;
193 194
194 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 195 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
195 196
@@ -255,7 +256,8 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
255 unsigned long ea, 256 unsigned long ea,
256 int psize, int ssize) 257 int psize, int ssize)
257{ 258{
258 unsigned long lpar_rc, slot, vsid, va, dummy0, dummy1; 259 unsigned long lpar_rc, slot, vsid, va;
260 u64 dummy0, dummy1;
259 261
260 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M); 262 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
261 va = (vsid << 28) | (ea & 0x0fffffff); 263 va = (vsid << 28) | (ea & 0x0fffffff);
@@ -276,7 +278,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
276{ 278{
277 unsigned long want_v; 279 unsigned long want_v;
278 unsigned long lpar_rc; 280 unsigned long lpar_rc;
279 unsigned long dummy1, dummy2; 281 u64 dummy1, dummy2;
280 unsigned long flags; 282 unsigned long flags;
281 283
282 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 284 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
@@ -315,8 +317,7 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
315 int psize, int ssize) 317 int psize, int ssize)
316{ 318{
317 unsigned long lpar_rc; 319 unsigned long lpar_rc;
318 unsigned long slot; 320 u64 hpte_v, hpte_r, slot;
319 unsigned long hpte_v, hpte_r;
320 321
321 /* same as iseries */ 322 /* same as iseries */
322 if (vflags & HPTE_V_SECONDARY) 323 if (vflags & HPTE_V_SECONDARY)
diff --git a/arch/powerpc/platforms/cell/beat_udbg.c b/arch/powerpc/platforms/cell/beat_udbg.c
index 6b418f6b6175..350735bc8888 100644
--- a/arch/powerpc/platforms/cell/beat_udbg.c
+++ b/arch/powerpc/platforms/cell/beat_udbg.c
@@ -40,8 +40,8 @@ static void udbg_putc_beat(char c)
40} 40}
41 41
42/* Buffered chars getc */ 42/* Buffered chars getc */
43static long inbuflen; 43static u64 inbuflen;
44static long inbuf[2]; /* must be 2 longs */ 44static u64 inbuf[2]; /* must be 2 u64s */
45 45
46static int udbg_getc_poll_beat(void) 46static int udbg_getc_poll_beat(void)
47{ 47{
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
index 70fa7aef5edd..20472e487b6f 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
@@ -54,7 +54,7 @@ int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
54{ 54{
55 struct cbe_pmd_regs __iomem *pmd_regs; 55 struct cbe_pmd_regs __iomem *pmd_regs;
56 struct cbe_mic_tm_regs __iomem *mic_tm_regs; 56 struct cbe_mic_tm_regs __iomem *mic_tm_regs;
57 u64 flags; 57 unsigned long flags;
58 u64 value; 58 u64 value;
59#ifdef DEBUG 59#ifdef DEBUG
60 long time; 60 long time;
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 2d5bb22d6c09..28c04dab2633 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -148,7 +148,7 @@ static unsigned int iic_get_irq(void)
148 148
149 iic = &__get_cpu_var(iic); 149 iic = &__get_cpu_var(iic);
150 *(unsigned long *) &pending = 150 *(unsigned long *) &pending =
151 in_be64((unsigned long __iomem *) &iic->regs->pending_destr); 151 in_be64((u64 __iomem *) &iic->regs->pending_destr);
152 if (!(pending.flags & CBE_IIC_IRQ_VALID)) 152 if (!(pending.flags & CBE_IIC_IRQ_VALID))
153 return NO_IRQ; 153 return NO_IRQ;
154 virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending)); 154 virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
index b5f84e8f0899..059cad6c3f69 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.c
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -130,14 +130,14 @@ static const struct ppc_pci_io __devinitconst iowa_pci_io = {
130 130
131}; 131};
132 132
133static void __iomem *iowa_ioremap(unsigned long addr, unsigned long size, 133static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
134 unsigned long flags) 134 unsigned long flags)
135{ 135{
136 struct iowa_bus *bus; 136 struct iowa_bus *bus;
137 void __iomem *res = __ioremap(addr, size, flags); 137 void __iomem *res = __ioremap(addr, size, flags);
138 int busno; 138 int busno;
139 139
140 bus = iowa_pci_find(0, addr); 140 bus = iowa_pci_find(0, (unsigned long)addr);
141 if (bus != NULL) { 141 if (bus != NULL) {
142 busno = bus - iowa_busses; 142 busno = bus - iowa_busses;
143 PCI_SET_ADDR_TOKEN(res, busno + 1); 143 PCI_SET_ADDR_TOKEN(res, busno + 1);
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 86db4dd170a0..88d94b59a7cb 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -150,8 +150,8 @@ static int cbe_nr_iommus;
150static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte, 150static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
151 long n_ptes) 151 long n_ptes)
152{ 152{
153 unsigned long __iomem *reg; 153 u64 __iomem *reg;
154 unsigned long val; 154 u64 val;
155 long n; 155 long n;
156 156
157 reg = iommu->xlate_regs + IOC_IOPT_CacheInvd; 157 reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 15c62d3ca129..3bf908e2873a 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -314,7 +314,7 @@ extern char *isolated_loader;
314 * we need to call spu_release(ctx) before sleeping, and 314 * we need to call spu_release(ctx) before sleeping, and
315 * then spu_acquire(ctx) when awoken. 315 * then spu_acquire(ctx) when awoken.
316 * 316 *
317 * Returns with state_mutex re-acquired when successfull or 317 * Returns with state_mutex re-acquired when successful or
318 * with -ERESTARTSYS and the state_mutex dropped when interrupted. 318 * with -ERESTARTSYS and the state_mutex dropped when interrupted.
319 */ 319 */
320 320
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index ed3753d8c109..7ddd0a2c8027 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -10,18 +10,21 @@ menu "iSeries device drivers"
10config VIODASD 10config VIODASD
11 tristate "iSeries Virtual I/O disk support" 11 tristate "iSeries Virtual I/O disk support"
12 depends on BLOCK 12 depends on BLOCK
13 select VIOPATH
13 help 14 help
14 If you are running on an iSeries system and you want to use 15 If you are running on an iSeries system and you want to use
15 virtual disks created and managed by OS/400, say Y. 16 virtual disks created and managed by OS/400, say Y.
16 17
17config VIOCD 18config VIOCD
18 tristate "iSeries Virtual I/O CD support" 19 tristate "iSeries Virtual I/O CD support"
20 select VIOPATH
19 help 21 help
20 If you are running Linux on an IBM iSeries system and you want to 22 If you are running Linux on an IBM iSeries system and you want to
21 read a CD drive owned by OS/400, say Y here. 23 read a CD drive owned by OS/400, say Y here.
22 24
23config VIOTAPE 25config VIOTAPE
24 tristate "iSeries Virtual Tape Support" 26 tristate "iSeries Virtual Tape Support"
27 select VIOPATH
25 help 28 help
26 If you are running Linux on an iSeries system and you want Linux 29 If you are running Linux on an iSeries system and you want Linux
27 to read and/or write a tape drive owned by OS/400, say Y here. 30 to read and/or write a tape drive owned by OS/400, say Y here.
@@ -30,5 +33,3 @@ endmenu
30 33
31config VIOPATH 34config VIOPATH
32 bool 35 bool
33 depends on VIODASD || VIOCD || VIOTAPE || ISERIES_VETH
34 default y
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 70b688c1aefb..24519b96d6ad 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -23,6 +23,7 @@
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/seq_file.h> 24#include <linux/seq_file.h>
25#include <linux/kdev_t.h> 25#include <linux/kdev_t.h>
26#include <linux/kexec.h>
26#include <linux/major.h> 27#include <linux/major.h>
27#include <linux/root_dev.h> 28#include <linux/root_dev.h>
28#include <linux/kernel.h> 29#include <linux/kernel.h>
@@ -638,6 +639,13 @@ static int __init iseries_probe(void)
638 return 1; 639 return 1;
639} 640}
640 641
642#ifdef CONFIG_KEXEC
643static int iseries_kexec_prepare(struct kimage *image)
644{
645 return -ENOSYS;
646}
647#endif
648
641define_machine(iseries) { 649define_machine(iseries) {
642 .name = "iSeries", 650 .name = "iSeries",
643 .setup_arch = iSeries_setup_arch, 651 .setup_arch = iSeries_setup_arch,
@@ -658,6 +666,9 @@ define_machine(iseries) {
658 .probe = iseries_probe, 666 .probe = iseries_probe,
659 .ioremap = iseries_ioremap, 667 .ioremap = iseries_ioremap,
660 .iounmap = iseries_iounmap, 668 .iounmap = iseries_iounmap,
669#ifdef CONFIG_KEXEC
670 .machine_kexec_prepare = iseries_kexec_prepare,
671#endif
661 /* XXX Implement enable_pmcs for iSeries */ 672 /* XXX Implement enable_pmcs for iSeries */
662}; 673};
663 674
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index 58556b028a4c..86db47c1b665 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -112,7 +112,7 @@ static int get_gizmo_latency(void)
112 112
113static void set_astate(int cpu, unsigned int astate) 113static void set_astate(int cpu, unsigned int astate)
114{ 114{
115 u64 flags; 115 unsigned long flags;
116 116
117 /* Return if called before init has run */ 117 /* Return if called before init has run */
118 if (unlikely(!sdcasr_mapbase)) 118 if (unlikely(!sdcasr_mapbase))
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index 217af321b0ca..a6152d922243 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -509,7 +509,7 @@ fallback:
509 */ 509 */
510int pasemi_dma_init(void) 510int pasemi_dma_init(void)
511{ 511{
512 static spinlock_t init_lock = SPIN_LOCK_UNLOCKED; 512 static DEFINE_SPINLOCK(init_lock);
513 struct pci_dev *iob_pdev; 513 struct pci_dev *iob_pdev;
514 struct pci_dev *pdev; 514 struct pci_dev *pdev;
515 struct resource res; 515 struct resource res;
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 54b7b76ed4f0..04cdd32624d4 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -661,6 +661,7 @@ static void __init init_second_ohare(void)
661 pci_find_hose_for_OF_device(np); 661 pci_find_hose_for_OF_device(np);
662 if (!hose) { 662 if (!hose) {
663 printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); 663 printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
664 of_node_put(np);
664 return; 665 return;
665 } 666 }
666 early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); 667 early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
@@ -669,6 +670,7 @@ static void __init init_second_ohare(void)
669 early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); 670 early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
670 } 671 }
671 has_second_ohare = 1; 672 has_second_ohare = 1;
673 of_node_put(np);
672} 674}
673 675
674/* 676/*
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index 59eb840d8ce2..1810e4226e56 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -265,12 +265,15 @@ int __init via_calibrate_decr(void)
265 struct resource rsrc; 265 struct resource rsrc;
266 266
267 vias = of_find_node_by_name(NULL, "via-cuda"); 267 vias = of_find_node_by_name(NULL, "via-cuda");
268 if (vias == 0) 268 if (vias == NULL)
269 vias = of_find_node_by_name(NULL, "via-pmu"); 269 vias = of_find_node_by_name(NULL, "via-pmu");
270 if (vias == 0) 270 if (vias == NULL)
271 vias = of_find_node_by_name(NULL, "via"); 271 vias = of_find_node_by_name(NULL, "via");
272 if (vias == 0 || of_address_to_resource(vias, 0, &rsrc)) 272 if (vias == NULL || of_address_to_resource(vias, 0, &rsrc)) {
273 of_node_put(vias);
273 return 0; 274 return 0;
275 }
276 of_node_put(vias);
274 via = ioremap(rsrc.start, rsrc.end - rsrc.start + 1); 277 via = ioremap(rsrc.start, rsrc.end - rsrc.start + 1);
275 if (via == NULL) { 278 if (via == NULL) {
276 printk(KERN_ERR "Failed to map VIA for timer calibration !\n"); 279 printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
@@ -297,7 +300,7 @@ int __init via_calibrate_decr(void)
297 ppc_tb_freq = (dstart - dend) * 100 / 6; 300 ppc_tb_freq = (dstart - dend) * 100 / 6;
298 301
299 iounmap(via); 302 iounmap(via);
300 303
301 return 1; 304 return 1;
302} 305}
303#endif 306#endif
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index dbc124e05646..ca71a12b764c 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -518,6 +518,41 @@ fail_device_register:
518 return result; 518 return result;
519} 519}
520 520
521static int __init ps3_register_ramdisk_device(void)
522{
523 int result;
524 struct layout {
525 struct ps3_system_bus_device dev;
526 } *p;
527
528 pr_debug(" -> %s:%d\n", __func__, __LINE__);
529
530 p = kzalloc(sizeof(struct layout), GFP_KERNEL);
531
532 if (!p)
533 return -ENOMEM;
534
535 p->dev.match_id = PS3_MATCH_ID_GPU;
536 p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK;
537 p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
538
539 result = ps3_system_bus_device_register(&p->dev);
540
541 if (result) {
542 pr_debug("%s:%d ps3_system_bus_device_register failed\n",
543 __func__, __LINE__);
544 goto fail_device_register;
545 }
546
547 pr_debug(" <- %s:%d\n", __func__, __LINE__);
548 return 0;
549
550fail_device_register:
551 kfree(p);
552 pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
553 return result;
554}
555
521/** 556/**
522 * ps3_setup_dynamic_device - Setup a dynamic device from the repository 557 * ps3_setup_dynamic_device - Setup a dynamic device from the repository
523 */ 558 */
@@ -946,6 +981,8 @@ static int __init ps3_register_devices(void)
946 981
947 ps3_register_lpm_devices(); 982 ps3_register_lpm_devices();
948 983
984 ps3_register_ramdisk_device();
985
949 pr_debug(" <- %s:%d\n", __func__, __LINE__); 986 pr_debug(" <- %s:%d\n", __func__, __LINE__);
950 return 0; 987 return 0;
951} 988}
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 5afce115ab1f..b33b28a6fe12 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y)
17obj-$(CONFIG_FSL_LBC) += fsl_lbc.o 17obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
18obj-$(CONFIG_FSL_GTM) += fsl_gtm.o 18obj-$(CONFIG_FSL_GTM) += fsl_gtm.o
19obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o 19obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o
20obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o
20obj-$(CONFIG_RAPIDIO) += fsl_rio.o 21obj-$(CONFIG_RAPIDIO) += fsl_rio.o
21obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o 22obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
22obj-$(CONFIG_QUICC_ENGINE) += qe_lib/ 23obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index d5f9ae0f1b75..f611d0369cc8 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -29,7 +29,8 @@
29 29
30#if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx) 30#if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx)
31/* atmu setup for fsl pci/pcie controller */ 31/* atmu setup for fsl pci/pcie controller */
32void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc) 32static void __init setup_pci_atmu(struct pci_controller *hose,
33 struct resource *rsrc)
33{ 34{
34 struct ccsr_pci __iomem *pci; 35 struct ccsr_pci __iomem *pci;
35 int i; 36 int i;
@@ -86,7 +87,7 @@ void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc)
86 out_be32(&pci->piw[2].piwar, PIWAR_2G); 87 out_be32(&pci->piw[2].piwar, PIWAR_2G);
87} 88}
88 89
89void __init setup_pci_cmd(struct pci_controller *hose) 90static void __init setup_pci_cmd(struct pci_controller *hose)
90{ 91{
91 u16 cmd; 92 u16 cmd;
92 int cap_x; 93 int cap_x;
@@ -130,7 +131,7 @@ static void __init quirk_fsl_pcie_header(struct pci_dev *dev)
130 return ; 131 return ;
131} 132}
132 133
133int __init fsl_pcie_check_link(struct pci_controller *hose) 134static int __init fsl_pcie_check_link(struct pci_controller *hose)
134{ 135{
135 u32 val; 136 u32 val;
136 early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val); 137 early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 60f7f227327c..9c744e4285a0 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -5,8 +5,13 @@
5#include <asm/mmu.h> 5#include <asm/mmu.h>
6 6
7extern phys_addr_t get_immrbase(void); 7extern phys_addr_t get_immrbase(void);
8#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
8extern u32 get_brgfreq(void); 9extern u32 get_brgfreq(void);
9extern u32 get_baudrate(void); 10extern u32 get_baudrate(void);
11#else
12static inline u32 get_brgfreq(void) { return -1; }
13static inline u32 get_baudrate(void) { return -1; }
14#endif
10extern u32 fsl_get_sys_freq(void); 15extern u32 fsl_get_sys_freq(void);
11 16
12struct spi_board_info; 17struct spi_board_info;
diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig
index 76ffbc48d4b9..41ac3dfac98e 100644
--- a/arch/powerpc/sysdev/qe_lib/Kconfig
+++ b/arch/powerpc/sysdev/qe_lib/Kconfig
@@ -22,5 +22,6 @@ config UCC
22 22
23config QE_USB 23config QE_USB
24 bool 24 bool
25 default y if USB_GADGET_FSL_QE
25 help 26 help
26 QE USB Host Controller support 27 QE USB Controller support
diff --git a/arch/powerpc/sysdev/qe_lib/gpio.c b/arch/powerpc/sysdev/qe_lib/gpio.c
index 8e5a0bc36d0b..3485288dce31 100644
--- a/arch/powerpc/sysdev/qe_lib/gpio.c
+++ b/arch/powerpc/sysdev/qe_lib/gpio.c
@@ -14,6 +14,7 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/spinlock.h> 16#include <linux/spinlock.h>
17#include <linux/err.h>
17#include <linux/io.h> 18#include <linux/io.h>
18#include <linux/of.h> 19#include <linux/of.h>
19#include <linux/of_gpio.h> 20#include <linux/of_gpio.h>
@@ -24,8 +25,14 @@ struct qe_gpio_chip {
24 struct of_mm_gpio_chip mm_gc; 25 struct of_mm_gpio_chip mm_gc;
25 spinlock_t lock; 26 spinlock_t lock;
26 27
28 unsigned long pin_flags[QE_PIO_PINS];
29#define QE_PIN_REQUESTED 0
30
27 /* shadowed data register to clear/set bits safely */ 31 /* shadowed data register to clear/set bits safely */
28 u32 cpdata; 32 u32 cpdata;
33
34 /* saved_regs used to restore dedicated functions */
35 struct qe_pio_regs saved_regs;
29}; 36};
30 37
31static inline struct qe_gpio_chip * 38static inline struct qe_gpio_chip *
@@ -40,6 +47,12 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
40 struct qe_pio_regs __iomem *regs = mm_gc->regs; 47 struct qe_pio_regs __iomem *regs = mm_gc->regs;
41 48
42 qe_gc->cpdata = in_be32(&regs->cpdata); 49 qe_gc->cpdata = in_be32(&regs->cpdata);
50 qe_gc->saved_regs.cpdata = qe_gc->cpdata;
51 qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
52 qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
53 qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
54 qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
55 qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
43} 56}
44 57
45static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) 58static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
@@ -103,6 +116,188 @@ static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
103 return 0; 116 return 0;
104} 117}
105 118
119struct qe_pin {
120 /*
 121 * The qe_gpio_chip name is unfortunate; we should change it to
122 * something like qe_pio_controller. Someday.
123 */
124 struct qe_gpio_chip *controller;
125 int num;
126};
127
128/**
129 * qe_pin_request - Request a QE pin
130 * @np: device node to get a pin from
131 * @index: index of a pin in the device tree
132 * Context: non-atomic
133 *
134 * This function return qe_pin so that you could use it with the rest of
 135 * This function returns a qe_pin so that you can use it with the rest of
136 */
137struct qe_pin *qe_pin_request(struct device_node *np, int index)
138{
139 struct qe_pin *qe_pin;
140 struct device_node *gc;
141 struct of_gpio_chip *of_gc = NULL;
142 struct of_mm_gpio_chip *mm_gc;
143 struct qe_gpio_chip *qe_gc;
144 int err;
145 int size;
146 const void *gpio_spec;
147 const u32 *gpio_cells;
148 unsigned long flags;
149
150 qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
151 if (!qe_pin) {
152 pr_debug("%s: can't allocate memory\n", __func__);
153 return ERR_PTR(-ENOMEM);
154 }
155
156 err = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index,
157 &gc, &gpio_spec);
158 if (err) {
159 pr_debug("%s: can't parse gpios property\n", __func__);
160 goto err0;
161 }
162
163 if (!of_device_is_compatible(gc, "fsl,mpc8323-qe-pario-bank")) {
164 pr_debug("%s: tried to get a non-qe pin\n", __func__);
165 err = -EINVAL;
166 goto err1;
167 }
168
169 of_gc = gc->data;
170 if (!of_gc) {
171 pr_debug("%s: gpio controller %s isn't registered\n",
172 np->full_name, gc->full_name);
173 err = -ENODEV;
174 goto err1;
175 }
176
177 gpio_cells = of_get_property(gc, "#gpio-cells", &size);
178 if (!gpio_cells || size != sizeof(*gpio_cells) ||
179 *gpio_cells != of_gc->gpio_cells) {
180 pr_debug("%s: wrong #gpio-cells for %s\n",
181 np->full_name, gc->full_name);
182 err = -EINVAL;
183 goto err1;
184 }
185
186 err = of_gc->xlate(of_gc, np, gpio_spec, NULL);
187 if (err < 0)
188 goto err1;
189
190 mm_gc = to_of_mm_gpio_chip(&of_gc->gc);
191 qe_gc = to_qe_gpio_chip(mm_gc);
192
193 spin_lock_irqsave(&qe_gc->lock, flags);
194
195 if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
196 qe_pin->controller = qe_gc;
197 qe_pin->num = err;
198 err = 0;
199 } else {
200 err = -EBUSY;
201 }
202
203 spin_unlock_irqrestore(&qe_gc->lock, flags);
204
205 if (!err)
206 return qe_pin;
207err1:
208 of_node_put(gc);
209err0:
210 kfree(qe_pin);
211 pr_debug("%s failed with status %d\n", __func__, err);
212 return ERR_PTR(err);
213}
214EXPORT_SYMBOL(qe_pin_request);
215
216/**
217 * qe_pin_free - Free a pin
218 * @qe_pin: pointer to the qe_pin structure
219 * Context: any
220 *
221 * This function frees the qe_pin structure and makes a pin available
222 * for further qe_pin_request() calls.
223 */
224void qe_pin_free(struct qe_pin *qe_pin)
225{
226 struct qe_gpio_chip *qe_gc = qe_pin->controller;
227 unsigned long flags;
228 const int pin = qe_pin->num;
229
230 spin_lock_irqsave(&qe_gc->lock, flags);
231 test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]);
232 spin_unlock_irqrestore(&qe_gc->lock, flags);
233
234 kfree(qe_pin);
235}
236EXPORT_SYMBOL(qe_pin_free);
237
238/**
239 * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
240 * @qe_pin: pointer to the qe_pin structure
241 * Context: any
242 *
243 * This function resets a pin to a dedicated peripheral function that
244 * has been set up by the firmware.
245 */
246void qe_pin_set_dedicated(struct qe_pin *qe_pin)
247{
248 struct qe_gpio_chip *qe_gc = qe_pin->controller;
249 struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
250 struct qe_pio_regs *sregs = &qe_gc->saved_regs;
251 int pin = qe_pin->num;
252 u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
253 u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
254 bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
255 unsigned long flags;
256
257 spin_lock_irqsave(&qe_gc->lock, flags);
258
259 if (second_reg) {
260 clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
261 clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
262 } else {
263 clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
264 clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
265 }
266
267 if (sregs->cpdata & mask1)
268 qe_gc->cpdata |= mask1;
269 else
270 qe_gc->cpdata &= ~mask1;
271
272 out_be32(&regs->cpdata, qe_gc->cpdata);
273 clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
274
275 spin_unlock_irqrestore(&qe_gc->lock, flags);
276}
277EXPORT_SYMBOL(qe_pin_set_dedicated);
278
279/**
280 * qe_pin_set_gpio - Set a pin to the GPIO mode
281 * @qe_pin: pointer to the qe_pin structure
282 * Context: any
283 *
284 * This function sets a pin to the GPIO mode.
285 */
286void qe_pin_set_gpio(struct qe_pin *qe_pin)
287{
288 struct qe_gpio_chip *qe_gc = qe_pin->controller;
289 struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
290 unsigned long flags;
291
292 spin_lock_irqsave(&qe_gc->lock, flags);
293
 294 /* Let's make it an input by default; the GPIO API can change that. */
295 __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
296
297 spin_unlock_irqrestore(&qe_gc->lock, flags);
298}
299EXPORT_SYMBOL(qe_pin_set_gpio);
300
106static int __init qe_add_gpiochips(void) 301static int __init qe_add_gpiochips(void)
107{ 302{
108 struct device_node *np; 303 struct device_node *np;
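
A hypothetical consumer of the new QE pin multiplexing API (device and
variable names invented for illustration). For QE_PIO_PINS == 32, pin 5
lands in the first dir/par register pair with a one-bit data mask
mask1 = 1 << 26 and a two-bit function mask mask2 = 3 << 20:

	struct qe_pin *pin;

	pin = qe_pin_request(ofdev->node, 0);	/* entry 0 of "gpios" */
	if (IS_ERR(pin))
		return PTR_ERR(pin);

	qe_pin_set_gpio(pin);		/* borrow the pin as a GPIO */
	/* ... */
	qe_pin_set_dedicated(pin);	/* restore the firmware-set function */
	qe_pin_free(pin);
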
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
new file mode 100644
index 000000000000..43c4569e24b7
--- /dev/null
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -0,0 +1,155 @@
1/*
2 * Simple Memory-Mapped GPIOs
3 *
4 * Copyright (c) MontaVista Software, Inc. 2008.
5 *
6 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/spinlock.h>
18#include <linux/types.h>
19#include <linux/ioport.h>
20#include <linux/io.h>
21#include <linux/of.h>
22#include <linux/of_gpio.h>
23#include <linux/gpio.h>
24#include <asm/prom.h>
25#include "simple_gpio.h"
26
27struct u8_gpio_chip {
28 struct of_mm_gpio_chip mm_gc;
29 spinlock_t lock;
30
31 /* shadowed data register to clear/set bits safely */
32 u8 data;
33};
34
35static struct u8_gpio_chip *to_u8_gpio_chip(struct of_mm_gpio_chip *mm_gc)
36{
37 return container_of(mm_gc, struct u8_gpio_chip, mm_gc);
38}
39
40static u8 u8_pin2mask(unsigned int pin)
41{
42 return 1 << (8 - 1 - pin);
43}
44
45static int u8_gpio_get(struct gpio_chip *gc, unsigned int gpio)
46{
47 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
48
49 return in_8(mm_gc->regs) & u8_pin2mask(gpio);
50}
51
52static void u8_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
53{
54 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
55 struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc);
56 unsigned long flags;
57
58 spin_lock_irqsave(&u8_gc->lock, flags);
59
60 if (val)
61 u8_gc->data |= u8_pin2mask(gpio);
62 else
63 u8_gc->data &= ~u8_pin2mask(gpio);
64
65 out_8(mm_gc->regs, u8_gc->data);
66
67 spin_unlock_irqrestore(&u8_gc->lock, flags);
68}
69
70static int u8_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
71{
72 return 0;
73}
74
75static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
76{
77 u8_gpio_set(gc, gpio, val);
78 return 0;
79}
80
81static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
82{
83 struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc);
84
85 u8_gc->data = in_8(mm_gc->regs);
86}
87
88static int __init u8_simple_gpiochip_add(struct device_node *np)
89{
90 int ret;
91 struct u8_gpio_chip *u8_gc;
92 struct of_mm_gpio_chip *mm_gc;
93 struct of_gpio_chip *of_gc;
94 struct gpio_chip *gc;
95
96 u8_gc = kzalloc(sizeof(*u8_gc), GFP_KERNEL);
97 if (!u8_gc)
98 return -ENOMEM;
99
100 spin_lock_init(&u8_gc->lock);
101
102 mm_gc = &u8_gc->mm_gc;
103 of_gc = &mm_gc->of_gc;
104 gc = &of_gc->gc;
105
106 mm_gc->save_regs = u8_gpio_save_regs;
107 of_gc->gpio_cells = 2;
108 gc->ngpio = 8;
109 gc->direction_input = u8_gpio_dir_in;
110 gc->direction_output = u8_gpio_dir_out;
111 gc->get = u8_gpio_get;
112 gc->set = u8_gpio_set;
113
114 ret = of_mm_gpiochip_add(np, mm_gc);
115 if (ret)
116 goto err;
117 return 0;
118err:
119 kfree(u8_gc);
120 return ret;
121}
122
123void __init simple_gpiochip_init(const char *compatible)
124{
125 struct device_node *np;
126
127 for_each_compatible_node(np, NULL, compatible) {
128 int ret;
129 struct resource r;
130
131 ret = of_address_to_resource(np, 0, &r);
132 if (ret)
133 goto err;
134
135 switch (resource_size(&r)) {
136 case 1:
137 ret = u8_simple_gpiochip_add(np);
138 if (ret)
139 goto err;
140 break;
141 default:
142 /*
143 * Whenever you need support for GPIO bank width > 1,
 144 * please just turn the u8_ code into macros and
 145 * construct the needed uX_ code from them.
146 */
147 ret = -ENOSYS;
148 goto err;
149 }
150 continue;
151err:
152 pr_err("%s: registration failed, status %d\n",
153 np->full_name, ret);
154 }
155}
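
Two details of the new driver worth noting: u8_pin2mask() numbers pins
MSB-first, matching the big-endian register diagrams BCSRs are usually
documented with, and the shadowed data byte avoids read-modify-write on
a register that may not read back exactly what was written:

	/* pin 0 -> 1 << 7 = 0x80, pin 7 -> 1 << 0 = 0x01 */

	/* set/clear happens on the shadow under the lock, then one write: */
	u8_gc->data |= u8_pin2mask(gpio);
	out_8(mm_gc->regs, u8_gc->data);
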
diff --git a/arch/powerpc/sysdev/simple_gpio.h b/arch/powerpc/sysdev/simple_gpio.h
new file mode 100644
index 000000000000..3a7b0c513c76
--- /dev/null
+++ b/arch/powerpc/sysdev/simple_gpio.h
@@ -0,0 +1,12 @@
1#ifndef __SYSDEV_SIMPLE_GPIO_H
2#define __SYSDEV_SIMPLE_GPIO_H
3
4#include <linux/errno.h>
5
6#ifdef CONFIG_SIMPLE_GPIO
7extern void simple_gpiochip_init(const char *compatible);
8#else
9static inline void simple_gpiochip_init(const char *compatible) {}
10#endif /* CONFIG_SIMPLE_GPIO */
11
12#endif /* __SYSDEV_SIMPLE_GPIO_H */
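
Board-code usage is a single call, with ordinary gpiolib afterwards; a
sketch using the compatible string from the 8360 MDS hunk above (the
consumer node np and the "usb-power" label are invented):

	simple_gpiochip_init("fsl,mpc8360mds-bcsr-gpio");

	/* later, from a consumer node with a gpios = <...> property: */
	int gpio = of_get_gpio(np, 0);

	if (gpio_is_valid(gpio) && !gpio_request(gpio, "usb-power"))
		gpio_direction_output(gpio, 1);
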
diff --git a/arch/s390/include/asm/chpid.h b/arch/s390/include/asm/chpid.h
index dfe3c7f3439a..fc71d8a6709b 100644
--- a/arch/s390/include/asm/chpid.h
+++ b/arch/s390/include/asm/chpid.h
@@ -9,7 +9,7 @@
9#define _ASM_S390_CHPID_H _ASM_S390_CHPID_H 9#define _ASM_S390_CHPID_H _ASM_S390_CHPID_H
10 10
11#include <linux/string.h> 11#include <linux/string.h>
12#include <asm/types.h> 12#include <linux/types.h>
13 13
14#define __MAX_CHPID 255 14#define __MAX_CHPID 255
15 15
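
This and the following s390 header changes follow the rule for headers
exported to user space: the __u8/__u16/__u32/__u64 ABI types must come
from <linux/types.h>, which is safe to include from both kernel and
user-space builds. A sketch of the pattern (the struct is hypothetical):

	#include <linux/types.h>

	struct example_abi {	/* illustrative exported structure */
		__u32 id;
		__u16 flags;
	};
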
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index d38d0cf62d4b..807997f7414b 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -8,6 +8,7 @@
8#ifndef _ASM_CHSC_H 8#ifndef _ASM_CHSC_H
9#define _ASM_CHSC_H 9#define _ASM_CHSC_H
10 10
11#include <linux/types.h>
11#include <asm/chpid.h> 12#include <asm/chpid.h>
12#include <asm/schid.h> 13#include <asm/schid.h>
13 14
diff --git a/arch/s390/include/asm/cmb.h b/arch/s390/include/asm/cmb.h
index 50196857d27a..39ae03294794 100644
--- a/arch/s390/include/asm/cmb.h
+++ b/arch/s390/include/asm/cmb.h
@@ -1,5 +1,8 @@
1#ifndef S390_CMB_H 1#ifndef S390_CMB_H
2#define S390_CMB_H 2#define S390_CMB_H
3
4#include <linux/types.h>
5
3/** 6/**
4 * struct cmbdata - channel measurement block data for user space 7 * struct cmbdata - channel measurement block data for user space
5 * @size: size of the stored data 8 * @size: size of the stored data
diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h
index 55b2b80cdf6e..e2db6f16d9c8 100644
--- a/arch/s390/include/asm/dasd.h
+++ b/arch/s390/include/asm/dasd.h
@@ -14,6 +14,7 @@
14 14
15#ifndef DASD_H 15#ifndef DASD_H
16#define DASD_H 16#define DASD_H
17#include <linux/types.h>
17#include <linux/ioctl.h> 18#include <linux/ioctl.h>
18 19
19#define DASD_IOCTL_LETTER 'D' 20#define DASD_IOCTL_LETTER 'D'
@@ -78,6 +79,7 @@ typedef struct dasd_information2_t {
78#define DASD_FEATURE_USEDIAG 0x02 79#define DASD_FEATURE_USEDIAG 0x02
79#define DASD_FEATURE_INITIAL_ONLINE 0x04 80#define DASD_FEATURE_INITIAL_ONLINE 0x04
80#define DASD_FEATURE_ERPLOG 0x08 81#define DASD_FEATURE_ERPLOG 0x08
82#define DASD_FEATURE_FAILFAST 0x10
81 83
82#define DASD_PARTN_BITS 2 84#define DASD_PARTN_BITS 2
83 85
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h
index d74002f95794..e1f54654e3ae 100644
--- a/arch/s390/include/asm/kvm.h
+++ b/arch/s390/include/asm/kvm.h
@@ -13,7 +13,7 @@
13 * Author(s): Carsten Otte <cotte@de.ibm.com> 13 * Author(s): Carsten Otte <cotte@de.ibm.com>
14 * Christian Borntraeger <borntraeger@de.ibm.com> 14 * Christian Borntraeger <borntraeger@de.ibm.com>
15 */ 15 */
16#include <asm/types.h> 16#include <linux/types.h>
17 17
18/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */ 18/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
19struct kvm_pic_state { 19struct kvm_pic_state {
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 397d93fba3a7..8cc113f92523 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -68,11 +68,7 @@ typedef unsigned short __kernel_old_dev_t;
68#endif /* __s390x__ */ 68#endif /* __s390x__ */
69 69
70typedef struct { 70typedef struct {
71#if defined(__KERNEL__) || defined(__USE_ALL)
72 int val[2]; 71 int val[2];
73#else /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
74 int __val[2];
75#endif /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
76} __kernel_fsid_t; 72} __kernel_fsid_t;
77 73
78 74
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 5396f9f12263..8920025c3c02 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -272,12 +272,15 @@ typedef struct
 #define PSW_ASC_SECONDARY	0x0000800000000000UL
 #define PSW_ASC_HOME		0x0000C00000000000UL
 
-extern long psw_user32_bits;
-
 #endif /* __s390x__ */
 
+#ifdef __KERNEL__
 extern long psw_kernel_bits;
 extern long psw_user_bits;
+#ifdef CONFIG_64BIT
+extern long psw_user32_bits;
+#endif
+#endif
 
 /* This macro merges a NEW PSW mask specified by the user into
    the currently active PSW mask CURRENT, modifying only those
diff --git a/arch/s390/include/asm/qeth.h b/arch/s390/include/asm/qeth.h
index 930d378ef75a..06cbd1e8c943 100644
--- a/arch/s390/include/asm/qeth.h
+++ b/arch/s390/include/asm/qeth.h
@@ -10,6 +10,7 @@
  */
 #ifndef __ASM_S390_QETH_IOCTL_H__
 #define __ASM_S390_QETH_IOCTL_H__
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 #define SIOC_QETH_ARP_SET_NO_ENTRIES (SIOCDEVPRIVATE)
diff --git a/arch/s390/include/asm/schid.h b/arch/s390/include/asm/schid.h
index 825503cf3dc2..3e4d401b4e45 100644
--- a/arch/s390/include/asm/schid.h
+++ b/arch/s390/include/asm/schid.h
@@ -1,6 +1,8 @@
 #ifndef ASM_SCHID_H
 #define ASM_SCHID_H
 
+#include <linux/types.h>
+
 struct subchannel_id {
 	__u32 cssid : 8;
 	__u32 : 4;
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h
index bd9321aa55a9..eb18dc1f327b 100644
--- a/arch/s390/include/asm/swab.h
+++ b/arch/s390/include/asm/swab.h
@@ -9,7 +9,7 @@
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #ifndef __s390x__
 # define __SWAB_64_THRU_32__
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 41c547656130..3dc3fc228812 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -9,11 +9,7 @@
 #ifndef _S390_TYPES_H
 #define _S390_TYPES_H
 
-#ifndef __s390x__
-# include <asm-generic/int-ll64.h>
-#else
-# include <asm-generic/int-l64.h>
-#endif
+#include <asm-generic/int-ll64.h>
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 6b1896345eda..a65afc91e8aa 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -54,7 +54,5 @@ long sys_sigreturn(void);
 long sys_rt_sigreturn(void);
 long sys32_sigreturn(void);
 long sys32_rt_sigreturn(void);
-long old_select(struct sel_arg_struct __user *arg);
-long sys_ptrace(long request, long pid, long addr, long data);
 
 #endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 9c0ccb532a45..2d337cbb9329 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -685,7 +685,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	if (MACHINE_HAS_IEEE)
 		lowcore->extended_save_area_addr = (u32) save_area;
 #else
-	BUG_ON(vdso_alloc_per_cpu(smp_processor_id(), lowcore));
+	if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
+		BUG();
 #endif
 	set_prefix((u32)(unsigned long) lowcore);
 	local_mcck_enable();
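
The hunk above hoists vdso_alloc_per_cpu() out of BUG_ON(), and the vdso.c hunk further down does the same. The point, as far as the change itself shows, is that a side-effecting expression does not belong inside an assertion macro: if the macro is ever compiled out, the call disappears with it. A minimal user-space sketch of the hazard, with a stubbed BUG_ON() (none of this is kernel code):

#include <stdio.h>
#include <stdlib.h>

#ifdef CHECKS_COMPILED_OUT
#define BUG_ON(x) do { } while (0)		/* assertion discarded */
#else
#define BUG_ON(x) do { if (x) abort(); } while (0)
#endif

static int calls;

static int alloc_per_cpu(void)		/* hypothetical stand-in */
{
	calls++;			/* side effect we must not lose */
	return 0;			/* 0 = success */
}

int main(void)
{
	/* Risky: built with -DCHECKS_COMPILED_OUT, this never allocates. */
	BUG_ON(alloc_per_cpu());

	/* Safe: the call always runs; only the check is conditional. */
	if (alloc_per_cpu())
		abort();

	printf("alloc_per_cpu() ran %d time(s)\n", calls);
	return 0;
}
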
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 4fe952e557ac..c34be4568b80 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -103,25 +103,6 @@ out:
 	return error;
 }
 
-#ifndef CONFIG_64BIT
-struct sel_arg_struct {
-	unsigned long n;
-	fd_set __user *inp, *outp, *exp;
-	struct timeval __user *tvp;
-};
-
-asmlinkage long old_select(struct sel_arg_struct __user *arg)
-{
-	struct sel_arg_struct a;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		return -EFAULT;
-	/* sys_select() does the appropriate kernel locking */
-	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
-
-}
-#endif /* CONFIG_64BIT */
-
 /*
  * sys_ipc() is the de-multiplexer for the SysV IPC calls..
  *
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 25a6a82f1c02..690e17819686 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -322,7 +322,8 @@ static int __init vdso_init(void)
 	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
 	vdso64_pagelist[vdso64_pages] = NULL;
 #ifndef CONFIG_SMP
-	BUG_ON(vdso_alloc_per_cpu(0, S390_lowcore));
+	if (vdso_alloc_per_cpu(0, &S390_lowcore))
+		BUG();
 #endif
 	vdso_init_cr5();
 #endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index c32f29c3d70c..ad8acfc949fb 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -13,10 +13,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 
-#include <asm/vdso.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-
 	.text
 	.align 4
 	.globl __kernel_gettimeofday
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index a0775e1f08df..8300309698fa 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -47,7 +47,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
 	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
-	VCPU_EVENT(vcpu, 3, "requesting userspace resets %lx",
+	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
 		   vcpu->run->s390_reset_flags);
 	return -EREMOTE;
 }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 2960702b4824..f4fe28a2521a 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -160,7 +160,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		break;
 
 	case KVM_S390_INT_VIRTIO:
-		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
+		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
 			   inti->ext.ext_params, inti->ext.ext_params2);
 		vcpu->stat.deliver_virtio_interrupt++;
 		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
@@ -360,7 +360,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	vcpu->arch.ckc_timer.expires = jiffies + sltime;
 
 	add_timer(&vcpu->arch.ckc_timer);
-	VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
+	VCPU_EVENT(vcpu, 5, "enabled wait timer:%llx jiffies", sltime);
 no_timer:
 	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
@@ -491,7 +491,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 
 	switch (s390int->type) {
 	case KVM_S390_INT_VIRTIO:
-		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
+		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
 			 s390int->parm, s390int->parm64);
 		inti->type = s390int->type;
 		inti->ext.ext_params = s390int->parm;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index cce40ff2913b..3605df45dd41 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -118,7 +118,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	VCPU_EVENT(vcpu, 5, "storing cpu address to %lx", useraddr);
+	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
 out:
 	return 0;
 }
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 4f8d60586b07..8040376c4890 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -54,7 +54,8 @@ extern int __smp4m_processor_id(void);
 #define SMP_PRINTK(x)
 #endif
 
-static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
+static inline unsigned long
+swap_ulong(volatile unsigned long *ptr, unsigned long val)
 {
 	__asm__ __volatile__("swap [%1], %0\n\t" :
 			     "=&r" (val), "=&r" (ptr) :
@@ -90,7 +91,7 @@ void __cpuinit smp4m_callin(void)
 	 * to call the scheduler code.
 	 */
 	/* Allow master to continue. */
-	swap(&cpu_callin_map[cpuid], 1);
+	swap_ulong(&cpu_callin_map[cpuid], 1);
 
 	/* XXX: What's up with all the flushes? */
 	local_flush_cache_all();
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 9fa9dcdf344b..e02a359d2aa5 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -300,7 +300,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 	return oldbit;
 }
 
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
 		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
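
The constant_test_bit() hunk above widens 'nr' from int to unsigned int. A standalone sketch of why a signed bit number is hazardous here (plain C, with BITS_PER_LONG fixed to 64 as an assumption): a negative 'nr' would make the shift count negative, which is undefined behaviour, and could drive the word index out of bounds; an unsigned parameter rules both out at the interface.

#include <stdio.h>

#define BITS_PER_LONG 64	/* assumption: LP64 target */

static int test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr % BITS_PER_LONG)) &
		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
}

int main(void)
{
	unsigned long map[2] = { 0x5UL, 0x1UL };

	printf("bit 0:  %d\n", test_bit(0, map));	/* 1 */
	printf("bit 1:  %d\n", test_bit(1, map));	/* 0 */
	printf("bit 64: %d\n", test_bit(64, map));	/* 1 */
	return 0;
}
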
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 29dc0c89d4af..d37593c2f438 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -47,7 +47,7 @@
 #endif
 
 static int __initdata acpi_force = 0;
-
+u32 acpi_rsdt_forced;
 #ifdef CONFIG_ACPI
 int acpi_disabled = 0;
 #else
@@ -1374,6 +1374,17 @@ static void __init acpi_process_madt(void)
 				       "Invalid BIOS MADT, disabling ACPI\n");
 			disable_acpi();
 		}
+	} else {
+		/*
+		 * ACPI found no MADT, and so ACPI wants UP PIC mode.
+		 * In the event an MPS table was found, forget it.
+		 * Boot with "acpi=off" to use MPS on such a system.
+		 */
+		if (smp_found_config) {
+			printk(KERN_WARNING PREFIX
+			       "No APIC-table, disabling MPS\n");
+			smp_found_config = 0;
+		}
 	}
 
 	/*
@@ -1809,6 +1820,10 @@ static int __init parse_acpi(char *arg)
 		disable_acpi();
 		acpi_ht = 1;
 	}
+	/* acpi=rsdt use RSDT instead of XSDT */
+	else if (strcmp(arg, "rsdt") == 0) {
+		acpi_rsdt_forced = 1;
+	}
 	/* "acpi=noirq" disables ACPI interrupt routing */
 	else if (strcmp(arg, "noirq") == 0) {
 		acpi_noirq_set();
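
The parse_acpi() hunk above adds an "rsdt" option, so booting with acpi=rsdt sets acpi_rsdt_forced and makes table lookup prefer the RSDT over the XSDT. A tiny user-space model of the strcmp() dispatch (hypothetical helper name; the real handler is registered through the kernel's early_param machinery):

#include <stdio.h>
#include <string.h>

static unsigned int acpi_rsdt_forced;	/* mirrors the new global above */

static int parse_acpi_option(const char *arg)
{
	if (arg == NULL)
		return -1;
	if (strcmp(arg, "rsdt") == 0)	/* acpi=rsdt: use RSDT, not XSDT */
		acpi_rsdt_forced = 1;
	return 0;
}

int main(void)
{
	parse_acpi_option("rsdt");	/* as if booted with acpi=rsdt */
	printf("acpi_rsdt_forced = %u\n", acpi_rsdt_forced);
	return 0;
}
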
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index c2502eb9aa83..a4805b3b4095 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -56,6 +56,7 @@ static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */
 static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
 
 #define MWAIT_SUBSTATE_MASK	(0xf)
+#define MWAIT_CSTATE_MASK	(0xf)
 #define MWAIT_SUBSTATE_SIZE	(4)
 
 #define CPUID_MWAIT_LEAF	(5)
@@ -98,7 +99,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
 
 	/* Check whether this particular cx_type (in CST) is supported or not */
-	cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1;
+	cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
+			MWAIT_CSTATE_MASK) + 1;
 	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
 	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
 
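
The cstate.c hunk above masks the C-state type out of cx->address before it is used as a shift count. Assuming the FFH layout implied by the macros (bits [3:0] MWAIT sub-state, bits [7:4] C-state type), a compilable sketch of what the mask prevents when firmware hands back an address with stray high bits:

#include <stdio.h>

#define MWAIT_SUBSTATE_MASK	(0xf)
#define MWAIT_CSTATE_MASK	(0xf)
#define MWAIT_SUBSTATE_SIZE	(4)

int main(void)
{
	unsigned int address = 0xdead0031;	/* hypothetical _CST value */
	unsigned int unmasked, masked;

	unmasked = (address >> MWAIT_SUBSTATE_SIZE) + 1;
	masked = ((address >> MWAIT_SUBSTATE_SIZE) &
		  MWAIT_CSTATE_MASK) + 1;

	/* the unmasked value would shift edx by a nonsense amount */
	printf("unmasked cstate_type: %#x\n", unmasked);	/* 0xdead004 */
	printf("masked cstate_type:   %#x\n", masked);		/* 0x4 */
	return 0;
}
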
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 806b4e9051b4..707c1f6f95fa 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -159,6 +159,8 @@ static int __init acpi_sleep_setup(char *str)
 #endif
 		if (strncmp(str, "old_ordering", 12) == 0)
 			acpi_old_suspend_ordering();
+		if (strncmp(str, "s4_nonvs", 8) == 0)
+			acpi_s4_no_nvs();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index b0461856acfb..a4cff5d6e380 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -982,7 +982,7 @@ static int __init longhaul_init(void)
 	case 10:
 		printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
 	default:
-		;;
+		;
 	}
 
 	return -ENODEV;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 65a13943e098..e85826829cf2 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -665,6 +665,27 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn)
 }
 #endif
 
+#ifdef CONFIG_HIBERNATION
+/**
+ * Mark ACPI NVS memory region, so that we can save/restore it during
+ * hibernation and the subsequent resume.
+ */
+static int __init e820_mark_nvs_memory(void)
+{
+	int i;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+
+		if (ei->type == E820_NVS)
+			hibernate_nvs_register(ei->addr, ei->size);
+	}
+
+	return 0;
+}
+core_initcall(e820_mark_nvs_memory);
+#endif
+
 /*
  * Early reserved memory areas.
  */
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 744aa7fc49d5..76b8cd953dee 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -201,6 +201,12 @@ struct chipset {
 	void (*f)(int num, int slot, int func);
 };
 
+/*
+ * Only works for devices on the root bus. If you add any devices
+ * not on bus 0 readd another loop level in early_quirks(). But
+ * be careful because at least the Nvidia quirk here relies on
+ * only matching on bus 0.
+ */
 static struct chipset early_qrk[] __initdata = {
 	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
 	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
@@ -267,17 +273,17 @@ static int __init check_dev_quirk(int num, int slot, int func)
 
 void __init early_quirks(void)
 {
-	int num, slot, func;
+	int slot, func;
 
 	if (!early_pci_allowed())
 		return;
 
 	/* Poor man's PCI discovery */
-	for (num = 0; num < 32; num++)
-		for (slot = 0; slot < 32; slot++)
-			for (func = 0; func < 8; func++) {
-				/* Only probe function 0 on single fn devices */
-				if (check_dev_quirk(num, slot, func))
-					break;
-			}
+	/* Only scan the root bus */
+	for (slot = 0; slot < 32; slot++)
+		for (func = 0; func < 8; func++) {
+			/* Only probe function 0 on single fn devices */
+			if (check_dev_quirk(0, slot, func))
+				break;
+		}
 }
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 98658f25f542..8fdf06e4edf9 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -2,7 +2,7 @@
  * @file op_model_amd.c
  * athlon / K7 / K8 / Family 10h model-specific MSR operations
  *
- * @remark Copyright 2002-2008 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon
@@ -10,7 +10,7 @@
  * @author Graydon Hoare
  * @author Robert Richter <robert.richter@amd.com>
  * @author Barry Kasindorf
-*/
+ */
 
 #include <linux/oprofile.h>
 #include <linux/device.h>
@@ -60,53 +60,10 @@ static unsigned long reset_value[NUM_COUNTERS];
 #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */
 #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */
 
-/* Codes used in cpu_buffer.c */
-/* This produces duplicate code, need to be fixed */
-#define IBS_FETCH_BEGIN 3
-#define IBS_OP_BEGIN 4
-
-/*
- * The function interface needs to be fixed, something like add
- * data. Should then be added to linux/oprofile.h.
- */
-extern void
-oprofile_add_ibs_sample(struct pt_regs * const regs,
-			unsigned int * const ibs_sample, int ibs_code);
-
-struct ibs_fetch_sample {
-	/* MSRC001_1031 IBS Fetch Linear Address Register */
-	unsigned int ibs_fetch_lin_addr_low;
-	unsigned int ibs_fetch_lin_addr_high;
-	/* MSRC001_1030 IBS Fetch Control Register */
-	unsigned int ibs_fetch_ctl_low;
-	unsigned int ibs_fetch_ctl_high;
-	/* MSRC001_1032 IBS Fetch Physical Address Register */
-	unsigned int ibs_fetch_phys_addr_low;
-	unsigned int ibs_fetch_phys_addr_high;
-};
-
-struct ibs_op_sample {
-	/* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */
-	unsigned int ibs_op_rip_low;
-	unsigned int ibs_op_rip_high;
-	/* MSRC001_1035 IBS Op Data Register */
-	unsigned int ibs_op_data1_low;
-	unsigned int ibs_op_data1_high;
-	/* MSRC001_1036 IBS Op Data 2 Register */
-	unsigned int ibs_op_data2_low;
-	unsigned int ibs_op_data2_high;
-	/* MSRC001_1037 IBS Op Data 3 Register */
-	unsigned int ibs_op_data3_low;
-	unsigned int ibs_op_data3_high;
-	/* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */
-	unsigned int ibs_dc_linear_low;
-	unsigned int ibs_dc_linear_high;
-	/* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */
-	unsigned int ibs_dc_phys_low;
-	unsigned int ibs_dc_phys_high;
-};
+#define IBS_FETCH_SIZE 6
+#define IBS_OP_SIZE 12
 
-static int ibs_allowed; /* AMD Family10h and later */
+static int has_ibs; /* AMD Family10h and later */
 
 struct op_ibs_config {
 	unsigned long op_enabled;
@@ -197,31 +154,29 @@ static inline int
 op_amd_handle_ibs(struct pt_regs * const regs,
 		  struct op_msrs const * const msrs)
 {
-	unsigned int low, high;
-	struct ibs_fetch_sample ibs_fetch;
-	struct ibs_op_sample ibs_op;
+	u32 low, high;
+	u64 msr;
+	struct op_entry entry;
 
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return 1;
 
 	if (ibs_config.fetch_enabled) {
 		rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 		if (high & IBS_FETCH_HIGH_VALID_BIT) {
-			ibs_fetch.ibs_fetch_ctl_high = high;
-			ibs_fetch.ibs_fetch_ctl_low = low;
-			rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high);
-			ibs_fetch.ibs_fetch_lin_addr_high = high;
-			ibs_fetch.ibs_fetch_lin_addr_low = low;
-			rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high);
-			ibs_fetch.ibs_fetch_phys_addr_high = high;
-			ibs_fetch.ibs_fetch_phys_addr_low = low;
-
-			oprofile_add_ibs_sample(regs,
-						(unsigned int *)&ibs_fetch,
-						IBS_FETCH_BEGIN);
+			rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr);
+			oprofile_write_reserve(&entry, regs, msr,
+					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			oprofile_add_data(&entry, low);
+			oprofile_add_data(&entry, high);
+			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			oprofile_write_commit(&entry);
 
 			/* reenable the IRQ */
-			rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 			high &= ~IBS_FETCH_HIGH_VALID_BIT;
 			high |= IBS_FETCH_HIGH_ENABLE;
 			low &= IBS_FETCH_LOW_MAX_CNT_MASK;
@@ -232,30 +187,29 @@ op_amd_handle_ibs(struct pt_regs * const regs,
 	if (ibs_config.op_enabled) {
 		rdmsr(MSR_AMD64_IBSOPCTL, low, high);
 		if (low & IBS_OP_LOW_VALID_BIT) {
-			rdmsr(MSR_AMD64_IBSOPRIP, low, high);
-			ibs_op.ibs_op_rip_low = low;
-			ibs_op.ibs_op_rip_high = high;
-			rdmsr(MSR_AMD64_IBSOPDATA, low, high);
-			ibs_op.ibs_op_data1_low = low;
-			ibs_op.ibs_op_data1_high = high;
-			rdmsr(MSR_AMD64_IBSOPDATA2, low, high);
-			ibs_op.ibs_op_data2_low = low;
-			ibs_op.ibs_op_data2_high = high;
-			rdmsr(MSR_AMD64_IBSOPDATA3, low, high);
-			ibs_op.ibs_op_data3_low = low;
-			ibs_op.ibs_op_data3_high = high;
-			rdmsr(MSR_AMD64_IBSDCLINAD, low, high);
-			ibs_op.ibs_dc_linear_low = low;
-			ibs_op.ibs_dc_linear_high = high;
-			rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high);
-			ibs_op.ibs_dc_phys_low = low;
-			ibs_op.ibs_dc_phys_high = high;
+			rdmsrl(MSR_AMD64_IBSOPRIP, msr);
+			oprofile_write_reserve(&entry, regs, msr,
+					       IBS_OP_CODE, IBS_OP_SIZE);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSOPDATA, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSOPDATA2, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSOPDATA3, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSDCLINAD, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			oprofile_write_commit(&entry);
 
 			/* reenable the IRQ */
-			oprofile_add_ibs_sample(regs,
-						(unsigned int *)&ibs_op,
-						IBS_OP_BEGIN);
-			rdmsr(MSR_AMD64_IBSOPCTL, low, high);
 			high = 0;
 			low &= ~IBS_OP_LOW_VALID_BIT;
 			low |= IBS_OP_LOW_ENABLE;
@@ -305,14 +259,14 @@ static void op_amd_start(struct op_msrs const * const msrs)
 	}
 
 #ifdef CONFIG_OPROFILE_IBS
-	if (ibs_allowed && ibs_config.fetch_enabled) {
+	if (has_ibs && ibs_config.fetch_enabled) {
 		low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
 		high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
 			+ IBS_FETCH_HIGH_ENABLE;
 		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 	}
 
-	if (ibs_allowed && ibs_config.op_enabled) {
+	if (has_ibs && ibs_config.op_enabled) {
 		low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
 			+ ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
 			+ IBS_OP_LOW_ENABLE;
@@ -341,14 +295,14 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 	}
 
 #ifdef CONFIG_OPROFILE_IBS
-	if (ibs_allowed && ibs_config.fetch_enabled) {
+	if (has_ibs && ibs_config.fetch_enabled) {
 		/* clear max count and enable */
 		low = 0;
 		high = 0;
 		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 	}
 
-	if (ibs_allowed && ibs_config.op_enabled) {
+	if (has_ibs && ibs_config.op_enabled) {
 		/* clear max count and enable */
 		low = 0;
 		high = 0;
@@ -409,6 +363,7 @@ static int init_ibs_nmi(void)
 			 | IBSCTL_LVTOFFSETVAL);
 	pci_read_config_dword(cpu_cfg, IBSCTL, &value);
 	if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
+		pci_dev_put(cpu_cfg);
 		printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
 			"IBSCTL = 0x%08x", value);
 		return 1;
@@ -436,20 +391,20 @@ static int init_ibs_nmi(void)
 /* uninitialize the APIC for the IBS interrupts if needed */
 static void clear_ibs_nmi(void)
 {
-	if (ibs_allowed)
+	if (has_ibs)
 		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
 }
 
 /* initialize the APIC for the IBS interrupts if available */
 static void ibs_init(void)
 {
-	ibs_allowed = boot_cpu_has(X86_FEATURE_IBS);
+	has_ibs = boot_cpu_has(X86_FEATURE_IBS);
 
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return;
 
 	if (init_ibs_nmi()) {
-		ibs_allowed = 0;
+		has_ibs = 0;
 		return;
 	}
 
@@ -458,7 +413,7 @@ static void ibs_init(void)
 
 static void ibs_exit(void)
 {
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return;
 
 	clear_ibs_nmi();
@@ -478,7 +433,7 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
 	if (ret)
 		return ret;
 
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return ret;
 
 	/* model specific files */
diff --git a/block/Kconfig b/block/Kconfig
index ac0956f77785..0cbb3b88b59a 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -36,6 +36,12 @@ config LBD
 	  This option also enables support for single files larger than
 	  2TB.
 
+	  The ext4 filesystem requires that this feature be enabled in
+	  order to support filesystems that have the huge_file feature
+	  enabled. Otherwise, it will refuse to mount any filesystems
+	  that use the huge_file feature, which is enabled by default
+	  by mke2fs.ext4. The GFS2 filesystem also requires this feature.
+
 	  If unsure, say N.
 
 config BLK_DEV_IO_TRACE
diff --git a/block/blk-map.c b/block/blk-map.c
index 2990447f45e9..f103729b462f 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -42,7 +42,7 @@ static int __blk_rq_unmap_user(struct bio *bio)
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     struct rq_map_data *map_data, void __user *ubuf,
-			     unsigned int len, int null_mapped, gfp_t gfp_mask)
+			     unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long uaddr;
 	struct bio *bio, *orig_bio;
@@ -63,7 +63,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	if (null_mapped)
+	if (map_data && map_data->null_mapped)
 		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
 
 	orig_bio = bio;
@@ -114,17 +114,15 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
-	int ret, null_mapped = 0;
+	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len)
 		return -EINVAL;
-	if (!ubuf) {
-		if (!map_data || rq_data_dir(rq) != READ)
-			return -EINVAL;
-		null_mapped = 1;
-	}
+
+	if (!ubuf && (!map_data || !map_data->null_mapped))
+		return -EINVAL;
 
 	while (bytes_read != len) {
 		unsigned long map_len, end, start;
@@ -143,13 +141,16 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		map_len -= PAGE_SIZE;
 
 		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
-					null_mapped, gfp_mask);
+					gfp_mask);
 		if (ret < 0)
 			goto unmap_rq;
 		if (!bio)
 			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
+
+		if (map_data)
+			map_data->offset += ret;
 	}
 
 	if (!bio_flagged(bio, BIO_USER_MAPPED))
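
The blk-map.c hunks move the "null mapped" decision out of blk_rq_map_user()'s argument inference and into struct rq_map_data itself, and advance map_data->offset as each chunk is mapped. A compilable mock of the caller-side contract (the struct here is illustrative; the field names mirror rq_map_data in this tree):

#include <stdio.h>

struct rq_map_data_mock {
	unsigned long offset;	/* advanced as chunks are mapped */
	int null_mapped;	/* allocate bios but copy no user data */
};

static long map_one_chunk(struct rq_map_data_mock *md, long len)
{
	if (md)
		md->offset += len;	/* mirrors the new hunk above */
	return len;
}

int main(void)
{
	struct rq_map_data_mock md = { 0, 1 };

	/* ubuf == NULL is now valid whenever md.null_mapped is set */
	map_one_chunk(&md, 4096);
	map_one_chunk(&md, 4096);
	printf("offset=%lu null_mapped=%d\n", md.offset, md.null_mapped);
	return 0;
}
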
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index dcbf1be149f3..f21147f3626a 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -28,351 +28,18 @@
 #include <linux/async_tx.h>
 
 #ifdef CONFIG_DMA_ENGINE
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-	struct dma_chan *chan, enum dma_state state);
-
-static struct dma_client async_tx_dma = {
-	.event_callback = dma_channel_add_remove,
-	/* .cap_mask == 0 defaults to all channels */
-};
-
-/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * chan_ref_percpu - tracks channel allocations per core/opertion
- */
-struct chan_ref_percpu {
-	struct dma_chan_ref *ref;
-};
-
-static int channel_table_initialized;
-static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];
-
-/**
- * async_tx_lock - protect modification of async_tx_master_list and serialize
- *	rebalance operations
- */
-static spinlock_t async_tx_lock;
-
-static LIST_HEAD(async_tx_master_list);
-
-/* async_tx_issue_pending_all - start all transactions on all channels */
-void async_tx_issue_pending_all(void)
-{
-	struct dma_chan_ref *ref;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-		ref->chan->device->device_issue_pending(ref->chan);
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
-
-/* dma_wait_for_async_tx - spin wait for a transcation to complete
- * @tx: transaction to wait on
- */
-enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-	enum dma_status status;
-	struct dma_async_tx_descriptor *iter;
-	struct dma_async_tx_descriptor *parent;
-
-	if (!tx)
-		return DMA_SUCCESS;
-
-	/* poll through the dependency chain, return when tx is complete */
-	do {
-		iter = tx;
-
-		/* find the root of the unsubmitted dependency chain */
-		do {
-			parent = iter->parent;
-			if (!parent)
-				break;
-			else
-				iter = parent;
-		} while (parent);
-
-		/* there is a small window for ->parent == NULL and
-		 * ->cookie == -EBUSY
-		 */
-		while (iter->cookie == -EBUSY)
-			cpu_relax();
-
-		status = dma_sync_wait(iter->chan, iter->cookie);
-	} while (status == DMA_IN_PROGRESS || (iter != tx));
-
-	return status;
-}
-EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
-
-/* async_tx_run_dependencies - helper routine for dma drivers to process
- *	(start) dependent operations on their target channel
- * @tx: transaction with dependencies
- */
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_async_tx_descriptor *dep = tx->next;
-	struct dma_async_tx_descriptor *dep_next;
-	struct dma_chan *chan;
-
-	if (!dep)
-		return;
-
-	chan = dep->chan;
-
-	/* keep submitting up until a channel switch is detected
-	 * in that case we will be called again as a result of
-	 * processing the interrupt from async_tx_channel_switch
-	 */
-	for (; dep; dep = dep_next) {
-		spin_lock_bh(&dep->lock);
-		dep->parent = NULL;
-		dep_next = dep->next;
-		if (dep_next && dep_next->chan == chan)
-			dep->next = NULL; /* ->next will be submitted */
-		else
-			dep_next = NULL; /* submit current dep and terminate */
-		spin_unlock_bh(&dep->lock);
-
-		dep->tx_submit(dep);
-	}
-
-	chan->device->device_issue_pending(chan);
-}
-EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
-
-static void
-free_dma_chan_ref(struct rcu_head *rcu)
-{
-	struct dma_chan_ref *ref;
-	ref = container_of(rcu, struct dma_chan_ref, rcu);
-	kfree(ref);
-}
-
-static void
-init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
-{
-	INIT_LIST_HEAD(&ref->node);
-	INIT_RCU_HEAD(&ref->rcu);
-	ref->chan = chan;
-	atomic_set(&ref->count, 0);
-}
-
-/**
- * get_chan_ref_by_cap - returns the nth channel of the given capability
- *	defaults to returning the channel with the desired capability and the
- *	lowest reference count if the index can not be satisfied
- * @cap: capability to match
- * @index: nth channel desired, passing -1 has the effect of forcing the
- *  default return value
- */
-static struct dma_chan_ref *
-get_chan_ref_by_cap(enum dma_transaction_type cap, int index)
-{
-	struct dma_chan_ref *ret_ref = NULL, *min_ref = NULL, *ref;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-		if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
-			if (!min_ref)
-				min_ref = ref;
-			else if (atomic_read(&ref->count) <
-				atomic_read(&min_ref->count))
-				min_ref = ref;
-
-			if (index-- == 0) {
-				ret_ref = ref;
-				break;
-			}
-		}
-	rcu_read_unlock();
-
-	if (!ret_ref)
-		ret_ref = min_ref;
-
-	if (ret_ref)
-		atomic_inc(&ret_ref->count);
-
-	return ret_ref;
-}
-
-/**
- * async_tx_rebalance - redistribute the available channels, optimize
- * for cpu isolation in the SMP case, and opertaion isolation in the
- * uniprocessor case
- */
-static void async_tx_rebalance(void)
-{
-	int cpu, cap, cpu_idx = 0;
-	unsigned long flags;
-
-	if (!channel_table_initialized)
-		return;
-
-	spin_lock_irqsave(&async_tx_lock, flags);
-
-	/* undo the last distribution */
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		for_each_possible_cpu(cpu) {
-			struct dma_chan_ref *ref =
-				per_cpu_ptr(channel_table[cap], cpu)->ref;
-			if (ref) {
-				atomic_set(&ref->count, 0);
-				per_cpu_ptr(channel_table[cap], cpu)->ref =
-									NULL;
-			}
-		}
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		for_each_online_cpu(cpu) {
-			struct dma_chan_ref *new;
-			if (NR_CPUS > 1)
-				new = get_chan_ref_by_cap(cap, cpu_idx++);
-			else
-				new = get_chan_ref_by_cap(cap, -1);
-
-			per_cpu_ptr(channel_table[cap], cpu)->ref = new;
-		}
-
-	spin_unlock_irqrestore(&async_tx_lock, flags);
-}
-
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-	struct dma_chan *chan, enum dma_state state)
-{
-	unsigned long found, flags;
-	struct dma_chan_ref *master_ref, *ref;
-	enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		found = 0;
-		rcu_read_lock();
-		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-			if (ref->chan == chan) {
-				found = 1;
-				break;
-			}
-		rcu_read_unlock();
-
-		pr_debug("async_tx: dma resource available [%s]\n",
-			found ? "old" : "new");
-
-		if (!found)
-			ack = DMA_ACK;
-		else
-			break;
-
-		/* add the channel to the generic management list */
-		master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
-		if (master_ref) {
-			/* keep a reference until async_tx is unloaded */
-			dma_chan_get(chan);
-			init_dma_chan_ref(master_ref, chan);
-			spin_lock_irqsave(&async_tx_lock, flags);
-			list_add_tail_rcu(&master_ref->node,
-				&async_tx_master_list);
-			spin_unlock_irqrestore(&async_tx_lock,
-				flags);
-		} else {
-			printk(KERN_WARNING "async_tx: unable to create"
-				" new master entry in response to"
-				" a DMA_RESOURCE_ADDED event"
-				" (-ENOMEM)\n");
-			return 0;
-		}
-
-		async_tx_rebalance();
-		break;
-	case DMA_RESOURCE_REMOVED:
-		found = 0;
-		spin_lock_irqsave(&async_tx_lock, flags);
-		list_for_each_entry(ref, &async_tx_master_list, node)
-			if (ref->chan == chan) {
-				/* permit backing devices to go away */
-				dma_chan_put(ref->chan);
-				list_del_rcu(&ref->node);
-				call_rcu(&ref->rcu, free_dma_chan_ref);
-				found = 1;
-				break;
-			}
-		spin_unlock_irqrestore(&async_tx_lock, flags);
-
-		pr_debug("async_tx: dma resource removed [%s]\n",
-			found ? "ours" : "not ours");
-
-		if (found)
-			ack = DMA_ACK;
-		else
-			break;
-
-		async_tx_rebalance();
-		break;
-	case DMA_RESOURCE_SUSPEND:
-	case DMA_RESOURCE_RESUME:
-		printk(KERN_WARNING "async_tx: does not support dma channel"
-			" suspend/resume\n");
-		break;
-	default:
-		BUG();
-	}
-
-	return ack;
-}
-
-static int __init
-async_tx_init(void)
+static int __init async_tx_init(void)
 {
-	enum dma_transaction_type cap;
-
-	spin_lock_init(&async_tx_lock);
-	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
-	/* an interrupt will never be an explicit operation type.
-	 * clearing this bit prevents allocation to a slot in 'channel_table'
-	 */
-	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
-		channel_table[cap] = alloc_percpu(struct chan_ref_percpu);
-		if (!channel_table[cap])
-			goto err;
-	}
-
-	channel_table_initialized = 1;
-	dma_async_client_register(&async_tx_dma);
-	dma_async_client_chan_request(&async_tx_dma);
+	dmaengine_get();
 
 	printk(KERN_INFO "async_tx: api initialized (async)\n");
 
 	return 0;
-err:
-	printk(KERN_ERR "async_tx: initialization failure\n");
-
-	while (--cap >= 0)
-		free_percpu(channel_table[cap]);
-
-	return 1;
 }
 
 static void __exit async_tx_exit(void)
 {
-	enum dma_transaction_type cap;
-
-	channel_table_initialized = 0;
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		if (channel_table[cap])
-			free_percpu(channel_table[cap]);
-
-	dma_async_client_unregister(&async_tx_dma);
+	dmaengine_put();
 }
 
 /**
@@ -387,16 +54,9 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 {
 	/* see if we can keep the chain on one channel */
 	if (depend_tx &&
 	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
 		return depend_tx->chan;
-	else if (likely(channel_table_initialized)) {
-		struct dma_chan_ref *ref;
-		int cpu = get_cpu();
-		ref = per_cpu_ptr(channel_table[tx_type], cpu)->ref;
-		put_cpu();
-		return ref ? ref->chan : NULL;
-	} else
-		return NULL;
+	return dma_find_channel(tx_type);
 }
 EXPORT_SYMBOL_GPL(__async_tx_find_channel);
 #else
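
The async_tx rewrite above drops the private client registration, channel_table and rebalancing in favour of dmaengine_get()/dmaengine_put() plus dma_find_channel(). A user-space model of the simplified selection in __async_tx_find_channel() (mock types; only the two-step choice is taken from the code above):

#include <stdio.h>

struct chan { const char *name; unsigned int caps; };
struct descriptor { struct chan *chan; };

enum { CAP_MEMCPY = 1u << 0, CAP_XOR = 1u << 1 };

static struct chan engine_chan = { "dma0chan1", CAP_MEMCPY | CAP_XOR };

static struct chan *find_channel(unsigned int cap)
{
	return &engine_chan;	/* per-cpu lookup in the real dmaengine */
}

static struct chan *pick_channel(unsigned int cap, struct descriptor *depend_tx)
{
	/* keep a dependency chain on one channel when possible */
	if (depend_tx && (depend_tx->chan->caps & cap))
		return depend_tx->chan;
	return find_channel(cap);
}

int main(void)
{
	struct chan prev = { "dma0chan0", CAP_MEMCPY };
	struct descriptor dep = { &prev };

	printf("%s\n", pick_channel(CAP_MEMCPY, &dep)->name);	/* dma0chan0 */
	printf("%s\n", pick_channel(CAP_XOR, &dep)->name);	/* dma0chan1 */
	return 0;
}
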
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 2f557f570ade..00cf9553f740 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -107,4 +107,6 @@ source "drivers/uio/Kconfig"
107source "drivers/xen/Kconfig" 107source "drivers/xen/Kconfig"
108 108
109source "drivers/staging/Kconfig" 109source "drivers/staging/Kconfig"
110
111source "drivers/platform/Kconfig"
110endmenu 112endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index e121b66ef082..c1bf41737936 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -18,6 +18,9 @@ obj-$(CONFIG_ARM_AMBA) += amba/
 
 obj-$(CONFIG_XEN) += xen/
 
+# regulators early, since some subsystems rely on them to initialize
+obj-$(CONFIG_REGULATOR) += regulator/
+
 # char/ comes before serial/ etc so that the VT console is the boot-time
 # default.
 obj-y += char/
@@ -101,5 +104,5 @@ obj-$(CONFIG_PPC_PS3) += ps3/
 obj-$(CONFIG_OF) += of/
 obj-$(CONFIG_SSB) += ssb/
 obj-$(CONFIG_VIRTIO) += virtio/
-obj-$(CONFIG_REGULATOR) += regulator/
 obj-$(CONFIG_STAGING) += staging/
+obj-y += platform/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b0243fd55ac0..d7f9839ba264 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -196,90 +196,6 @@ config ACPI_NUMA
 	depends on (X86 || IA64)
 	default y if IA64_GENERIC || IA64_SGI_SN2
 
-config ACPI_WMI
-	tristate "WMI (EXPERIMENTAL)"
-	depends on X86
-	depends on EXPERIMENTAL
-	help
-	  This driver adds support for the ACPI-WMI (Windows Management
-	  Instrumentation) mapper device (PNP0C14) found on some systems.
-
-	  ACPI-WMI is a proprietary extension to ACPI to expose parts of the
-	  ACPI firmware to userspace - this is done through various vendor
-	  defined methods and data blocks in a PNP0C14 device, which are then
-	  made available for userspace to call.
-
-	  The implementation of this in Linux currently only exposes this to
-	  other kernel space drivers.
-
-	  This driver is a required dependency to build the firmware specific
-	  drivers needed on many machines, including Acer and HP laptops.
-
-	  It is safe to enable this driver even if your DSDT doesn't define
-	  any ACPI-WMI devices.
-
-config ACPI_ASUS
-	tristate "ASUS/Medion Laptop Extras"
-	depends on X86
-	select BACKLIGHT_CLASS_DEVICE
-	---help---
-	  This driver provides support for extra features of ACPI-compatible
-	  ASUS laptops. As some of Medion laptops are made by ASUS, it may also
-	  support some Medion laptops (such as 9675 for example). It makes all
-	  the extra buttons generate standard ACPI events that go through
-	  /proc/acpi/events, and (on some models) adds support for changing the
-	  display brightness and output, switching the LCD backlight on and off,
-	  and most importantly, allows you to blink those fancy LEDs intended
-	  for reporting mail and wireless status.
-
-	  Note: display switching code is currently considered EXPERIMENTAL,
-	  toying with these values may even lock your machine.
-
-	  All settings are changed via /proc/acpi/asus directory entries. Owner
-	  and group for these entries can be set with asus_uid and asus_gid
-	  parameters.
-
-	  More information and a userspace daemon for handling the extra buttons
-	  at <http://sourceforge.net/projects/acpi4asus/>.
-
-	  If you have an ACPI-compatible ASUS laptop, say Y or M here. This
-	  driver is still under development, so if your laptop is unsupported or
-	  something works not quite as expected, please use the mailing list
-	  available on the above page (acpi4asus-user@lists.sourceforge.net).
-
-	  NOTE: This driver is deprecated and will probably be removed soon,
-	  use asus-laptop instead.
-
-config ACPI_TOSHIBA
-	tristate "Toshiba Laptop Extras"
-	depends on X86 && INPUT
-	select INPUT_POLLDEV
-	select NET
-	select RFKILL
-	select BACKLIGHT_CLASS_DEVICE
-	---help---
-	  This driver adds support for access to certain system settings
-	  on "legacy free" Toshiba laptops. These laptops can be recognized by
-	  their lack of a BIOS setup menu and APM support.
-
-	  On these machines, all system configuration is handled through the
-	  ACPI. This driver is required for access to controls not covered
-	  by the general ACPI drivers, such as LCD brightness, video output,
-	  etc.
-
-	  This driver differs from the non-ACPI Toshiba laptop driver (located
-	  under "Processor type and features") in several aspects.
-	  Configuration is accessed by reading and writing text files in the
-	  /proc tree instead of by program interface to /dev. Furthermore, no
-	  power management functions are exposed, as those are handled by the
-	  general ACPI drivers.
-
-	  More information about this driver is available at
-	  <http://memebeam.org/toys/ToshibaAcpiDriver>.
-
-	  If you have a legacy free Toshiba laptop (such as the Libretto L1
-	  series), say Y.
-
 config ACPI_CUSTOM_DSDT_FILE
 	string "Custom DSDT Table file to include"
 	default ""
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 3c0c93300f12..d80f4cc2e0da 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -2,15 +2,8 @@
 # Makefile for the Linux ACPI interpreter
 #
 
-export ACPI_CFLAGS
-
-ACPI_CFLAGS := -Os
-
-ifdef CONFIG_ACPI_DEBUG
-  ACPI_CFLAGS += -DACPI_DEBUG_OUTPUT
-endif
-
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
+ccflags-y := -Os
+ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
 
 #
 # ACPI Boot-Time Table Parsing
@@ -22,9 +15,13 @@ obj-$(CONFIG_X86) += blacklist.o
 # ACPI Core Subsystem (Interpreter)
 #
 obj-y += osl.o utils.o reboot.o\
-		dispatcher/ events/ executer/ hardware/ \
-		namespace/ parser/ resources/ tables/ \
-		utilities/
+		acpica/
+
+# sleep related files
+obj-y += wakeup.o
+obj-y += main.o
+obj-$(CONFIG_ACPI_SLEEP) += proc.o
+
 
 #
 # ACPI Bus and Device Drivers
@@ -35,7 +32,6 @@ ifdef CONFIG_CPU_FREQ
 processor-objs += processor_perflib.o
 endif
 
-obj-y += sleep/
 obj-y += bus.o glue.o
 obj-y += scan.o
 # Keep EC driver first. Initialization of others depend on it.
@@ -59,9 +55,6 @@ obj-y += power.o
 obj-$(CONFIG_ACPI_SYSTEM) += system.o event.o
 obj-$(CONFIG_ACPI_DEBUG) += debug.o
 obj-$(CONFIG_ACPI_NUMA) += numa.o
-obj-$(CONFIG_ACPI_WMI) += wmi.o
-obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
-obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
 obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
 obj-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 obj-$(CONFIG_ACPI_SBS) += sbshc.o
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
new file mode 100644
index 000000000000..3f23298ee3fd
--- /dev/null
+++ b/drivers/acpi/acpica/Makefile
@@ -0,0 +1,44 @@
+#
+# Makefile for ACPICA Core interpreter
+#
+
+ccflags-y := -Os
+ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
+
+obj-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
+	dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \
+	dsinit.o
+
+obj-y += evevent.o evregion.o evsci.o evxfevnt.o \
+	evmisc.o evrgnini.o evxface.o evxfregn.o \
+	evgpe.o evgpeblk.o
+
+obj-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
+	exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
+	excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
+	exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o
+
+obj-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o
+
+obj-$(ACPI_FUTURE_USAGE) += hwtimer.o
+
+obj-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \
+	nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \
+	nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \
+	nsparse.o nspredef.o
+
+obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
+
+obj-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \
+	psopcode.o psscope.o psutils.o psxface.o
+
+obj-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
+	rscalc.o rsirq.o rsmemory.o rsutils.o
+
+obj-$(ACPI_FUTURE_USAGE) += rsdump.o
+
+obj-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
+
+obj-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
+	utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
+	utstate.o utmutex.o utobject.o utresrc.o
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
new file mode 100644
index 000000000000..3b20786cbb0d
--- /dev/null
+++ b/drivers/acpi/acpica/accommon.h
@@ -0,0 +1,63 @@
+/******************************************************************************
+ *
+ * Name: accommon.h - Common include files for generation of ACPICA source
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACCOMMON_H__
+#define __ACCOMMON_H__
+
+/*
+ * Common set of includes for all ACPICA source files.
+ * We put them here because we don't want to duplicate them
+ * in the the source code again and again.
+ *
+ * Note: The order of these include files is important.
+ */
+#include "acconfig.h"		/* Global configuration constants */
+#include "acmacros.h"		/* C macros */
+#include "aclocal.h"		/* Internal data types */
+#include "acobject.h"		/* ACPI internal object */
+#include "acstruct.h"		/* Common structures */
+#include "acglobal.h"		/* All global variables */
+#include "achware.h"		/* Hardware defines and interfaces */
+#include "acutils.h"		/* Utility interfaces */
+
+#endif /* __ACCOMMON_H__ */
diff --git a/include/acpi/acconfig.h b/drivers/acpi/acpica/acconfig.h
index 29feee27f0ea..e6777fb883d2 100644
--- a/include/acpi/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -61,10 +61,6 @@
  *
  */
 
-/* Current ACPICA subsystem version in YYYYMMDD format */
-
-#define ACPI_CA_VERSION 0x20080926
-
 /*
  * OS name, used for the _OS object. The _OS object is essentially obsolete,
  * but there is a large base of ASL/AML code in existing machines that check
@@ -119,6 +115,10 @@
 
 #define ACPI_ROOT_TABLE_SIZE_INCREMENT 4
 
+/* Maximum number of While() loop iterations before forced abort */
+
+#define ACPI_MAX_LOOP_ITERATIONS 0xFFFF
+
 /******************************************************************************
  *
  * ACPI Specification constants (Do not change unless the specification changes)
diff --git a/include/acpi/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 62c59df3b86c..62c59df3b86c 100644
--- a/include/acpi/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
diff --git a/include/acpi/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 6291904be01e..6291904be01e 100644
--- a/include/acpi/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
diff --git a/include/acpi/acevents.h b/drivers/acpi/acpica/acevents.h
index d5d099bf349c..07e20135f01b 100644
--- a/include/acpi/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -93,11 +93,13 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
93 */ 93 */
94u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); 94u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
95 95
96acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback); 96acpi_status
97acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
97 98
98acpi_status 99acpi_status
99acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 100acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
100 struct acpi_gpe_block_info *gpe_block); 101 struct acpi_gpe_block_info *gpe_block,
102 void *context);
101 103
102acpi_status 104acpi_status
103acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, 105acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
diff --git a/include/acpi/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 15dda46b70d1..ddb40f5c68fc 100644
--- a/include/acpi/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -102,6 +102,12 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
102 */ 102 */
103ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE); 103ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
104 104
105/*
106 * Optionally use default values for the ACPI register widths. Set this to
107 * TRUE to use the defaults, if an FADT contains incorrect widths/lengths.
108 */
109ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
110
105/***************************************************************************** 111/*****************************************************************************
106 * 112 *
107 * Debug support 113 * Debug support
@@ -140,7 +146,7 @@ ACPI_EXTERN u32 acpi_gbl_trace_flags;
140 */ 146 */
141ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list; 147ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list;
142ACPI_EXTERN struct acpi_table_fadt acpi_gbl_FADT; 148ACPI_EXTERN struct acpi_table_fadt acpi_gbl_FADT;
143extern u8 acpi_gbl_permanent_mmap; 149ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
144 150
145/* These addresses are calculated from FADT address values */ 151/* These addresses are calculated from FADT address values */
146 152
@@ -326,6 +332,7 @@ ACPI_EXTERN struct acpi_fixed_event_handler
326ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; 332ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
327ACPI_EXTERN struct acpi_gpe_block_info 333ACPI_EXTERN struct acpi_gpe_block_info
328*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; 334*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
335ACPI_EXTERN u32 acpi_current_gpe_count;
329 336
330/***************************************************************************** 337/*****************************************************************************
331 * 338 *
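
A minimal stand-alone sketch of how the new acpi_gbl_use_default_register_widths
global is meant to be consumed; the stub typedef, the default value, and the
init function below are illustrative, not the kernel's code:

    #include <stdio.h>

    typedef unsigned char u8;

    /* Stand-in for the global declared in the hunk above; TRUE by default */
    static u8 acpi_gbl_use_default_register_widths = 1;

    static void example_early_acpi_setup(void)
    {
        /*
         * An OS that wants strict FADT validation could clear the flag
         * before table initialization; leaving it TRUE lets ACPICA
         * substitute default register widths when the FADT carries
         * bogus widths/lengths.
         */
        acpi_gbl_use_default_register_widths = 0;
    }

    int main(void)
    {
        example_early_acpi_setup();
        printf("use default register widths: %u\n",
               (unsigned)acpi_gbl_use_default_register_widths);
        return 0;
    }
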
diff --git a/include/acpi/achware.h b/drivers/acpi/acpica/achware.h
index 97a72b193276..58c69dc49ab4 100644
--- a/include/acpi/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -44,11 +44,7 @@
44#ifndef __ACHWARE_H__ 44#ifndef __ACHWARE_H__
45#define __ACHWARE_H__ 45#define __ACHWARE_H__
46 46
47/* PM Timer ticks per second (HZ) */ 47/* Values for the _SST predefined method */
48
49#define PM_TIMER_FREQUENCY 3579545
50
51/* Values for the _SST reserved method */
52 48
53#define ACPI_SST_INDICATOR_OFF 0 49#define ACPI_SST_INDICATOR_OFF 0
54#define ACPI_SST_WORKING 1 50#define ACPI_SST_WORKING 1
@@ -56,8 +52,6 @@
56#define ACPI_SST_SLEEPING 3 52#define ACPI_SST_SLEEPING 3
57#define ACPI_SST_SLEEP_CONTEXT 4 53#define ACPI_SST_SLEEP_CONTEXT 4
58 54
59/* Prototypes */
60
61/* 55/*
62 * hwacpi - high level functions 56 * hwacpi - high level functions
63 */ 57 */
@@ -75,13 +69,6 @@ acpi_hw_register_read(u32 register_id, u32 * return_value);
75 69
76acpi_status acpi_hw_register_write(u32 register_id, u32 value); 70acpi_status acpi_hw_register_write(u32 register_id, u32 value);
77 71
78acpi_status
79acpi_hw_low_level_read(u32 width,
80 u32 * value, struct acpi_generic_address *reg);
81
82acpi_status
83acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address *reg);
84
85acpi_status acpi_hw_clear_acpi_status(void); 72acpi_status acpi_hw_clear_acpi_status(void);
86 73
87/* 74/*
@@ -94,13 +81,13 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info);
94 81
95acpi_status 82acpi_status
96acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 83acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
97 struct acpi_gpe_block_info *gpe_block); 84 struct acpi_gpe_block_info *gpe_block, void *context);
98 85
99acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info); 86acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info);
100 87
101acpi_status 88acpi_status
102acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 89acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
103 struct acpi_gpe_block_info *gpe_block); 90 struct acpi_gpe_block_info *gpe_block, void *context);
104 91
105acpi_status 92acpi_status
106acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, 93acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
@@ -114,7 +101,8 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void);
114 101
115acpi_status 102acpi_status
116acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 103acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
117 struct acpi_gpe_block_info *gpe_block); 104 struct acpi_gpe_block_info *gpe_block,
105 void *context);
118 106
119#ifdef ACPI_FUTURE_USAGE 107#ifdef ACPI_FUTURE_USAGE
120/* 108/*
diff --git a/include/acpi/acinterp.h b/drivers/acpi/acpica/acinterp.h
index e8db7a3143a5..e8db7a3143a5 100644
--- a/include/acpi/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
diff --git a/include/acpi/aclocal.h b/drivers/acpi/acpica/aclocal.h
index ecab527cf78e..492d02761bb7 100644
--- a/include/acpi/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -46,8 +46,6 @@
46 46
47/* acpisrc:struct_defs -- for acpisrc conversion */ 47/* acpisrc:struct_defs -- for acpisrc conversion */
48 48
49#define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */
50#define ACPI_DO_NOT_WAIT 0
51#define ACPI_SERIALIZED 0xFF 49#define ACPI_SERIALIZED 0xFF
52 50
53typedef u32 acpi_mutex_handle; 51typedef u32 acpi_mutex_handle;
@@ -120,11 +118,6 @@ static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
120#define ACPI_MAX_LOCK 1 118#define ACPI_MAX_LOCK 1
121#define ACPI_NUM_LOCK ACPI_MAX_LOCK+1 119#define ACPI_NUM_LOCK ACPI_MAX_LOCK+1
122 120
123/* Owner IDs are used to track namespace nodes for selective deletion */
124
125typedef u8 acpi_owner_id;
126#define ACPI_OWNER_ID_MAX 0xFF
127
128/* This Thread ID means that the mutex is not in use (unlocked) */ 121/* This Thread ID means that the mutex is not in use (unlocked) */
129 122
130#define ACPI_MUTEX_NOT_ACQUIRED (acpi_thread_id) 0 123#define ACPI_MUTEX_NOT_ACQUIRED (acpi_thread_id) 0
@@ -165,11 +158,6 @@ typedef enum {
165 ACPI_IMODE_EXECUTE = 0x03 158 ACPI_IMODE_EXECUTE = 0x03
166} acpi_interpreter_mode; 159} acpi_interpreter_mode;
167 160
168union acpi_name_union {
169 u32 integer;
170 char ascii[4];
171};
172
173/* 161/*
174 * The Namespace Node describes a named object that appears in the AML. 162 * The Namespace Node describes a named object that appears in the AML.
175 * descriptor_type is used to differentiate between internal descriptors. 163 * descriptor_type is used to differentiate between internal descriptors.
@@ -216,26 +204,6 @@ struct acpi_namespace_node {
216#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */ 204#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */
217#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */ 205#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */
218 206
219/*
220 * ACPI Table Descriptor. One per ACPI table
221 */
222struct acpi_table_desc {
223 acpi_physical_address address;
224 struct acpi_table_header *pointer;
225 u32 length; /* Length fixed at 32 bits */
226 union acpi_name_union signature;
227 acpi_owner_id owner_id;
228 u8 flags;
229};
230
231/* Flags for above */
232
233#define ACPI_TABLE_ORIGIN_UNKNOWN (0)
234#define ACPI_TABLE_ORIGIN_MAPPED (1)
235#define ACPI_TABLE_ORIGIN_ALLOCATED (2)
236#define ACPI_TABLE_ORIGIN_MASK (3)
237#define ACPI_TABLE_IS_LOADED (4)
238
239/* One internal RSDT for table management */ 207/* One internal RSDT for table management */
240 208
241struct acpi_internal_rsdt { 209struct acpi_internal_rsdt {
@@ -266,15 +234,6 @@ struct acpi_ns_search_data {
266 struct acpi_namespace_node *node; 234 struct acpi_namespace_node *node;
267}; 235};
268 236
269/*
270 * Predefined Namespace items
271 */
272struct acpi_predefined_names {
273 char *name;
274 u8 type;
275 char *val;
276};
277
278/* Object types used during package copies */ 237/* Object types used during package copies */
279 238
280#define ACPI_COPY_TYPE_SIMPLE 0 239#define ACPI_COPY_TYPE_SIMPLE 0
@@ -487,10 +446,15 @@ struct acpi_gpe_walk_info {
487 struct acpi_gpe_block_info *gpe_block; 446 struct acpi_gpe_block_info *gpe_block;
488}; 447};
489 448
490typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info * 449struct acpi_gpe_device_info {
491 gpe_xrupt_info, 450 u32 index;
492 struct acpi_gpe_block_info * 451 u32 next_block_base_index;
493 gpe_block); 452 acpi_status status;
453 struct acpi_namespace_node *gpe_device;
454};
455
456typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *gpe_xrupt_info,
457 struct acpi_gpe_block_info *gpe_block, void *context);
494 458
495/* Information about each particular fixed event */ 459/* Information about each particular fixed event */
496 460
@@ -566,6 +530,7 @@ struct acpi_control_state {
566 union acpi_parse_object *predicate_op; 530 union acpi_parse_object *predicate_op;
567 u8 *aml_predicate_start; /* Start of if/while predicate */ 531 u8 *aml_predicate_start; /* Start of if/while predicate */
568 u8 *package_end; /* End of if/while block */ 532 u8 *package_end; /* End of if/while block */
533 u32 loop_count; /* While() loop counter */
569}; 534};
570 535
571/* 536/*
@@ -671,6 +636,12 @@ union acpi_parse_value {
671 union acpi_parse_object *arg; /* arguments and contained ops */ 636 union acpi_parse_object *arg; /* arguments and contained ops */
672}; 637};
673 638
639#ifdef ACPI_DISASSEMBLER
640#define ACPI_DISASM_ONLY_MEMBERS(a) a;
641#else
642#define ACPI_DISASM_ONLY_MEMBERS(a)
643#endif
644
674#define ACPI_PARSE_COMMON \ 645#define ACPI_PARSE_COMMON \
675 union acpi_parse_object *parent; /* Parent op */\ 646 union acpi_parse_object *parent; /* Parent op */\
676 u8 descriptor_type; /* To differentiate various internal objs */\ 647 u8 descriptor_type; /* To differentiate various internal objs */\
@@ -790,9 +761,6 @@ struct acpi_parse_state {
790 * 761 *
791 ****************************************************************************/ 762 ****************************************************************************/
792 763
793#define PCI_ROOT_HID_STRING "PNP0A03"
794#define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08"
795
796struct acpi_bit_register_info { 764struct acpi_bit_register_info {
797 u8 parent_register; 765 u8 parent_register;
798 u8 bit_position; 766 u8 bit_position;
@@ -1019,26 +987,4 @@ struct acpi_debug_mem_block {
1019#define ACPI_MEM_LIST_MAX 1 987#define ACPI_MEM_LIST_MAX 1
1020#define ACPI_NUM_MEM_LISTS 2 988#define ACPI_NUM_MEM_LISTS 2
1021 989
1022struct acpi_memory_list {
1023 char *list_name;
1024 void *list_head;
1025 u16 object_size;
1026 u16 max_depth;
1027 u16 current_depth;
1028 u16 link_offset;
1029
1030#ifdef ACPI_DBG_TRACK_ALLOCATIONS
1031
1032 /* Statistics for debug memory tracking only */
1033
1034 u32 total_allocated;
1035 u32 total_freed;
1036 u32 max_occupied;
1037 u32 total_size;
1038 u32 current_total_size;
1039 u32 requests;
1040 u32 hits;
1041#endif
1042};
1043
1044#endif /* __ACLOCAL_H__ */ 990#endif /* __ACLOCAL_H__ */
diff --git a/include/acpi/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 1954c9d1d012..9c127e8e2d6d 100644
--- a/include/acpi/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -45,23 +45,6 @@
45#define __ACMACROS_H__ 45#define __ACMACROS_H__
46 46
47/* 47/*
48 * Data manipulation macros
49 */
50#define ACPI_LOWORD(l) ((u16)(u32)(l))
51#define ACPI_HIWORD(l) ((u16)((((u32)(l)) >> 16) & 0xFFFF))
52#define ACPI_LOBYTE(l) ((u8)(u16)(l))
53#define ACPI_HIBYTE(l) ((u8)((((u16)(l)) >> 8) & 0xFF))
54
55#define ACPI_SET_BIT(target,bit) ((target) |= (bit))
56#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit))
57#define ACPI_MIN(a,b) (((a)<(b))?(a):(b))
58#define ACPI_MAX(a,b) (((a)>(b))?(a):(b))
59
60/* Size calculation */
61
62#define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0]))
63
64/*
65 * Extract data using a pointer. Any more than a byte and we 48 * Extract data using a pointer. Any more than a byte and we
66 * get into potential alignment issues -- see the STORE macros below. 49 * get into potential alignment issues -- see the STORE macros below.
67 * Use with care. 50 * Use with care.
@@ -76,39 +59,6 @@
76#define ACPI_SET64(ptr) *ACPI_CAST_PTR (u64, ptr) 59#define ACPI_SET64(ptr) *ACPI_CAST_PTR (u64, ptr)
77 60
78/* 61/*
79 * Pointer manipulation
80 */
81#define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p))
82#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p))
83#define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b)))
84#define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))
85
86/* Pointer/Integer type conversions */
87
88#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) NULL, (acpi_size) i)
89#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) NULL)
90#define ACPI_OFFSET(d, f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f), (void *) NULL)
91#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
92#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
93
94#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
95#define ACPI_COMPARE_NAME(a, b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b)))
96#else
97#define ACPI_COMPARE_NAME(a, b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE))
98#endif
99
100/*
101 * Full 64-bit integer must be available on both 32-bit and 64-bit platforms
102 */
103struct acpi_integer_overlay {
104 u32 lo_dword;
105 u32 hi_dword;
106};
107
108#define ACPI_LODWORD(integer) (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->lo_dword)
109#define ACPI_HIDWORD(integer) (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->hi_dword)
110
111/*
112 * printf() format helpers 62 * printf() format helpers
113 */ 63 */
114 64
@@ -209,7 +159,7 @@ struct acpi_integer_overlay {
209/* 159/*
210 * The hardware does not support unaligned transfers. We must move the 160 * The hardware does not support unaligned transfers. We must move the
211 * data one byte at a time. These macros work whether the source or 161 * data one byte at a time. These macros work whether the source or
212 * the destination (or both) is/are unaligned. (Little-endian move) 162 * the destination (or both) is/are unaligned. (Little-endian move)
213 */ 163 */
214 164
215/* 16-bit source, 16/32/64 destination */ 165/* 16-bit source, 16/32/64 destination */
@@ -357,12 +307,6 @@ struct acpi_integer_overlay {
357 {(u32)(Pargs), (u32)(Iargs), (u32)(flags), obj_type, class, type} 307 {(u32)(Pargs), (u32)(Iargs), (u32)(flags), obj_type, class, type}
358#endif 308#endif
359 309
360#ifdef ACPI_DISASSEMBLER
361#define ACPI_DISASM_ONLY_MEMBERS(a) a;
362#else
363#define ACPI_DISASM_ONLY_MEMBERS(a)
364#endif
365
366#define ARG_TYPE_WIDTH 5 310#define ARG_TYPE_WIDTH 5
367#define ARG_1(x) ((u32)(x)) 311#define ARG_1(x) ((u32)(x))
368#define ARG_2(x) ((u32)(x) << (1 * ARG_TYPE_WIDTH)) 312#define ARG_2(x) ((u32)(x) << (1 * ARG_TYPE_WIDTH))
@@ -388,32 +332,16 @@ struct acpi_integer_overlay {
388#define GET_CURRENT_ARG_TYPE(list) (list & ((u32) 0x1F)) 332#define GET_CURRENT_ARG_TYPE(list) (list & ((u32) 0x1F))
389#define INCREMENT_ARG_LIST(list) (list >>= ((u32) ARG_TYPE_WIDTH)) 333#define INCREMENT_ARG_LIST(list) (list >>= ((u32) ARG_TYPE_WIDTH))
390 334
391#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
392/*
393 * Module name is included in both debug and non-debug versions primarily for
394 * error messages. The __FILE__ macro is not very useful for this, because it
395 * often includes the entire pathname to the module.
396 */
397#define ACPI_MODULE_NAME(name) static const char ACPI_UNUSED_VAR _acpi_module_name[] = name;
398#else
399#define ACPI_MODULE_NAME(name)
400#endif
401
402/* 335/*
403 * Ascii error messages can be configured out 336 * Ascii error messages can be configured out
404 */ 337 */
405#ifndef ACPI_NO_ERROR_MESSAGES 338#ifndef ACPI_NO_ERROR_MESSAGES
406#define AE_INFO _acpi_module_name, __LINE__
407 339
408/* 340/*
409 * Error reporting. Caller's module and line number are inserted by AE_INFO, 341 * Error reporting. Caller's module and line number are inserted by AE_INFO,
410 * the plist contains a set of parens to allow variable-length lists. 342 * the plist contains a set of parens to allow variable-length lists.
411 * These macros are used for both the debug and non-debug versions of the code. 343 * These macros are used for both the debug and non-debug versions of the code.
412 */ 344 */
413#define ACPI_INFO(plist) acpi_ut_info plist
414#define ACPI_WARNING(plist) acpi_ut_warning plist
415#define ACPI_EXCEPTION(plist) acpi_ut_exception plist
416#define ACPI_ERROR(plist) acpi_ut_error plist
417#define ACPI_ERROR_NAMESPACE(s, e) acpi_ns_report_error (AE_INFO, s, e); 345#define ACPI_ERROR_NAMESPACE(s, e) acpi_ns_report_error (AE_INFO, s, e);
418#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ns_report_method_error (AE_INFO, s, n, p, e); 346#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ns_report_method_error (AE_INFO, s, n, p, e);
419 347
@@ -421,13 +349,9 @@ struct acpi_integer_overlay {
421 349
422/* No error messages */ 350/* No error messages */
423 351
424#define ACPI_INFO(plist)
425#define ACPI_WARNING(plist)
426#define ACPI_EXCEPTION(plist)
427#define ACPI_ERROR(plist)
428#define ACPI_ERROR_NAMESPACE(s, e) 352#define ACPI_ERROR_NAMESPACE(s, e)
429#define ACPI_ERROR_METHOD(s, n, p, e) 353#define ACPI_ERROR_METHOD(s, n, p, e)
430#endif 354#endif /* ACPI_NO_ERROR_MESSAGES */
431 355
432/* 356/*
433 * Debug macros that are conditionally compiled 357 * Debug macros that are conditionally compiled
@@ -435,36 +359,8 @@ struct acpi_integer_overlay {
435#ifdef ACPI_DEBUG_OUTPUT 359#ifdef ACPI_DEBUG_OUTPUT
436 360
437/* 361/*
438 * Common parameters used for debug output functions:
439 * line number, function name, module(file) name, component ID
440 */
441#define ACPI_DEBUG_PARAMETERS __LINE__, ACPI_GET_FUNCTION_NAME, _acpi_module_name, _COMPONENT
442
443/*
444 * Function entry tracing 362 * Function entry tracing
445 */ 363 */
446
447/*
448 * If ACPI_GET_FUNCTION_NAME was not defined in the compiler-dependent header,
449 * define it now. This is the case where the compiler does not support
450 * a __func__ macro or equivalent.
451 */
452#ifndef ACPI_GET_FUNCTION_NAME
453#define ACPI_GET_FUNCTION_NAME _acpi_function_name
454/*
455 * The Name parameter should be the procedure name as a quoted string.
456 * The function name is also used by the function exit macros below.
457 * Note: (const char) is used to be compatible with the debug interfaces
458 * and macros such as __func__.
459 */
460#define ACPI_FUNCTION_NAME(name) static const char _acpi_function_name[] = #name;
461
462#else
463/* Compiler supports __func__ (or equivalent) -- Ignore this macro */
464
465#define ACPI_FUNCTION_NAME(name)
466#endif
467
468#ifdef CONFIG_ACPI_DEBUG_FUNC_TRACE 364#ifdef CONFIG_ACPI_DEBUG_FUNC_TRACE
469 365
470#define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \ 366#define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \
@@ -584,15 +480,6 @@ struct acpi_integer_overlay {
584#define ACPI_DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a) 480#define ACPI_DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a)
585#define ACPI_DUMP_BUFFER(a, b) acpi_ut_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT) 481#define ACPI_DUMP_BUFFER(a, b) acpi_ut_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
586 482
587/*
588 * Master debug print macros
589 * Print iff:
590 * 1) Debug print for the current component is enabled
591 * 2) Debug error level or trace level for the print statement is enabled
592 */
593#define ACPI_DEBUG_PRINT(plist) acpi_ut_debug_print plist
594#define ACPI_DEBUG_PRINT_RAW(plist) acpi_ut_debug_print_raw plist
595
596#else 483#else
597/* 484/*
598 * This is the non-debug case -- make everything go away, 485 * This is the non-debug case -- make everything go away,
@@ -603,7 +490,6 @@ struct acpi_integer_overlay {
603 490
604#define ACPI_DEBUG_DEFINE(a) do { } while(0) 491#define ACPI_DEBUG_DEFINE(a) do { } while(0)
605#define ACPI_DEBUG_ONLY_MEMBERS(a) do { } while(0) 492#define ACPI_DEBUG_ONLY_MEMBERS(a) do { } while(0)
606#define ACPI_FUNCTION_NAME(a) do { } while(0)
607#define ACPI_FUNCTION_TRACE(a) do { } while(0) 493#define ACPI_FUNCTION_TRACE(a) do { } while(0)
608#define ACPI_FUNCTION_TRACE_PTR(a, b) do { } while(0) 494#define ACPI_FUNCTION_TRACE_PTR(a, b) do { } while(0)
609#define ACPI_FUNCTION_TRACE_U32(a, b) do { } while(0) 495#define ACPI_FUNCTION_TRACE_U32(a, b) do { } while(0)
@@ -619,8 +505,6 @@ struct acpi_integer_overlay {
619#define ACPI_DUMP_PATHNAME(a, b, c, d) do { } while(0) 505#define ACPI_DUMP_PATHNAME(a, b, c, d) do { } while(0)
620#define ACPI_DUMP_RESOURCE_LIST(a) do { } while(0) 506#define ACPI_DUMP_RESOURCE_LIST(a) do { } while(0)
621#define ACPI_DUMP_BUFFER(a, b) do { } while(0) 507#define ACPI_DUMP_BUFFER(a, b) do { } while(0)
622#define ACPI_DEBUG_PRINT(pl) do { } while(0)
623#define ACPI_DEBUG_PRINT_RAW(pl) do { } while(0)
624 508
625#define return_VOID return 509#define return_VOID return
626#define return_ACPI_STATUS(s) return(s) 510#define return_ACPI_STATUS(s) return(s)
@@ -629,7 +513,7 @@ struct acpi_integer_overlay {
629#define return_UINT32(s) return(s) 513#define return_UINT32(s) return(s)
630#define return_PTR(s) return(s) 514#define return_PTR(s) return(s)
631 515
632#endif 516#endif /* ACPI_DEBUG_OUTPUT */
633 517
634/* 518/*
635 * Some code only gets executed when the debugger is built in. 519 * Some code only gets executed when the debugger is built in.
diff --git a/include/acpi/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index db4e6f677855..46cb5b46d280 100644
--- a/include/acpi/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -182,7 +182,9 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info);
182 */ 182 */
183acpi_status 183acpi_status
184acpi_ns_check_predefined_names(struct acpi_namespace_node *node, 184acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
185 union acpi_operand_object *return_object); 185 u32 user_param_count,
186 acpi_status return_status,
187 union acpi_operand_object **return_object);
186 188
187const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct 189const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
188 acpi_namespace_node 190 acpi_namespace_node
@@ -191,6 +193,7 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
191void 193void
192acpi_ns_check_parameter_count(char *pathname, 194acpi_ns_check_parameter_count(char *pathname,
193 struct acpi_namespace_node *node, 195 struct acpi_namespace_node *node,
196 u32 user_param_count,
194 const union acpi_predefined_info *info); 197 const union acpi_predefined_info *info);
195 198
196/* 199/*
diff --git a/include/acpi/acobject.h b/drivers/acpi/acpica/acobject.h
index eb6f038b03d9..eb6f038b03d9 100644
--- a/include/acpi/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
diff --git a/include/acpi/acopcode.h b/drivers/acpi/acpica/acopcode.h
index dfdf63327885..dfdf63327885 100644
--- a/include/acpi/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
diff --git a/include/acpi/acparser.h b/drivers/acpi/acpica/acparser.h
index 23ee0fbf5619..23ee0fbf5619 100644
--- a/include/acpi/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
diff --git a/include/acpi/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 16a9ca9a66e4..16a9ca9a66e4 100644
--- a/include/acpi/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
diff --git a/include/acpi/acresrc.h b/drivers/acpi/acpica/acresrc.h
index eef5bd7a59fa..eef5bd7a59fa 100644
--- a/include/acpi/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
diff --git a/include/acpi/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 7980a26bad35..7980a26bad35 100644
--- a/include/acpi/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
diff --git a/include/acpi/actables.h b/drivers/acpi/acpica/actables.h
index 0cbe1b9ab522..7ce6e33c7f78 100644
--- a/include/acpi/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -94,6 +94,8 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded);
94/* 94/*
95 * tbutils - table manager utilities 95 * tbutils - table manager utilities
96 */ 96 */
97acpi_status acpi_tb_initialize_facs(void);
98
97u8 acpi_tb_tables_loaded(void); 99u8 acpi_tb_tables_loaded(void);
98 100
99void 101void
diff --git a/include/acpi/acutils.h b/drivers/acpi/acpica/acutils.h
index d8307b2987e3..80d8813484fe 100644
--- a/include/acpi/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -297,42 +297,6 @@ void acpi_ut_report_info(char *module_name, u32 line_number);
297 297
298void acpi_ut_report_warning(char *module_name, u32 line_number); 298void acpi_ut_report_warning(char *module_name, u32 line_number);
299 299
300/* Error and message reporting interfaces */
301
302void ACPI_INTERNAL_VAR_XFACE
303acpi_ut_debug_print(u32 requested_debug_level,
304 u32 line_number,
305 const char *function_name,
306 const char *module_name,
307 u32 component_id,
308 const char *format, ...) ACPI_PRINTF_LIKE(6);
309
310void ACPI_INTERNAL_VAR_XFACE
311acpi_ut_debug_print_raw(u32 requested_debug_level,
312 u32 line_number,
313 const char *function_name,
314 const char *module_name,
315 u32 component_id,
316 const char *format, ...) ACPI_PRINTF_LIKE(6);
317
318void ACPI_INTERNAL_VAR_XFACE
319acpi_ut_error(const char *module_name,
320 u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
321
322void ACPI_INTERNAL_VAR_XFACE
323acpi_ut_exception(const char *module_name,
324 u32 line_number,
325 acpi_status status,
326 const char *format, ...) ACPI_PRINTF_LIKE(4);
327
328void ACPI_INTERNAL_VAR_XFACE
329acpi_ut_warning(const char *module_name,
330 u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
331
332void ACPI_INTERNAL_VAR_XFACE
333acpi_ut_info(const char *module_name,
334 u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
335
336/* 300/*
337 * utdelete - Object deletion and reference counts 301 * utdelete - Object deletion and reference counts
338 */ 302 */
diff --git a/include/acpi/amlcode.h b/drivers/acpi/acpica/amlcode.h
index ff851c5df698..ff851c5df698 100644
--- a/include/acpi/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
diff --git a/include/acpi/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 7b070e42b7c5..7b070e42b7c5 100644
--- a/include/acpi/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
diff --git a/drivers/acpi/dispatcher/dsfield.c b/drivers/acpi/acpica/dsfield.c
index f988a5e7d2b4..53e27bc5a734 100644
--- a/drivers/acpi/dispatcher/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -42,11 +42,12 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/amlcode.h> 45#include "accommon.h"
46#include <acpi/acdispat.h> 46#include "amlcode.h"
47#include <acpi/acinterp.h> 47#include "acdispat.h"
48#include <acpi/acnamesp.h> 48#include "acinterp.h"
49#include <acpi/acparser.h> 49#include "acnamesp.h"
50#include "acparser.h"
50 51
51#define _COMPONENT ACPI_DISPATCHER 52#define _COMPONENT ACPI_DISPATCHER
52ACPI_MODULE_NAME("dsfield") 53ACPI_MODULE_NAME("dsfield")
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 949f7c75029e..eb144b13d8fa 100644
--- a/drivers/acpi/dispatcher/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -42,9 +42,10 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acdispat.h> 45#include "accommon.h"
46#include <acpi/acnamesp.h> 46#include "acdispat.h"
47#include <acpi/actables.h> 47#include "acnamesp.h"
48#include "actables.h"
48 49
49#define _COMPONENT ACPI_DISPATCHER 50#define _COMPONENT ACPI_DISPATCHER
50ACPI_MODULE_NAME("dsinit") 51ACPI_MODULE_NAME("dsinit")
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 279a5a60a0dd..14b8b8ed8023 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -42,11 +42,14 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/amlcode.h> 45#include "accommon.h"
46#include <acpi/acdispat.h> 46#include "amlcode.h"
47#include <acpi/acinterp.h> 47#include "acdispat.h"
48#include <acpi/acnamesp.h> 48#include "acinterp.h"
49#include "acnamesp.h"
50#ifdef ACPI_DISASSEMBLER
49#include <acpi/acdisasm.h> 51#include <acpi/acdisasm.h>
52#endif
50 53
51#define _COMPONENT ACPI_DISPATCHER 54#define _COMPONENT ACPI_DISPATCHER
52ACPI_MODULE_NAME("dsmethod") 55ACPI_MODULE_NAME("dsmethod")
@@ -412,6 +415,9 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
412 415
413 if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) { 416 if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
414 status = obj_desc->method.implementation(next_walk_state); 417 status = obj_desc->method.implementation(next_walk_state);
418 if (status == AE_OK) {
419 status = AE_CTRL_TERMINATE;
420 }
415 } 421 }
416 422
417 return_ACPI_STATUS(status); 423 return_ACPI_STATUS(status);
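
A hedged, stand-alone sketch of the dispatch fix above, with illustrative
status values; only the AE_OK-to-AE_CTRL_TERMINATE mapping comes from the
hunk. A successful internal, C-implemented method (such as _OSI) must signal
"method terminated" so the dispatcher completes the call instead of waiting
for a new walk state:

    #include <stdio.h>

    #define AE_OK             0
    #define AE_CTRL_TERMINATE 0x4003    /* illustrative value */

    static int fake_internal_method(void)
    {
        return AE_OK;                   /* implementation succeeded */
    }

    /* Mirrors the internal-only dispatch path patched above */
    static int dispatch_internal_method(int (*implementation)(void))
    {
        int status = implementation();

        if (status == AE_OK)
            status = AE_CTRL_TERMINATE; /* complete the method call */
        return status;
    }

    int main(void)
    {
        printf("status = 0x%X\n",
               (unsigned)dispatch_internal_method(fake_internal_method));
        return 0;
    }
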
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index d03f81bd1bcb..da0f5468184c 100644
--- a/drivers/acpi/dispatcher/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -42,9 +42,10 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acdispat.h> 45#include "accommon.h"
46#include <acpi/acnamesp.h> 46#include "acdispat.h"
47#include <acpi/acinterp.h> 47#include "acnamesp.h"
48#include "acinterp.h"
48 49
49#define _COMPONENT ACPI_DISPATCHER 50#define _COMPONENT ACPI_DISPATCHER
50ACPI_MODULE_NAME("dsmthdat") 51ACPI_MODULE_NAME("dsmthdat")
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 4f08e599d07e..15c628e6aa00 100644
--- a/drivers/acpi/dispatcher/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -42,11 +42,12 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h> 45#include "accommon.h"
46#include <acpi/amlcode.h> 46#include "acparser.h"
47#include <acpi/acdispat.h> 47#include "amlcode.h"
48#include <acpi/acnamesp.h> 48#include "acdispat.h"
49#include <acpi/acinterp.h> 49#include "acnamesp.h"
50#include "acinterp.h"
50 51
51#define _COMPONENT ACPI_DISPATCHER 52#define _COMPONENT ACPI_DISPATCHER
52ACPI_MODULE_NAME("dsobject") 53ACPI_MODULE_NAME("dsobject")
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 69fae5905bb8..0c3b4dd60e8a 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -43,13 +43,14 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acparser.h> 46#include "accommon.h"
47#include <acpi/amlcode.h> 47#include "acparser.h"
48#include <acpi/acdispat.h> 48#include "amlcode.h"
49#include <acpi/acinterp.h> 49#include "acdispat.h"
50#include <acpi/acnamesp.h> 50#include "acinterp.h"
51#include <acpi/acevents.h> 51#include "acnamesp.h"
52#include <acpi/actables.h> 52#include "acevents.h"
53#include "actables.h"
53 54
54#define _COMPONENT ACPI_DISPATCHER 55#define _COMPONENT ACPI_DISPATCHER
55ACPI_MODULE_NAME("dsopcode") 56ACPI_MODULE_NAME("dsopcode")
@@ -1140,10 +1141,29 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
1140 op->common.aml_opcode, walk_state)); 1141 op->common.aml_opcode, walk_state));
1141 1142
1142 switch (op->common.aml_opcode) { 1143 switch (op->common.aml_opcode) {
1143 case AML_IF_OP:
1144 case AML_WHILE_OP: 1144 case AML_WHILE_OP:
1145 1145
1146 /* 1146 /*
1147 * If this is an additional iteration of a while loop, continue.
1148 * There is no need to allocate a new control state.
1149 */
1150 if (walk_state->control_state) {
1151 if (walk_state->control_state->control.aml_predicate_start
1152 == (walk_state->parser_state.aml - 1)) {
1153
1154 /* Reset the state to start-of-loop */
1155
1156 walk_state->control_state->common.state =
1157 ACPI_CONTROL_CONDITIONAL_EXECUTING;
1158 break;
1159 }
1160 }
1161
1162 /*lint -fallthrough */
1163
1164 case AML_IF_OP:
1165
1166 /*
1147 * IF/WHILE: Create a new control state to manage these 1167 * IF/WHILE: Create a new control state to manage these
1148 * constructs. We need to manage these as a stack, in order 1168 * constructs. We need to manage these as a stack, in order
1149 * to handle nesting. 1169 * to handle nesting.
@@ -1243,13 +1263,36 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
1243 1263
1244 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op)); 1264 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
1245 1265
1246 if (walk_state->control_state->common.value) { 1266 control_state = walk_state->control_state;
1267 if (control_state->common.value) {
1247 1268
1248 /* Predicate was true, go back and evaluate it again! */ 1269 /* Predicate was true, the body of the loop was just executed */
1249 1270
1271 /*
1272 * This loop counter mechanism allows the interpreter to escape
1273 * possibly infinite loops. This can occur in poorly written AML
1274 * when the hardware does not respond within a while loop and the
1275 * loop does not implement a timeout.
1276 */
1277 control_state->control.loop_count++;
1278 if (control_state->control.loop_count >
1279 ACPI_MAX_LOOP_ITERATIONS) {
1280 status = AE_AML_INFINITE_LOOP;
1281 break;
1282 }
1283
1284 /*
1285 * Go back and evaluate the predicate and maybe execute the loop
1286 * another time
1287 */
1250 status = AE_CTRL_PENDING; 1288 status = AE_CTRL_PENDING;
1289 walk_state->aml_last_while =
1290 control_state->control.aml_predicate_start;
1291 break;
1251 } 1292 }
1252 1293
1294 /* Predicate was false, terminate this while loop */
1295
1253 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 1296 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
1254 "[WHILE_OP] termination! Op=%p\n", op)); 1297 "[WHILE_OP] termination! Op=%p\n", op));
1255 1298
@@ -1257,9 +1300,6 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
1257 1300
1258 control_state = 1301 control_state =
1259 acpi_ut_pop_generic_state(&walk_state->control_state); 1302 acpi_ut_pop_generic_state(&walk_state->control_state);
1260
1261 walk_state->aml_last_while =
1262 control_state->control.aml_predicate_start;
1263 acpi_ut_delete_generic_state(control_state); 1303 acpi_ut_delete_generic_state(control_state);
1264 break; 1304 break;
1265 1305
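
Condensed into a stand-alone sketch, the new While() escape works as follows.
The control-state struct and the AE_AML_INFINITE_LOOP value are stubs here;
ACPI_MAX_LOOP_ITERATIONS is the constant this patch adds to acconfig.h:

    #include <stdio.h>

    #define ACPI_MAX_LOOP_ITERATIONS 0xFFFF /* as added to acconfig.h */
    #define AE_OK                    0
    #define AE_AML_INFINITE_LOOP     0x2000 /* illustrative value */

    struct control_state {
        unsigned int loop_count;
    };

    /* Called each time a While() body completes with a true predicate */
    static int while_iteration(struct control_state *cs)
    {
        if (++cs->loop_count > ACPI_MAX_LOOP_ITERATIONS)
            return AE_AML_INFINITE_LOOP; /* escape runaway AML */
        return AE_OK;   /* go back and re-evaluate the predicate */
    }

    int main(void)
    {
        struct control_state cs = { 0 };
        int status;

        do {    /* simulates AML whose predicate never goes false */
            status = while_iteration(&cs);
        } while (status == AE_OK);

        printf("aborted after %u iterations, status 0x%X\n",
               cs.loop_count, (unsigned)status);
        return 0;
    }
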
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/acpica/dsutils.c
index b398982f0d8b..dabc23a46176 100644
--- a/drivers/acpi/dispatcher/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -42,12 +42,13 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h> 45#include "accommon.h"
46#include <acpi/amlcode.h> 46#include "acparser.h"
47#include <acpi/acdispat.h> 47#include "amlcode.h"
48#include <acpi/acinterp.h> 48#include "acdispat.h"
49#include <acpi/acnamesp.h> 49#include "acinterp.h"
50#include <acpi/acdebug.h> 50#include "acnamesp.h"
51#include "acdebug.h"
51 52
52#define _COMPONENT ACPI_DISPATCHER 53#define _COMPONENT ACPI_DISPATCHER
53ACPI_MODULE_NAME("dsutils") 54ACPI_MODULE_NAME("dsutils")
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 396fe12078cd..350e6656bc89 100644
--- a/drivers/acpi/dispatcher/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -43,12 +43,13 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acparser.h> 46#include "accommon.h"
47#include <acpi/amlcode.h> 47#include "acparser.h"
48#include <acpi/acdispat.h> 48#include "amlcode.h"
49#include <acpi/acinterp.h> 49#include "acdispat.h"
50#include <acpi/acnamesp.h> 50#include "acinterp.h"
51#include <acpi/acdebug.h> 51#include "acnamesp.h"
52#include "acdebug.h"
52 53
53#define _COMPONENT ACPI_DISPATCHER 54#define _COMPONENT ACPI_DISPATCHER
54ACPI_MODULE_NAME("dswexec") 55ACPI_MODULE_NAME("dswexec")
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/acpica/dswload.c
index dff7a3e445a8..3023ceaa8d54 100644
--- a/drivers/acpi/dispatcher/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -42,12 +42,13 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h> 45#include "accommon.h"
46#include <acpi/amlcode.h> 46#include "acparser.h"
47#include <acpi/acdispat.h> 47#include "amlcode.h"
48#include <acpi/acinterp.h> 48#include "acdispat.h"
49#include <acpi/acnamesp.h> 49#include "acinterp.h"
50#include <acpi/acevents.h> 50#include "acnamesp.h"
51#include "acevents.h"
51 52
52#ifdef ACPI_ASL_COMPILER 53#ifdef ACPI_ASL_COMPILER
53#include <acpi/acdisasm.h> 54#include <acpi/acdisasm.h>
diff --git a/drivers/acpi/dispatcher/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 9e6073265873..908645e72f03 100644
--- a/drivers/acpi/dispatcher/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acdispat.h> 45#include "accommon.h"
46#include "acdispat.h"
46 47
47#define _COMPONENT ACPI_DISPATCHER 48#define _COMPONENT ACPI_DISPATCHER
48ACPI_MODULE_NAME("dswscope") 49ACPI_MODULE_NAME("dswscope")
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/acpica/dswstate.c
index b00d4af791aa..40f92bf7dce5 100644
--- a/drivers/acpi/dispatcher/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -42,9 +42,10 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h> 45#include "accommon.h"
46#include <acpi/acdispat.h> 46#include "acparser.h"
47#include <acpi/acnamesp.h> 47#include "acdispat.h"
48#include "acnamesp.h"
48 49
49#define _COMPONENT ACPI_DISPATCHER 50#define _COMPONENT ACPI_DISPATCHER
50ACPI_MODULE_NAME("dswstate") 51ACPI_MODULE_NAME("dswstate")
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/acpica/evevent.c
index c56c5c6ea77b..803edd9e3f6a 100644
--- a/drivers/acpi/events/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acevents.h> 45#include "accommon.h"
46#include "acevents.h"
46 47
47#define _COMPONENT ACPI_EVENTS 48#define _COMPONENT ACPI_EVENTS
48ACPI_MODULE_NAME("evevent") 49ACPI_MODULE_NAME("evevent")
@@ -72,8 +73,8 @@ acpi_status acpi_ev_initialize_events(void)
72 73
73 /* 74 /*
74 * Initialize the Fixed and General Purpose Events. This is done prior to 75 * Initialize the Fixed and General Purpose Events. This is done prior to
75 * enabling SCIs to prevent interrupts from occurring before the handlers are 76 * enabling SCIs to prevent interrupts from occurring before the handlers
76 * installed. 77 * are installed.
77 */ 78 */
78 status = acpi_ev_fixed_event_initialize(); 79 status = acpi_ev_fixed_event_initialize();
79 if (ACPI_FAILURE(status)) { 80 if (ACPI_FAILURE(status)) {
@@ -192,8 +193,8 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
192 acpi_status status; 193 acpi_status status;
193 194
194 /* 195 /*
195 * Initialize the structure that keeps track of fixed event handlers 196 * Initialize the structure that keeps track of fixed event handlers and
196 * and enable the fixed events. 197 * enable the fixed events.
197 */ 198 */
198 for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { 199 for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
199 acpi_gbl_fixed_event_handlers[i].handler = NULL; 200 acpi_gbl_fixed_event_handlers[i].handler = NULL;
@@ -237,7 +238,7 @@ u32 acpi_ev_fixed_event_detect(void)
237 238
238 /* 239 /*
239 * Read the fixed feature status and enable registers, as all the cases 240 * Read the fixed feature status and enable registers, as all the cases
240 * depend on their values. Ignore errors here. 241 * depend on their values. Ignore errors here.
241 */ 242 */
242 (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status); 243 (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
243 (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable); 244 (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
@@ -291,8 +292,8 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
291 status_register_id, 1); 292 status_register_id, 1);
292 293
293 /* 294 /*
294 * Make sure we've got a handler. If not, report an error. 295 * Make sure we've got a handler. If not, report an error. The event is
295 * The event is disabled to prevent further interrupts. 296 * disabled to prevent further interrupts.
296 */ 297 */
297 if (NULL == acpi_gbl_fixed_event_handlers[event].handler) { 298 if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
298 (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. 299 (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/acpica/evgpe.c
index f45c74fe745e..f345ced36477 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -42,8 +42,9 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acevents.h> 45#include "accommon.h"
46#include <acpi/acnamesp.h> 46#include "acevents.h"
47#include "acnamesp.h"
47 48
48#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evgpe") 50ACPI_MODULE_NAME("evgpe")
@@ -125,7 +126,7 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
125 (1 << 126 (1 <<
126 (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number)); 127 (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
127 128
128 /* 1) Disable case. Simply clear all enable bits */ 129 /* 1) Disable case. Simply clear all enable bits */
129 130
130 if (type == ACPI_GPE_DISABLE) { 131 if (type == ACPI_GPE_DISABLE) {
131 ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, 132 ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
@@ -134,7 +135,7 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
134 return_ACPI_STATUS(AE_OK); 135 return_ACPI_STATUS(AE_OK);
135 } 136 }
136 137
137 /* 2) Enable case. Set/Clear the appropriate enable bits */ 138 /* 2) Enable case. Set/Clear the appropriate enable bits */
138 139
139 switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { 140 switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
140 case ACPI_GPE_TYPE_WAKE: 141 case ACPI_GPE_TYPE_WAKE:
@@ -295,7 +296,7 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
295 * 296 *
296 * FUNCTION: acpi_ev_get_gpe_event_info 297 * FUNCTION: acpi_ev_get_gpe_event_info
297 * 298 *
298 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 299 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
299 * gpe_number - Raw GPE number 300 * gpe_number - Raw GPE number
300 * 301 *
301 * RETURN: A GPE event_info struct. NULL if not a valid GPE 302 * RETURN: A GPE event_info struct. NULL if not a valid GPE
@@ -372,7 +373,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
372 * 373 *
373 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED 374 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
374 * 375 *
375 * DESCRIPTION: Detect if any GP events have occurred. This function is 376 * DESCRIPTION: Detect if any GP events have occurred. This function is
376 * executed at interrupt level. 377 * executed at interrupt level.
377 * 378 *
378 ******************************************************************************/ 379 ******************************************************************************/
@@ -400,8 +401,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
400 401
401 /* 402 /*
402 * We need to obtain the GPE lock for both the data structs and registers 403 * We need to obtain the GPE lock for both the data structs and registers
403 * Note: Not necessary to obtain the hardware lock, since the GPE registers 404 * Note: Not necessary to obtain the hardware lock, since the GPE
404 * are owned by the gpe_lock. 405 * registers are owned by the gpe_lock.
405 */ 406 */
406 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 407 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
407 408
@@ -410,9 +411,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
410 gpe_block = gpe_xrupt_list->gpe_block_list_head; 411 gpe_block = gpe_xrupt_list->gpe_block_list_head;
411 while (gpe_block) { 412 while (gpe_block) {
412 /* 413 /*
413 * Read all of the 8-bit GPE status and enable registers 414 * Read all of the 8-bit GPE status and enable registers in this GPE
414 * in this GPE block, saving all of them. 415 * block, saving all of them. Find all currently active GP events.
415 * Find all currently active GP events.
416 */ 416 */
417 for (i = 0; i < gpe_block->register_count; i++) { 417 for (i = 0; i < gpe_block->register_count; i++) {
418 418
@@ -423,10 +423,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
423 /* Read the Status Register */ 423 /* Read the Status Register */
424 424
425 status = 425 status =
426 acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, 426 acpi_read(&status_reg,
427 &status_reg, 427 &gpe_register_info->status_address);
428 &gpe_register_info->
429 status_address);
430 if (ACPI_FAILURE(status)) { 428 if (ACPI_FAILURE(status)) {
431 goto unlock_and_exit; 429 goto unlock_and_exit;
432 } 430 }
@@ -434,10 +432,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
434 /* Read the Enable Register */ 432 /* Read the Enable Register */
435 433
436 status = 434 status =
437 acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, 435 acpi_read(&enable_reg,
438 &enable_reg, 436 &gpe_register_info->enable_address);
439 &gpe_register_info->
440 enable_address);
441 if (ACPI_FAILURE(status)) { 437 if (ACPI_FAILURE(status)) {
442 goto unlock_and_exit; 438 goto unlock_and_exit;
443 } 439 }
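
The acpi_hw_low_level_read/write() to acpi_read/write() substitution recurs
throughout the series; the explicit width argument disappears because the
generic address structure already carries the width. A stand-alone sketch
with stub types (the real definitions live in the ACPICA headers):

    #include <stdio.h>

    typedef unsigned int u32;
    typedef int acpi_status;
    #define AE_OK 0

    struct acpi_generic_address {
        u32 address;
        u32 bit_width;
    };

    /* Stand-in for the public interface the patch switches to */
    static acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg)
    {
        *value = 0;     /* pretend to read reg->bit_width bits of hardware */
        return AE_OK;
    }

    int main(void)
    {
        struct acpi_generic_address status_addr = { 0x1000, 8 };
        u32 status_reg;

        /* Old: acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, &v, &addr) */
        if (acpi_read(&status_reg, &status_addr) == AE_OK)
            printf("GPE status = 0x%02X\n", status_reg);
        return 0;
    }
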
@@ -527,8 +523,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
527 (void)acpi_ev_enable_gpe(gpe_event_info, FALSE); 523 (void)acpi_ev_enable_gpe(gpe_event_info, FALSE);
528 524
529 /* 525 /*
530 * Take a snapshot of the GPE info for this level - we copy the 526 * Take a snapshot of the GPE info for this level - we copy the info to
531 * info to prevent a race condition with remove_handler/remove_block. 527 * prevent a race condition with remove_handler/remove_block.
532 */ 528 */
533 ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info, 529 ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
534 sizeof(struct acpi_gpe_event_info)); 530 sizeof(struct acpi_gpe_event_info));
@@ -539,8 +535,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
539 } 535 }
540 536
541 /* 537 /*
542 * Must check for control method type dispatch one more 538 * Must check for control method type dispatch one more time to avoid a
543 * time to avoid race with ev_gpe_install_handler 539 * race with ev_gpe_install_handler
544 */ 540 */
545 if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) == 541 if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
546 ACPI_GPE_DISPATCH_METHOD) { 542 ACPI_GPE_DISPATCH_METHOD) {
@@ -584,8 +580,8 @@ static void acpi_ev_asynch_enable_gpe(void *context)
584 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == 580 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
585 ACPI_GPE_LEVEL_TRIGGERED) { 581 ACPI_GPE_LEVEL_TRIGGERED) {
586 /* 582 /*
587 * GPE is level-triggered, we clear the GPE status bit after 583 * GPE is level-triggered, we clear the GPE status bit after handling
588 * handling the event. 584 * the event.
589 */ 585 */
590 status = acpi_hw_clear_gpe(gpe_event_info); 586 status = acpi_hw_clear_gpe(gpe_event_info);
591 if (ACPI_FAILURE(status)) { 587 if (ACPI_FAILURE(status)) {
@@ -624,7 +620,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
624 acpi_os_gpe_count(gpe_number); 620 acpi_os_gpe_count(gpe_number);
625 621
626 /* 622 /*
627 * If edge-triggered, clear the GPE status bit now. Note that 623 * If edge-triggered, clear the GPE status bit now. Note that
628 * level-triggered events are cleared after the GPE is serviced. 624 * level-triggered events are cleared after the GPE is serviced.
629 */ 625 */
630 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == 626 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
@@ -650,7 +646,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
650 646
651 /* 647 /*
652 * Invoke the installed handler (at interrupt level) 648 * Invoke the installed handler (at interrupt level)
653 * Ignore return status for now. TBD: leave GPE disabled on error? 649 * Ignore return status for now.
650 * TBD: leave GPE disabled on error?
654 */ 651 */
655 (void)gpe_event_info->dispatch.handler->address(gpe_event_info-> 652 (void)gpe_event_info->dispatch.handler->address(gpe_event_info->
656 dispatch. 653 dispatch.
@@ -708,7 +705,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
708 gpe_number)); 705 gpe_number));
709 706
710 /* 707 /*
711 * Disable the GPE. The GPE will remain disabled until the ACPI 708 * Disable the GPE. The GPE will remain disabled until the ACPICA
712 * Core Subsystem is restarted, or a handler is installed. 709 * Core Subsystem is restarted, or a handler is installed.
713 */ 710 */
714 status = acpi_ev_disable_gpe(gpe_event_info); 711 status = acpi_ev_disable_gpe(gpe_event_info);
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 73c058e2f5c2..484cc0565d5b 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -42,8 +42,9 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acevents.h> 45#include "accommon.h"
46#include <acpi/acnamesp.h> 46#include "acevents.h"
47#include "acnamesp.h"
47 48
48#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evgpeblk") 50ACPI_MODULE_NAME("evgpeblk")
@@ -124,6 +125,7 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
124 * FUNCTION: acpi_ev_walk_gpe_list 125 * FUNCTION: acpi_ev_walk_gpe_list
125 * 126 *
126 * PARAMETERS: gpe_walk_callback - Routine called for each GPE block 127 * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
128 * Context - Value passed to callback
127 * 129 *
128 * RETURN: Status 130 * RETURN: Status
129 * 131 *
@@ -131,7 +133,8 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
131 * 133 *
132 ******************************************************************************/ 134 ******************************************************************************/
133 135
134acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback) 136acpi_status
137acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
135{ 138{
136 struct acpi_gpe_block_info *gpe_block; 139 struct acpi_gpe_block_info *gpe_block;
137 struct acpi_gpe_xrupt_info *gpe_xrupt_info; 140 struct acpi_gpe_xrupt_info *gpe_xrupt_info;
@@ -154,8 +157,13 @@ acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback)
154 157
155 /* One callback per GPE block */ 158 /* One callback per GPE block */
156 159
157 status = gpe_walk_callback(gpe_xrupt_info, gpe_block); 160 status =
161 gpe_walk_callback(gpe_xrupt_info, gpe_block,
162 context);
158 if (ACPI_FAILURE(status)) { 163 if (ACPI_FAILURE(status)) {
164 if (status == AE_CTRL_END) { /* Callback abort */
165 status = AE_OK;
166 }
159 goto unlock_and_exit; 167 goto unlock_and_exit;
160 } 168 }
161 169
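
A hedged sketch of a walker callback under the new three-argument typedef;
returning AE_CTRL_END aborts the walk early, and as the hunk above shows,
acpi_ev_walk_gpe_list() maps that abort back to AE_OK for its caller. All
types and values below are minimal stubs:

    #include <stdio.h>

    typedef int acpi_status;
    #define AE_OK       0
    #define AE_CTRL_END 0x4007  /* illustrative value */

    struct acpi_gpe_xrupt_info { int unused; };
    struct acpi_gpe_block_info { unsigned int register_count; };

    typedef acpi_status (*acpi_gpe_callback)(struct acpi_gpe_xrupt_info *xrupt,
                                             struct acpi_gpe_block_info *block,
                                             void *context);

    /* Counts GPE registers; stops once a caller-supplied limit is reached */
    static acpi_status count_gpe_registers(struct acpi_gpe_xrupt_info *xrupt,
                                           struct acpi_gpe_block_info *block,
                                           void *context)
    {
        unsigned int *total = context;

        (void)xrupt;
        *total += block->register_count;
        if (*total >= 16)
            return AE_CTRL_END;         /* abort the walk */
        return AE_OK;
    }

    int main(void)
    {
        struct acpi_gpe_xrupt_info xrupt = { 0 };
        struct acpi_gpe_block_info block = { 4 };
        unsigned int total = 0;
        acpi_gpe_callback cb = count_gpe_registers;

        while (cb(&xrupt, &block, &total) == AE_OK)
            ;   /* simulate visiting successive GPE blocks */

        printf("counted %u GPE registers\n", total);
        return 0;
    }
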
@@ -186,7 +194,8 @@ acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback)
186 194
187acpi_status 195acpi_status
188acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 196acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
189 struct acpi_gpe_block_info *gpe_block) 197 struct acpi_gpe_block_info *gpe_block,
198 void *context)
190{ 199{
191 struct acpi_gpe_event_info *gpe_event_info; 200 struct acpi_gpe_event_info *gpe_event_info;
192 u32 i; 201 u32 i;
@@ -309,17 +318,17 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
309 (gpe_block->block_base_number + 318 (gpe_block->block_base_number +
310 (gpe_block->register_count * 8)))) { 319 (gpe_block->register_count * 8)))) {
311 /* 320 /*
312 * Not valid for this GPE block, just ignore it 321 * Not valid for this GPE block, just ignore it. However, it may be
313 * However, it may be valid for a different GPE block, since GPE0 and GPE1 322 * valid for a different GPE block, since GPE0 and GPE1 methods both
314 * methods both appear under \_GPE. 323 * appear under \_GPE.
315 */ 324 */
316 return_ACPI_STATUS(AE_OK); 325 return_ACPI_STATUS(AE_OK);
317 } 326 }
318 327
319 /* 328 /*
320 * Now we can add this information to the gpe_event_info block 329 * Now we can add this information to the gpe_event_info block for use
321 * for use during dispatch of this GPE. Default type is RUNTIME, although 330 * during dispatch of this GPE. Default type is RUNTIME, although this may
322 * this may change when the _PRW methods are executed later. 331 * change when the _PRW methods are executed later.
323 */ 332 */
324 gpe_event_info = 333 gpe_event_info =
325 &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; 334 &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
@@ -394,8 +403,8 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
 	gpe_block = gpe_info->gpe_block;
 
 	/*
-	 * The _PRW object must return a package, we are only interested
-	 * in the first element
+	 * The _PRW object must return a package, we are only interested in the
+	 * first element
 	 */
 	obj_desc = pkg_desc->package.elements[0];
 
@@ -434,7 +443,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
 	/*
 	 * Is this GPE within this block?
 	 *
-	 * TRUE iff these conditions are true:
+	 * TRUE if and only if these conditions are true:
 	 *     1) The GPE devices match.
 	 *     2) The GPE index(number) is within the range of the Gpe Block
 	 *        associated with the GPE device.
@@ -457,6 +466,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
 		if (ACPI_FAILURE(status)) {
 			goto cleanup;
 		}
+
 		status =
 		    acpi_ev_update_gpe_enable_masks(gpe_event_info,
 						    ACPI_GPE_DISABLE);
@@ -476,9 +486,9 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
 * RETURN:      A GPE interrupt block
 *
 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
- *              block per unique interrupt level used for GPEs.
- *              Should be called only when the GPE lists are semaphore locked
- *              and not subject to change.
+ *              block per unique interrupt level used for GPEs. Should be
+ *              called only when the GPE lists are semaphore locked and not
+ *              subject to change.
 *
 ******************************************************************************/
 
@@ -608,8 +618,9 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
 *
 * FUNCTION:    acpi_ev_install_gpe_block
 *
 * PARAMETERS:  gpe_block           - New GPE block
- *              interrupt_number    - Xrupt to be associated with this GPE block
+ *              interrupt_number    - Xrupt to be associated with this
+ *                                    GPE block
 *
 * RETURN:      Status
 *
@@ -666,7 +677,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
 *
 * FUNCTION:    acpi_ev_delete_gpe_block
 *
- * PARAMETERS:  gpe_block - Existing GPE block
+ * PARAMETERS:  gpe_block           - Existing GPE block
 *
 * RETURN:      Status
 *
@@ -688,7 +699,8 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
 
 	/* Disable all GPEs in this block */
 
-	status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block);
+	status =
+	    acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);
 
 	if (!gpe_block->previous && !gpe_block->next) {
 
@@ -715,6 +727,9 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
 		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 	}
 
+	acpi_current_gpe_count -=
+	    gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH;
+
 	/* Free the gpe_block */
 
 	ACPI_FREE(gpe_block->register_info);
@@ -786,9 +801,9 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
 
 	/*
 	 * Initialize the GPE Register and Event structures. A goal of these
-	 * tables is to hide the fact that there are two separate GPE register sets
-	 * in a given GPE hardware block, the status registers occupy the first half,
-	 * and the enable registers occupy the second half.
+	 * tables is to hide the fact that there are two separate GPE register
+	 * sets in a given GPE hardware block, the status registers occupy the
+	 * first half, and the enable registers occupy the second half.
 	 */
 	this_register = gpe_register_info;
 	this_event = gpe_event_info;
@@ -816,10 +831,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
 		    ACPI_GPE_REGISTER_WIDTH;
 		this_register->enable_address.bit_width =
 		    ACPI_GPE_REGISTER_WIDTH;
-		this_register->status_address.bit_offset =
-		    ACPI_GPE_REGISTER_WIDTH;
-		this_register->enable_address.bit_offset =
-		    ACPI_GPE_REGISTER_WIDTH;
+		this_register->status_address.bit_offset = 0;
+		this_register->enable_address.bit_offset = 0;
 
 		/* Init the event_info for each GPE within this register */
 
@@ -832,18 +845,14 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
 
 		/* Disable all GPEs within this register */
 
-		status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0x00,
-						 &this_register->
-						 enable_address);
+		status = acpi_write(0x00, &this_register->enable_address);
 		if (ACPI_FAILURE(status)) {
 			goto error_exit;
 		}
 
 		/* Clear any pending GPE events within this register */
 
-		status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0xFF,
-						 &this_register->
-						 status_address);
+		status = acpi_write(0xFF, &this_register->status_address);
 		if (ACPI_FAILURE(status)) {
 			goto error_exit;
 		}
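As an aside for readers tracking the register math: the loop above carves one GPE hardware block into a status half and an enable half. A minimal sketch of the addressing and the per-register hardware init, condensed from acpi_ev_create_gpe_info_blocks() and using the patch's new acpi_write() (illustrative, error handling trimmed):

	/* For a block of register_count registers at block_address:
	 *   status register i lives at block_address + i
	 *   enable register i lives at block_address + register_count + i
	 */
	for (i = 0; i < gpe_block->register_count; i++) {
		this_register->status_address.address =
		    gpe_block->block_address.address + i;
		this_register->enable_address.address =
		    gpe_block->block_address.address + i +
		    gpe_block->register_count;

		(void)acpi_write(0x00, &this_register->enable_address);	/* all 8 GPEs disabled */
		(void)acpi_write(0xFF, &this_register->status_address);	/* clear pending (write-1-to-clear) */
		this_register++;
	}

Each register covers ACPI_GPE_REGISTER_WIDTH (8) GPEs, which is why a block contributes register_count * ACPI_GPE_REGISTER_WIDTH to the new acpi_current_gpe_count bookkeeping seen in this patch.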
@@ -956,6 +965,9 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
 		    gpe_device->name.ascii, gpe_block->register_count,
 		    interrupt_number));
 
+	/* Update global count of currently available GPEs */
+
+	acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH;
 	return_ACPI_STATUS(AE_OK);
 }
 
@@ -1055,7 +1067,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
 
 	/* Enable all valid runtime GPEs found above */
 
-	status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
+	status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block, NULL);
 	if (ACPI_FAILURE(status)) {
 		ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
 			    gpe_block));
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 1d5670be729a..5f893057bcc6 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -42,18 +42,15 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acevents.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acinterp.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evmisc")
 
-/* Pointer to FACS needed for the Global Lock */
-static struct acpi_table_facs *facs = NULL;
-
 /* Local prototypes */
-
 static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
 
 static u32 acpi_ev_global_lock_handler(void *context);
@@ -152,7 +149,9 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
 			break;
 
 		default:
+
 			/* All other types are not supported */
+
 			return (AE_TYPE);
 		}
 	}
@@ -193,9 +192,8 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
 			acpi_ut_delete_generic_state(notify_info);
 		}
 	} else {
-		/*
-		 * There is no notify handler (per-device or system) for this device.
-		 */
+		/* There is no notify handler (per-device or system) for this device */
+
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				  "No notify handler for Notify (%4.4s, %X) node %p\n",
 				  acpi_ut_get_node_name(node), notify_value,
@@ -229,9 +227,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
 	ACPI_FUNCTION_ENTRY();
 
 	/*
-	 * We will invoke a global notify handler if installed.
-	 * This is done _before_ we invoke the per-device handler attached
-	 * to the device.
+	 * We will invoke a global notify handler if installed. This is done
+	 * _before_ we invoke the per-device handler attached to the device.
 	 */
 	if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {
 
@@ -299,7 +296,7 @@ static u32 acpi_ev_global_lock_handler(void *context)
 	 * If we don't get it now, it will be marked pending and we will
 	 * take another interrupt when it becomes free.
 	 */
-	ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
+	ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
 	if (acquired) {
 
 		/* Got the lock, now wake all threads waiting for it */
@@ -336,34 +333,27 @@ acpi_status acpi_ev_init_global_lock_handler(void)
 
 	ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
 
-	status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
-					 ACPI_CAST_INDIRECT_PTR(struct
-								acpi_table_header,
-								&facs));
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
+	/* Attempt installation of the global lock handler */
 
-	acpi_gbl_global_lock_present = TRUE;
 	status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
 						  acpi_ev_global_lock_handler,
 						  NULL);
 
 	/*
-	 * If the global lock does not exist on this platform, the attempt
-	 * to enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick)
-	 * Map to AE_OK, but mark global lock as not present.
-	 * Any attempt to actually use the global lock will be flagged
-	 * with an error.
+	 * If the global lock does not exist on this platform, the attempt to
+	 * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
+	 * Map to AE_OK, but mark global lock as not present. Any attempt to
+	 * actually use the global lock will be flagged with an error.
 	 */
 	if (status == AE_NO_HARDWARE_RESPONSE) {
 		ACPI_ERROR((AE_INFO,
 			    "No response from Global Lock hardware, disabling lock"));
 
 		acpi_gbl_global_lock_present = FALSE;
-		status = AE_OK;
+		return_ACPI_STATUS(AE_OK);
 	}
 
+	acpi_gbl_global_lock_present = TRUE;
 	return_ACPI_STATUS(status);
 }
 
@@ -462,8 +452,8 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
 	}
 
 	/*
-	 * Make sure that a global lock actually exists. If not, just treat
-	 * the lock as a standard mutex.
+	 * Make sure that a global lock actually exists. If not, just treat the
+	 * lock as a standard mutex.
 	 */
 	if (!acpi_gbl_global_lock_present) {
 		acpi_gbl_global_lock_acquired = TRUE;
@@ -472,7 +462,7 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
 
 	/* Attempt to acquire the actual hardware lock */
 
-	ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
+	ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
 	if (acquired) {
 
 		/* We got the lock */
@@ -536,7 +526,7 @@ acpi_status acpi_ev_release_global_lock(void)
 
 	/* Allow any thread to release the lock */
 
-	ACPI_RELEASE_GLOBAL_LOCK(facs, pending);
+	ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
 
 	/*
 	 * If the pending bit was set, we must write GBL_RLS to the control
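For context on the facs -> acpi_gbl_FACS change above: both macros operate on the 32-bit global-lock field inside the FACS, and the handshake itself is defined by the ACPI specification. A hedged sketch of what ACPI_ACQUIRE_GLOBAL_LOCK() expands to (bit names per the spec; the compare-and-swap is shown with a GCC builtin, while the real macro uses inline assembly):

	#define GL_PENDING	(1 << 0)	/* a waiter exists */
	#define GL_OWNED	(1 << 1)	/* lock is held    */

	static int sketch_acquire_global_lock(volatile unsigned int *lock)
	{
		unsigned int old_val, new_val;

		do {
			old_val = *lock;
			/* Try to take ownership; if already owned, mark PENDING */
			new_val = (old_val & ~GL_PENDING) | GL_OWNED;
			if (old_val & GL_OWNED)
				new_val |= GL_PENDING;
		} while (__sync_val_compare_and_swap(lock, old_val, new_val) != old_val);

		/* Acquired only if the lock was not owned before */
		return !(old_val & GL_OWNED);
	}

When acquisition fails, the requester has set PENDING and, as the comment in acpi_ev_global_lock_handler() says, will take another SCI when the owner releases the lock.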
@@ -582,8 +572,8 @@ void acpi_ev_terminate(void)
 
 	if (acpi_gbl_events_initialized) {
 		/*
-		 * Disable all event-related functionality.
-		 * In all cases, on error, print a message but obviously we don't abort.
+		 * Disable all event-related functionality. In all cases, on error,
+		 * print a message but obviously we don't abort.
 		 */
 
 		/* Disable all fixed events */
@@ -599,7 +589,7 @@ void acpi_ev_terminate(void)
 
 		/* Disable all GPEs in all GPE blocks */
 
-		status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block);
+		status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
 
 		/* Remove SCI handler */
 
@@ -617,7 +607,7 @@ void acpi_ev_terminate(void)
 
 	/* Deallocate all handler objects installed within GPE info structs */
 
-	status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers);
+	status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
 
 	/* Return to original mode if necessary */
 
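The two calls above show the reworked acpi_ev_walk_gpe_list() convention: every GPE walk callback now receives a caller-supplied context pointer instead of relying on file-scope globals. A sketch of the callback shape (illustrative; sketch_count_gpes is not in the patch):

	static acpi_status
	sketch_count_gpes(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			  struct acpi_gpe_block_info *gpe_block, void *context)
	{
		u32 *total = context;	/* caller-supplied accumulator */

		*total += gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH;
		return (AE_OK);		/* returning AE_CTRL_END stops the walk */
	}

	u32 count = 0;
	(void)acpi_ev_walk_gpe_list(sketch_count_gpes, &count);

acpi_ev_get_gpe_device() in evxfevnt.c further down is a real consumer of this pattern.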
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/acpica/evregion.c
index 236fbd1ca438..665c0887ab4d 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -42,22 +42,15 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acevents.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acinterp.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evregion")
-#define ACPI_NUM_DEFAULT_SPACES     4
-static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = {
-	ACPI_ADR_SPACE_SYSTEM_MEMORY,
-	ACPI_ADR_SPACE_SYSTEM_IO,
-	ACPI_ADR_SPACE_PCI_CONFIG,
-	ACPI_ADR_SPACE_DATA_TABLE
-};
 
 /* Local prototypes */
-
 static acpi_status
 acpi_ev_reg_run(acpi_handle obj_handle,
 		u32 level, void *context, void **return_value);
@@ -66,6 +59,17 @@ static acpi_status
 acpi_ev_install_handler(acpi_handle obj_handle,
 			u32 level, void *context, void **return_value);
 
+/* These are the address spaces that will get default handlers */
+
+#define ACPI_NUM_DEFAULT_SPACES     4
+
+static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = {
+	ACPI_ADR_SPACE_SYSTEM_MEMORY,
+	ACPI_ADR_SPACE_SYSTEM_IO,
+	ACPI_ADR_SPACE_PCI_CONFIG,
+	ACPI_ADR_SPACE_DATA_TABLE
+};
+
 /*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_region_handlers
@@ -91,18 +95,19 @@ acpi_status acpi_ev_install_region_handlers(void)
 	}
 
 	/*
-	 * All address spaces (PCI Config, EC, SMBus) are scope dependent
-	 * and registration must occur for a specific device.
+	 * All address spaces (PCI Config, EC, SMBus) are scope dependent and
+	 * registration must occur for a specific device.
 	 *
-	 * In the case of the system memory and IO address spaces there is currently
-	 * no device associated with the address space. For these we use the root.
+	 * In the case of the system memory and IO address spaces there is
+	 * currently no device associated with the address space. For these we
+	 * use the root.
 	 *
-	 * We install the default PCI config space handler at the root so
-	 * that this space is immediately available even though the we have
-	 * not enumerated all the PCI Root Buses yet. This is to conform
-	 * to the ACPI specification which states that the PCI config
-	 * space must be always available -- even though we are nowhere
-	 * near ready to find the PCI root buses at this point.
+	 * We install the default PCI config space handler at the root so that
+	 * this space is immediately available even though the we have not
+	 * enumerated all the PCI Root Buses yet. This is to conform to the ACPI
+	 * specification which states that the PCI config space must be always
+	 * available -- even though we are nowhere near ready to find the PCI root
+	 * buses at this point.
 	 *
 	 * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler
 	 * has already been installed (via acpi_install_address_space_handler).
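For reference, the default-space table that this file now declares up top is consumed a few lines further down in acpi_ev_install_region_handlers(); a condensed sketch (error handling trimmed, note the three tolerated status codes):

	for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
		status = acpi_ev_install_space_handler(acpi_gbl_root_node,
						       acpi_gbl_default_address_spaces[i],
						       ACPI_DEFAULT_HANDLER,
						       NULL, NULL);
		switch (status) {
		case AE_OK:
		case AE_SAME_HANDLER:
		case AE_ALREADY_EXISTS:
			break;			/* all acceptable per the NOTE above */
		default:
			goto unlock_and_exit;	/* real failure */
		}
	}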
@@ -160,12 +165,11 @@ acpi_status acpi_ev_initialize_op_regions(void)
 		return_ACPI_STATUS(status);
 	}
 
-	/*
-	 * Run the _REG methods for op_regions in each default address space
-	 */
-	for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
+	/* Run the _REG methods for op_regions in each default address space */
 
-		/* TBD: Make sure handler is the DEFAULT handler, otherwise
+	for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
+		/*
+		 * TBD: Make sure handler is the DEFAULT handler, otherwise
 		 * _REG will have already been run.
 		 */
 		status = acpi_ev_execute_reg_methods(acpi_gbl_root_node,
@@ -318,13 +322,13 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 	}
 
 	/*
-	 * It may be the case that the region has never been initialized
+	 * It may be the case that the region has never been initialized.
 	 * Some types of regions require special init code
 	 */
 	if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
-		/*
-		 * This region has not been initialized yet, do it
-		 */
+
+		/* This region has not been initialized yet, do it */
+
 		region_setup = handler_desc->address_space.setup;
 		if (!region_setup) {
 
@@ -339,9 +343,9 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 		}
 
 		/*
-		 * We must exit the interpreter because the region
-		 * setup will potentially execute control methods
-		 * (e.g., _REG method for this region)
+		 * We must exit the interpreter because the region setup will
+		 * potentially execute control methods (for example, the _REG method
+		 * for this region)
 		 */
 		acpi_ex_exit_interpreter();
 
@@ -364,9 +368,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 			return_ACPI_STATUS(status);
 		}
 
-		/*
-		 * Region initialization may have been completed by region_setup
-		 */
+		/* Region initialization may have been completed by region_setup */
+
 		if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
 			region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
 
@@ -521,8 +524,8 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
 	}
 
 	/*
-	 * If the region has been activated, call the setup handler
-	 * with the deactivate notification
+	 * If the region has been activated, call the setup handler with
+	 * the deactivate notification
 	 */
 	if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) {
 		region_setup = handler_obj->address_space.setup;
@@ -668,8 +671,8 @@ acpi_ev_install_handler(acpi_handle obj_handle,
 	}
 
 	/*
-	 * We only care about regions.and objects
-	 * that are allowed to have address space handlers
+	 * We only care about regions and objects that are allowed to have
+	 * address space handlers
 	 */
 	if ((node->type != ACPI_TYPE_DEVICE) &&
 	    (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
@@ -710,9 +713,9 @@ acpi_ev_install_handler(acpi_handle obj_handle,
 			/*
 			 * Since the object we found it on was a device, then it
 			 * means that someone has already installed a handler for
-			 * the branch of the namespace from this device on.  Just
-			 * bail out telling the walk routine to not traverse this
-			 * branch.  This preserves the scoping rule for handlers.
+			 * the branch of the namespace from this device on. Just
+			 * bail out telling the walk routine to not traverse this
+			 * branch. This preserves the scoping rule for handlers.
 			 */
 			return (AE_CTRL_DEPTH);
 		}
@@ -723,9 +726,8 @@ acpi_ev_install_handler(acpi_handle obj_handle,
 		}
 
 		/*
-		 * As long as the device didn't have a handler for this
-		 * space we don't care about it. We just ignore it and
-		 * proceed.
+		 * As long as the device didn't have a handler for this space we
+		 * don't care about it. We just ignore it and proceed.
 		 */
 		return (AE_OK);
 	}
@@ -733,16 +735,14 @@ acpi_ev_install_handler(acpi_handle obj_handle,
 	/* Object is a Region */
 
 	if (obj_desc->region.space_id != handler_obj->address_space.space_id) {
-		/*
-		 * This region is for a different address space
-		 * -- just ignore it
-		 */
+
+		/* This region is for a different address space, just ignore it */
+
 		return (AE_OK);
 	}
 
 	/*
-	 * Now we have a region and it is for the handler's address
-	 * space type.
+	 * Now we have a region and it is for the handler's address space type.
 	 *
 	 * First disconnect region for any previous handler (if any)
 	 */
@@ -786,9 +786,8 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
 	ACPI_FUNCTION_TRACE(ev_install_space_handler);
 
 	/*
-	 * This registration is valid for only the types below
-	 * and the root. This is where the default handlers
-	 * get placed.
+	 * This registration is valid for only the types below and the root. This
+	 * is where the default handlers get placed.
 	 */
 	if ((node->type != ACPI_TYPE_DEVICE) &&
 	    (node->type != ACPI_TYPE_PROCESSOR) &&
@@ -848,8 +847,8 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
 	obj_desc = acpi_ns_get_attached_object(node);
 	if (obj_desc) {
 		/*
-		 * The attached device object already exists.
-		 * Make sure the handler is not already installed.
+		 * The attached device object already exists. Make sure the handler
+		 * is not already installed.
 		 */
 		handler_obj = obj_desc->device.handler;
 
@@ -864,8 +863,8 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
 			    handler) {
 				/*
 				 * It is (relatively) OK to attempt to install the SAME
-				 * handler twice. This can easily happen
-				 * with PCI_Config space.
+				 * handler twice. This can easily happen with the
+				 * PCI_Config space.
 				 */
 				status = AE_SAME_HANDLER;
 				goto unlock_and_exit;
@@ -925,9 +924,8 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
 	/*
 	 * Install the handler
 	 *
-	 * At this point there is no existing handler.
-	 * Just allocate the object for the handler and link it
-	 * into the list.
+	 * At this point there is no existing handler. Just allocate the object
+	 * for the handler and link it into the list.
 	 */
 	handler_obj =
 	    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
@@ -1000,11 +998,10 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
 	ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
 
 	/*
-	 * Run all _REG methods for all Operation Regions for this
-	 * space ID. This is a separate walk in order to handle any
-	 * interdependencies between regions and _REG methods. (i.e. handlers
-	 * must be installed for all regions of this Space ID before we
-	 * can run any _REG methods)
+	 * Run all _REG methods for all Operation Regions for this space ID. This
+	 * is a separate walk in order to handle any interdependencies between
+	 * regions and _REG methods. (i.e. handlers must be installed for all
+	 * regions of this Space ID before we can run any _REG methods)
 	 */
 	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
 					ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
@@ -1042,8 +1039,8 @@ acpi_ev_reg_run(acpi_handle obj_handle,
 	}
 
 	/*
-	 * We only care about regions.and objects
-	 * that are allowed to have address space handlers
+	 * We only care about regions.and objects that are allowed to have address
+	 * space handlers
 	 */
 	if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
 		return (AE_OK);
@@ -1062,10 +1059,9 @@ acpi_ev_reg_run(acpi_handle obj_handle,
 	/* Object is a Region */
 
 	if (obj_desc->region.space_id != space_id) {
-		/*
-		 * This region is for a different address space
-		 * -- just ignore it
-		 */
+
+		/* This region is for a different address space, just ignore it */
+
 		return (AE_OK);
 	}
 
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 6b94b38df07d..f3f1fb45c3dc 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -42,8 +42,9 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acevents.h>
-#include <acpi/acnamesp.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evrgnini")
@@ -233,9 +234,9 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
 	if (ACPI_FAILURE(status)) {
 		if (status == AE_SAME_HANDLER) {
 			/*
-			 * It is OK if the handler is already installed on the root
-			 * bridge. Still need to return a context object for the
-			 * new PCI_Config operation region, however.
+			 * It is OK if the handler is already installed on the
+			 * root bridge. Still need to return a context object
+			 * for the new PCI_Config operation region, however.
 			 */
 			status = AE_OK;
 		} else {
@@ -272,8 +273,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
 	}
 
 	/*
-	 * For PCI_Config space access, we need the segment, bus,
-	 * device and function numbers. Acquire them here.
+	 * For PCI_Config space access, we need the segment, bus, device and
+	 * function numbers. Acquire them here.
 	 *
 	 * Find the parent device object. (This allows the operation region to be
 	 * within a subscope under the device, such as a control method.)
@@ -289,16 +290,16 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
 	}
 
 	/*
-	 * Get the PCI device and function numbers from the _ADR object
-	 * contained in the parent's scope.
+	 * Get the PCI device and function numbers from the _ADR object contained
+	 * in the parent's scope.
 	 */
 	status =
 	    acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node,
 					    &pci_value);
 
 	/*
-	 * The default is zero, and since the allocation above zeroed
-	 * the data, just do nothing on failure.
+	 * The default is zero, and since the allocation above zeroed the data,
+	 * just do nothing on failure.
 	 */
 	if (ACPI_SUCCESS(status)) {
 		pci_id->device = ACPI_HIWORD(ACPI_LODWORD(pci_value));
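A worked example of the _ADR unpacking that follows: for a PCI device, _ADR evaluates to (device << 16) | function in its low 32 bits, so with an assumed value of 0x001F0003 (purely illustrative):

	u64 pci_value = 0x001F0003;
	pci_id->device   = ACPI_HIWORD(ACPI_LODWORD(pci_value));	/* 0x001F */
	pci_id->function = ACPI_LOWORD(ACPI_LODWORD(pci_value));	/* 0x0003 */

The segment and bus numbers arrive separately, from _SEG and _BBN on the root bridge, later in this same function.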
@@ -382,9 +383,8 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
 	struct acpi_compatible_id_list *cid;
 	u32 i;
 
-	/*
-	 * Get the _HID and check for a PCI Root Bridge
-	 */
+	/* Get the _HID and check for a PCI Root Bridge */
+
 	status = acpi_ut_execute_HID(node, &hid);
 	if (ACPI_FAILURE(status)) {
 		return (FALSE);
@@ -394,10 +394,8 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
 		return (TRUE);
 	}
 
-	/*
-	 * The _HID did not match.
-	 * Get the _CID and check for a PCI Root Bridge
-	 */
+	/* The _HID did not match. Get the _CID and check for a PCI Root Bridge */
+
 	status = acpi_ut_execute_CID(node, &cid);
 	if (ACPI_FAILURE(status)) {
 		return (FALSE);
@@ -516,9 +514,9 @@ acpi_ev_default_region_setup(acpi_handle handle,
 *              Get the appropriate address space handler for a newly
 *              created region.
 *
- *              This also performs address space specific initialization.  For
- *              example, PCI regions must have an _ADR object that contains
- *              a PCI address in the scope of the definition.  This address is
+ *              This also performs address space specific initialization. For
+ *              example, PCI regions must have an _ADR object that contains
+ *              a PCI address in the scope of the definition. This address is
 *              required to perform an access to PCI config space.
 *
 * MUTEX:       Interpreter should be unlocked, because we may run the _REG
@@ -572,7 +570,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
 	if (ACPI_SUCCESS(status)) {
 		/*
 		 * The _REG method is optional and there can be only one per region
-		 * definition.  This will be executed when the handler is attached
+		 * definition. This will be executed when the handler is attached
 		 * or removed
 		 */
 		region_obj2->extra.method_REG = method_node;
@@ -670,10 +668,8 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
 			}
 		}
 
-		/*
-		 * This node does not have the handler we need;
-		 * Pop up one level
-		 */
+		/* This node does not have the handler we need; Pop up one level */
+
 		node = acpi_ns_get_parent_node(node);
 	}
 
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/acpica/evsci.c
index 2a8b77877610..567b356c85af 100644
--- a/drivers/acpi/events/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -43,7 +43,8 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acevents.h>
+#include "accommon.h"
+#include "acevents.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evsci")
@@ -115,10 +116,8 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
 	 * if this interrupt handler is installed, ACPI is enabled.
 	 */
 
-	/*
-	 * GPEs:
-	 * Check for and dispatch any GPEs that have occurred
-	 */
+	/* GPEs: Check for and dispatch any GPEs that have occurred */
+
 	interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
 
 	return_UINT32(interrupt_handled);
@@ -158,11 +157,11 @@ u32 acpi_ev_install_sci_handler(void)
 * RETURN:      E_OK if handler uninstalled OK, E_ERROR if handler was not
 *              installed to begin with
 *
- * DESCRIPTION: Remove the SCI interrupt handler.  No further SCIs will be
+ * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
 *              taken.
 *
 * Note: It doesn't seem important to disable all events or set the event
- *       enable registers to their original values.  The OS should disable
+ *       enable registers to their original values. The OS should disable
 *       the SCI interrupt level when the handler is removed, so no more
 *       events will come in.
 *
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/acpica/evxface.c
index 94a6efe020be..3aca9010a11e 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -42,9 +42,10 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acevents.h>
-#include <acpi/acinterp.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acevents.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evxface")
@@ -267,7 +268,7 @@ acpi_install_notify_handler(acpi_handle device,
 	/*
 	 * Root Object:
 	 * Registering a notify handler on the root object indicates that the
-	 * caller wishes to receive notifications for all objects.  Note that
+	 * caller wishes to receive notifications for all objects. Note that
 	 * only one <external> global handler can be regsitered (per notify type).
 	 */
 	if (device == ACPI_ROOT_OBJECT) {
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 41554f736b68..35485e4b60a6 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -42,13 +42,19 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acevents.h>
-#include <acpi/acnamesp.h>
-#include <acpi/actables.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "actables.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evxfevnt")
 
+/* Local prototypes */
+acpi_status
+acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+		       struct acpi_gpe_block_info *gpe_block, void *context);
+
 /*******************************************************************************
 *
 * FUNCTION:    acpi_enable
@@ -60,6 +66,7 @@ ACPI_MODULE_NAME("evxfevnt")
 * DESCRIPTION: Transfers the system into ACPI mode.
 *
 ******************************************************************************/
+
 acpi_status acpi_enable(void)
 {
 	acpi_status status = AE_OK;
@@ -161,8 +168,8 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
 	}
 
 	/*
-	 * Enable the requested fixed event (by writing a one to the
-	 * enable register bit)
+	 * Enable the requested fixed event (by writing a one to the enable
+	 * register bit)
 	 */
 	status =
 	    acpi_set_register(acpi_gbl_fixed_event_info[event].
@@ -343,8 +350,8 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
 	}
 
 	/*
-	 * Disable the requested fixed event (by writing a zero to the
-	 * enable register bit)
+	 * Disable the requested fixed event (by writing a zero to the enable
+	 * register bit)
 	 */
 	status =
 	    acpi_set_register(acpi_gbl_fixed_event_info[event].
@@ -396,8 +403,8 @@ acpi_status acpi_clear_event(u32 event)
 	}
 
 	/*
-	 * Clear the requested fixed event (By writing a one to the
-	 * status register bit)
+	 * Clear the requested fixed event (By writing a one to the status
+	 * register bit)
 	 */
 	status =
	    acpi_set_register(acpi_gbl_fixed_event_info[event].
@@ -717,3 +724,148 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
 }
 
 ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_gpe_device
+ *
+ * PARAMETERS:  Index               - System GPE index (0-current_gpe_count)
+ *              gpe_device          - Where the parent GPE Device is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
+ *              gpe device indicates that the gpe number is contained in one of
+ *              the FADT-defined gpe blocks. Otherwise, the GPE block device.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
+{
+	struct acpi_gpe_device_info info;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
+
+	if (!gpe_device) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (index >= acpi_current_gpe_count) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Setup and walk the GPE list */
+
+	info.index = index;
+	info.status = AE_NOT_EXIST;
+	info.gpe_device = NULL;
+	info.next_block_base_index = 0;
+
+	status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	*gpe_device = info.gpe_device;
+	return_ACPI_STATUS(info.status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
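A hedged usage sketch for this new interface (illustrative caller, not part of the patch): iterate the flat GPE index space and classify each GPE by its parent device.

	acpi_handle gpe_device;
	acpi_status status;
	u32 i;

	for (i = 0; i < acpi_current_gpe_count; i++) {
		status = acpi_get_gpe_device(i, &gpe_device);
		if (ACPI_FAILURE(status))
			continue;
		if (!gpe_device) {
			/* GPE i lives in a FADT-defined GPE0/GPE1 block */
		} else {
			/* GPE i belongs to the GPE block device gpe_device */
		}
	}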
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_get_gpe_device
+ *
+ * PARAMETERS:  GPE_WALK_CALLBACK
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
+ *              block device. NULL if the GPE is one of the FADT-defined GPEs.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+		       struct acpi_gpe_block_info *gpe_block, void *context)
+{
+	struct acpi_gpe_device_info *info = context;
+
+	/* Increment Index by the number of GPEs in this block */
+
+	info->next_block_base_index +=
+	    (gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH);
+
+	if (info->index < info->next_block_base_index) {
+		/*
+		 * The GPE index is within this block, get the node. Leave the node
+		 * NULL for the FADT-defined GPEs
+		 */
+		if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
+			info->gpe_device = gpe_block->node;
+		}
+
+		info->status = AE_OK;
+		return (AE_CTRL_END);
+	}
+
+	return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_disable_all_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_disable_all_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_hw_disable_all_gpes();
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+	return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_enable_all_runtime_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enable_all_runtime_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_hw_enable_all_runtime_gpes();
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index e8750807e57d..479e7a3721be 100644
--- a/drivers/acpi/events/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -43,8 +43,9 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acevents.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acevents.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evxfregn")
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 74da6fa52ef1..932bbc26aa04 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -42,10 +42,11 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-#include <acpi/actables.h>
-#include <acpi/acdispat.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+#include "actables.h"
+#include "acdispat.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exconfig")
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 1d1f35adddde..0be10188316e 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -42,8 +42,9 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exconvrt")
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/acpica/excreate.c
index ad09696d5069..a57ad2564ab0 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -42,9 +42,10 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("excreate")
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/acpica/exdump.c
index d087a7d28aa5..aa313574b0df 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -42,9 +42,10 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exdump")
diff --git a/drivers/acpi/executer/exfield.c b/drivers/acpi/acpica/exfield.c
index 3e440d84226a..a352d0233857 100644
--- a/drivers/acpi/executer/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -42,8 +42,9 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
+#include "accommon.h"
+#include "acdispat.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exfield")
diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 9ff9d1f4615d..ef58ac4e687b 100644
--- a/drivers/acpi/executer/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -42,10 +42,11 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/acevents.h>
-#include <acpi/acdispat.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "acevents.h"
+#include "acdispat.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exfldio")
@@ -498,14 +499,13 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
 		return_ACPI_STATUS(status);
 	}
 
-	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-			  "I/O to Data Register: ValuePtr %p\n",
-			  value));
-
 	if (read_write == ACPI_READ) {
 
 		/* Read the datum from the data_register */
 
+		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+				  "Read from Data Register\n"));
+
 		status =
 		    acpi_ex_extract_from_field(obj_desc->index_field.
 					       data_obj, value,
@@ -513,6 +513,10 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
 	} else {
 		/* Write the datum to the data_register */
 
+		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+				  "Write to Data Register: Value %8.8X%8.8X\n",
+				  ACPI_FORMAT_UINT64(*value)));
+
 		status =
 		    acpi_ex_insert_into_field(obj_desc->index_field.
 					      data_obj, value,
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/acpica/exmisc.c
index efb191340059..6b0747ac683b 100644
--- a/drivers/acpi/executer/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -43,9 +43,10 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/amlresrc.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "amlresrc.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exmisc")
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/acpica/exmutex.c
index a8bf3d713e28..d301c1f363ef 100644
--- a/drivers/acpi/executer/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -43,8 +43,9 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/acevents.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "acevents.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exmutex")
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/acpica/exnames.c
index 817e67be3697..ffdae122d94a 100644
--- a/drivers/acpi/executer/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -43,8 +43,9 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exnames")
diff --git a/drivers/acpi/executer/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index f622f9eac8a1..b530480cc7d5 100644
--- a/drivers/acpi/executer/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -43,11 +43,12 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exoparg1")
diff --git a/drivers/acpi/executer/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 368def5dffce..0b4f513ca885 100644
--- a/drivers/acpi/executer/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -42,10 +42,11 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/acinterp.h>
-#include <acpi/acevents.h>
-#include <acpi/amlcode.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "acinterp.h"
+#include "acevents.h"
+#include "amlcode.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exoparg2")
diff --git a/drivers/acpi/executer/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 9cb4197681af..c6520bbf882b 100644
--- a/drivers/acpi/executer/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -43,9 +43,10 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "acparser.h"
+#include "amlcode.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exoparg3")
diff --git a/drivers/acpi/executer/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 67d48737af53..ae43f7670a6c 100644
--- a/drivers/acpi/executer/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -43,9 +43,10 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "acparser.h"
+#include "amlcode.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exoparg6")
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/acpica/exprep.c
index a7dc87ecee37..a226f74d4a5c 100644
--- a/drivers/acpi/executer/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -43,9 +43,10 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exprep")
diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/acpica/exregion.c
index 7a41c409ae4d..76ec8ff903b8 100644
--- a/drivers/acpi/executer/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -43,7 +43,8 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
+#include "accommon.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exregion")
diff --git a/drivers/acpi/executer/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 423ad3635f3d..a063a74006f6 100644
--- a/drivers/acpi/executer/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -43,9 +43,10 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
+#include "accommon.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exresnte")
diff --git a/drivers/acpi/executer/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 60e8c47128e9..f6105a6d6126 100644
--- a/drivers/acpi/executer/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -43,10 +43,11 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/amlcode.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
+#include "accommon.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exresolv")
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/acpica/exresop.c
index 0bb82593da72..3c3802764bfb 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -43,10 +43,11 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/amlcode.h>
-#include <acpi/acparser.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
+#include "accommon.h"
+#include "amlcode.h"
+#include "acparser.h"
+#include "acinterp.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exresop")
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/acpica/exstore.c
index 1c118ba78adb..e35e9b4f6a4e 100644
--- a/drivers/acpi/executer/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -43,10 +43,11 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
+#include "accommon.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exstore")
diff --git a/drivers/acpi/executer/exstoren.c b/drivers/acpi/acpica/exstoren.c
index eef61a00803e..145d15305f70 100644
--- a/drivers/acpi/executer/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -44,8 +44,9 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exstoren")
diff --git a/drivers/acpi/executer/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 9a75ff09fb0c..67340cc70142 100644
--- a/drivers/acpi/executer/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -43,7 +43,8 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acinterp.h>
+#include "accommon.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exstorob")
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 68990f1df371..3d00b9357233 100644
--- a/drivers/acpi/executer/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -43,7 +43,8 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acinterp.h> 46#include "accommon.h"
47#include "acinterp.h"
47 48
48#define _COMPONENT ACPI_EXECUTER 49#define _COMPONENT ACPI_EXECUTER
49ACPI_MODULE_NAME("exsystem") 50ACPI_MODULE_NAME("exsystem")
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/acpica/exutils.c
index 86c03880b523..32b85d68e756 100644
--- a/drivers/acpi/executer/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -59,8 +59,9 @@
59#define DEFINE_AML_GLOBALS 59#define DEFINE_AML_GLOBALS
60 60
61#include <acpi/acpi.h> 61#include <acpi/acpi.h>
62#include <acpi/acinterp.h> 62#include "accommon.h"
63#include <acpi/amlcode.h> 63#include "acinterp.h"
64#include "amlcode.h"
64 65
65#define _COMPONENT ACPI_EXECUTER 66#define _COMPONENT ACPI_EXECUTER
66ACPI_MODULE_NAME("exutils") 67ACPI_MODULE_NAME("exutils")
diff --git a/drivers/acpi/hardware/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 816894ea839e..a9d4fea4167f 100644
--- a/drivers/acpi/hardware/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -43,6 +43,7 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include "accommon.h"
46 47
47#define _COMPONENT ACPI_HARDWARE 48#define _COMPONENT ACPI_HARDWARE
48ACPI_MODULE_NAME("hwacpi") 49ACPI_MODULE_NAME("hwacpi")
diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 0b80db9d9197..2013b66745d2 100644
--- a/drivers/acpi/hardware/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -43,7 +43,8 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acevents.h> 46#include "accommon.h"
47#include "acevents.h"
47 48
48#define _COMPONENT ACPI_HARDWARE 49#define _COMPONENT ACPI_HARDWARE
49ACPI_MODULE_NAME("hwgpe") 50ACPI_MODULE_NAME("hwgpe")
@@ -51,7 +52,8 @@ ACPI_MODULE_NAME("hwgpe")
51/* Local prototypes */ 52/* Local prototypes */
52static acpi_status 53static acpi_status
53acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 54acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
54 struct acpi_gpe_block_info *gpe_block); 55 struct acpi_gpe_block_info *gpe_block,
56 void *context);
55 57
56/****************************************************************************** 58/******************************************************************************
57 * 59 *
@@ -80,8 +82,7 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
80 82
81 /* Get current value of the enable register that contains this GPE */ 83 /* Get current value of the enable register that contains this GPE */
82 84
83 status = acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, &enable_mask, 85 status = acpi_read(&enable_mask, &gpe_register_info->enable_address);
84 &gpe_register_info->enable_address);
85 if (ACPI_FAILURE(status)) { 86 if (ACPI_FAILURE(status)) {
86 return (status); 87 return (status);
87 } 88 }
@@ -95,9 +96,7 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
95 96
96 /* Write the updated enable mask */ 97 /* Write the updated enable mask */
97 98
98 status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, enable_mask, 99 status = acpi_write(enable_mask, &gpe_register_info->enable_address);
99 &gpe_register_info->enable_address);
100
101 return (status); 100 return (status);
102} 101}
103 102
@@ -132,8 +131,8 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
132 131
133 /* Write the entire GPE (runtime) enable register */ 132 /* Write the entire GPE (runtime) enable register */
134 133
135 status = acpi_hw_low_level_write(8, gpe_register_info->enable_for_run, 134 status = acpi_write(gpe_register_info->enable_for_run,
136 &gpe_register_info->enable_address); 135 &gpe_register_info->enable_address);
137 136
138 return (status); 137 return (status);
139} 138}
@@ -166,9 +165,8 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
166 * Write a one to the appropriate bit in the status register to 165 * Write a one to the appropriate bit in the status register to
167 * clear this GPE. 166 * clear this GPE.
168 */ 167 */
169 status = acpi_hw_low_level_write(8, register_bit, 168 status = acpi_write(register_bit,
170 &gpe_event_info->register_info-> 169 &gpe_event_info->register_info->status_address);
171 status_address);
172 170
173 return (status); 171 return (status);
174} 172}
@@ -227,9 +225,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
227 225
228 /* GPE currently active (status bit == 1)? */ 226 /* GPE currently active (status bit == 1)? */
229 227
230 status = 228 status = acpi_read(&in_byte, &gpe_register_info->status_address);
231 acpi_hw_low_level_read(8, &in_byte,
232 &gpe_register_info->status_address);
233 if (ACPI_FAILURE(status)) { 229 if (ACPI_FAILURE(status)) {
234 goto unlock_and_exit; 230 goto unlock_and_exit;
235 } 231 }
@@ -260,8 +256,8 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
260 ******************************************************************************/ 256 ******************************************************************************/
261 257
262acpi_status 258acpi_status
263acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info, 259acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
264 struct acpi_gpe_block_info * gpe_block) 260 struct acpi_gpe_block_info *gpe_block, void *context)
265{ 261{
266 u32 i; 262 u32 i;
267 acpi_status status; 263 acpi_status status;
@@ -272,9 +268,9 @@ acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
272 268
273 /* Disable all GPEs in this register */ 269 /* Disable all GPEs in this register */
274 270
275 status = acpi_hw_low_level_write(8, 0x00, 271 status =
276 &gpe_block->register_info[i]. 272 acpi_write(0x00,
277 enable_address); 273 &gpe_block->register_info[i].enable_address);
278 if (ACPI_FAILURE(status)) { 274 if (ACPI_FAILURE(status)) {
279 return (status); 275 return (status);
280 } 276 }
@@ -297,8 +293,8 @@ acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
297 ******************************************************************************/ 293 ******************************************************************************/
298 294
299acpi_status 295acpi_status
300acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info, 296acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
301 struct acpi_gpe_block_info * gpe_block) 297 struct acpi_gpe_block_info *gpe_block, void *context)
302{ 298{
303 u32 i; 299 u32 i;
304 acpi_status status; 300 acpi_status status;
@@ -309,9 +305,9 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
309 305
310 /* Clear status on all GPEs in this register */ 306 /* Clear status on all GPEs in this register */
311 307
312 status = acpi_hw_low_level_write(8, 0xFF, 308 status =
313 &gpe_block->register_info[i]. 309 acpi_write(0xFF,
314 status_address); 310 &gpe_block->register_info[i].status_address);
315 if (ACPI_FAILURE(status)) { 311 if (ACPI_FAILURE(status)) {
316 return (status); 312 return (status);
317 } 313 }
@@ -335,8 +331,8 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
335 ******************************************************************************/ 331 ******************************************************************************/
336 332
337acpi_status 333acpi_status
338acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info, 334acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
339 struct acpi_gpe_block_info * gpe_block) 335 struct acpi_gpe_block_info *gpe_block, void *context)
340{ 336{
341 u32 i; 337 u32 i;
342 acpi_status status; 338 acpi_status status;
@@ -352,12 +348,9 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
352 348
353 /* Enable all "runtime" GPEs in this register */ 349 /* Enable all "runtime" GPEs in this register */
354 350
355 status = 351 status = acpi_write(gpe_block->register_info[i].enable_for_run,
356 acpi_hw_low_level_write(8, 352 &gpe_block->register_info[i].
357 gpe_block->register_info[i]. 353 enable_address);
358 enable_for_run,
359 &gpe_block->register_info[i].
360 enable_address);
361 if (ACPI_FAILURE(status)) { 354 if (ACPI_FAILURE(status)) {
362 return (status); 355 return (status);
363 } 356 }
@@ -382,7 +375,8 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
382 375
383static acpi_status 376static acpi_status
384acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 377acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
385 struct acpi_gpe_block_info *gpe_block) 378 struct acpi_gpe_block_info *gpe_block,
379 void *context)
386{ 380{
387 u32 i; 381 u32 i;
388 acpi_status status; 382 acpi_status status;
@@ -396,11 +390,9 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
396 390
397 /* Enable all "wake" GPEs in this register */ 391 /* Enable all "wake" GPEs in this register */
398 392
399 status = acpi_hw_low_level_write(8, 393 status = acpi_write(gpe_block->register_info[i].enable_for_wake,
400 gpe_block->register_info[i]. 394 &gpe_block->register_info[i].
401 enable_for_wake, 395 enable_address);
402 &gpe_block->register_info[i].
403 enable_address);
404 if (ACPI_FAILURE(status)) { 396 if (ACPI_FAILURE(status)) {
405 return (status); 397 return (status);
406 } 398 }
@@ -427,8 +419,8 @@ acpi_status acpi_hw_disable_all_gpes(void)
427 419
428 ACPI_FUNCTION_TRACE(hw_disable_all_gpes); 420 ACPI_FUNCTION_TRACE(hw_disable_all_gpes);
429 421
430 status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block); 422 status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
431 status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block); 423 status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL);
432 return_ACPI_STATUS(status); 424 return_ACPI_STATUS(status);
433} 425}
434 426
@@ -450,7 +442,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void)
450 442
451 ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes); 443 ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes);
452 444
453 status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block); 445 status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block, NULL);
454 return_ACPI_STATUS(status); 446 return_ACPI_STATUS(status);
455} 447}
456 448
@@ -472,6 +464,6 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
472 464
473 ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes); 465 ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes);
474 466
475 status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block); 467 status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block, NULL);
476 return_ACPI_STATUS(status); 468 return_ACPI_STATUS(status);
477} 469}
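
Beyond the include changes, the hwgpe.c hunks above rework two interfaces: the GPE-block walk callbacks gain a trailing void *context argument (and acpi_ev_walk_gpe_list() a matching second parameter), and the width-explicit acpi_hw_low_level_read/write() calls become acpi_read()/acpi_write(), which take the access width from the GAS structure itself. A hedged sketch of a callback under the new signature; the function names are hypothetical and the xrupt/context arguments go unused here:

	static acpi_status
	my_gpe_block_walker(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			    struct acpi_gpe_block_info *gpe_block, void *context)
	{
		u32 status_bits;

		/* Width now comes from the GAS bit_width field, not an argument;
		 * the value read is discarded, only the status is propagated.
		 */
		return (acpi_read(&status_bits,
				  &gpe_block->register_info[0].status_address));
	}

	/* The context is threaded through the walk; in-tree callers pass NULL */
	static acpi_status my_walk(void)
	{
		return (acpi_ev_walk_gpe_list(my_gpe_block_walker, NULL));
	}
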
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
new file mode 100644
index 000000000000..4dc43b018517
--- /dev/null
+++ b/drivers/acpi/acpica/hwregs.c
@@ -0,0 +1,353 @@
1
2/*******************************************************************************
3 *
4 * Module Name: hwregs - Read/write access functions for the various ACPI
5 * control and status registers.
6 *
7 ******************************************************************************/
8
9/*
10 * Copyright (C) 2000 - 2008, Intel Corp.
11 * All rights reserved.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions, and the following disclaimer,
18 * without modification.
19 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
20 * substantially similar to the "NO WARRANTY" disclaimer below
21 * ("Disclaimer") and any redistribution must be conditioned upon
22 * including a substantially similar Disclaimer requirement for further
23 * binary redistribution.
24 * 3. Neither the names of the above-listed copyright holders nor the names
25 * of any contributors may be used to endorse or promote products derived
26 * from this software without specific prior written permission.
27 *
28 * Alternatively, this software may be distributed under the terms of the
29 * GNU General Public License ("GPL") version 2 as published by the Free
30 * Software Foundation.
31 *
32 * NO WARRANTY
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
34 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
35 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
36 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
37 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
38 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
39 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
42 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
43 * POSSIBILITY OF SUCH DAMAGES.
44 */
45
46#include <acpi/acpi.h>
47#include "accommon.h"
48#include "acnamesp.h"
49#include "acevents.h"
50
51#define _COMPONENT ACPI_HARDWARE
52ACPI_MODULE_NAME("hwregs")
53
54/*******************************************************************************
55 *
56 * FUNCTION: acpi_hw_clear_acpi_status
57 *
58 * PARAMETERS: None
59 *
60 * RETURN: Status
61 *
62 * DESCRIPTION: Clears all fixed and general purpose status bits
63 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
64 *
65 ******************************************************************************/
66acpi_status acpi_hw_clear_acpi_status(void)
67{
68 acpi_status status;
69 acpi_cpu_flags lock_flags = 0;
70
71 ACPI_FUNCTION_TRACE(hw_clear_acpi_status);
72
73 ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n",
74 ACPI_BITMASK_ALL_FIXED_STATUS,
75 (u16) acpi_gbl_FADT.xpm1a_event_block.address));
76
77 lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
78
79 status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
80 ACPI_BITMASK_ALL_FIXED_STATUS);
81 if (ACPI_FAILURE(status)) {
82 goto unlock_and_exit;
83 }
84
85 /* Clear the fixed events */
86
87 if (acpi_gbl_FADT.xpm1b_event_block.address) {
88 status = acpi_write(ACPI_BITMASK_ALL_FIXED_STATUS,
89 &acpi_gbl_FADT.xpm1b_event_block);
90 if (ACPI_FAILURE(status)) {
91 goto unlock_and_exit;
92 }
93 }
94
95 /* Clear the GPE Bits in all GPE registers in all GPE blocks */
96
97 status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL);
98
99 unlock_and_exit:
100 acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
101 return_ACPI_STATUS(status);
102}
103
104/*******************************************************************************
105 *
106 * FUNCTION: acpi_hw_get_register_bit_mask
107 *
108 * PARAMETERS: register_id - Index of ACPI Register to access
109 *
110 * RETURN: The bitmask to be used when accessing the register
111 *
112 * DESCRIPTION: Map register_id into a register bitmask.
113 *
114 ******************************************************************************/
115
116struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
117{
118 ACPI_FUNCTION_ENTRY();
119
120 if (register_id > ACPI_BITREG_MAX) {
121 ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X",
122 register_id));
123 return (NULL);
124 }
125
126 return (&acpi_gbl_bit_register_info[register_id]);
127}
128
129/******************************************************************************
130 *
131 * FUNCTION: acpi_hw_register_read
132 *
133 * PARAMETERS: register_id - ACPI Register ID
134 * return_value - Where the register value is returned
135 *
136 * RETURN: Status and the value read.
137 *
138 * DESCRIPTION: Read from the specified ACPI register
139 *
140 ******************************************************************************/
141acpi_status
142acpi_hw_register_read(u32 register_id, u32 * return_value)
143{
144 u32 value1 = 0;
145 u32 value2 = 0;
146 acpi_status status;
147
148 ACPI_FUNCTION_TRACE(hw_register_read);
149
150 switch (register_id) {
151 case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */
152
153 status = acpi_read(&value1, &acpi_gbl_FADT.xpm1a_event_block);
154 if (ACPI_FAILURE(status)) {
155 goto exit;
156 }
157
158 /* PM1B is optional */
159
160 status = acpi_read(&value2, &acpi_gbl_FADT.xpm1b_event_block);
161 value1 |= value2;
162 break;
163
164 case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */
165
166 status = acpi_read(&value1, &acpi_gbl_xpm1a_enable);
167 if (ACPI_FAILURE(status)) {
168 goto exit;
169 }
170
171 /* PM1B is optional */
172
173 status = acpi_read(&value2, &acpi_gbl_xpm1b_enable);
174 value1 |= value2;
175 break;
176
177 case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */
178
179 status = acpi_read(&value1, &acpi_gbl_FADT.xpm1a_control_block);
180 if (ACPI_FAILURE(status)) {
181 goto exit;
182 }
183
184 status = acpi_read(&value2, &acpi_gbl_FADT.xpm1b_control_block);
185 value1 |= value2;
186 break;
187
188 case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
189
190 status = acpi_read(&value1, &acpi_gbl_FADT.xpm2_control_block);
191 break;
192
193 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
194
195 status = acpi_read(&value1, &acpi_gbl_FADT.xpm_timer_block);
196 break;
197
198 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
199
200 status =
201 acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8);
202 break;
203
204 default:
205 ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
206 status = AE_BAD_PARAMETER;
207 break;
208 }
209
210 exit:
211
212 if (ACPI_SUCCESS(status)) {
213 *return_value = value1;
214 }
215
216 return_ACPI_STATUS(status);
217}
218
219/******************************************************************************
220 *
221 * FUNCTION: acpi_hw_register_write
222 *
223 * PARAMETERS: register_id - ACPI Register ID
224 * Value - The value to write
225 *
226 * RETURN: Status
227 *
228 * DESCRIPTION: Write to the specified ACPI register
229 *
230 * NOTE: In accordance with the ACPI specification, this function automatically
231 * preserves the value of the following bits, meaning that these bits cannot be
232 * changed via this interface:
233 *
234 * PM1_CONTROL[0] = SCI_EN
235 * PM1_CONTROL[9]
236 * PM1_STATUS[11]
237 *
238 * ACPI References:
239 * 1) Hardware Ignored Bits: When software writes to a register with ignored
240 * bit fields, it preserves the ignored bit fields
241 * 2) SCI_EN: OSPM always preserves this bit position
242 *
243 ******************************************************************************/
244
245acpi_status acpi_hw_register_write(u32 register_id, u32 value)
246{
247 acpi_status status;
248 u32 read_value;
249
250 ACPI_FUNCTION_TRACE(hw_register_write);
251
252 switch (register_id) {
253 case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */
254
255 /* Perform a read first to preserve certain bits (per ACPI spec) */
256
257 status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS,
258 &read_value);
259 if (ACPI_FAILURE(status)) {
260 goto exit;
261 }
262
263 /* Insert the bits to be preserved */
264
265 ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS,
266 read_value);
267
268 /* Now we can write the data */
269
270 status = acpi_write(value, &acpi_gbl_FADT.xpm1a_event_block);
271 if (ACPI_FAILURE(status)) {
272 goto exit;
273 }
274
275 /* PM1B is optional */
276
277 status = acpi_write(value, &acpi_gbl_FADT.xpm1b_event_block);
278 break;
279
280 case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */
281
282 status = acpi_write(value, &acpi_gbl_xpm1a_enable);
283 if (ACPI_FAILURE(status)) {
284 goto exit;
285 }
286
287 /* PM1B is optional */
288
289 status = acpi_write(value, &acpi_gbl_xpm1b_enable);
290 break;
291
292 case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */
293
294 /*
295 * Perform a read first to preserve certain bits (per ACPI spec)
296 */
297 status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
298 &read_value);
299 if (ACPI_FAILURE(status)) {
300 goto exit;
301 }
302
303 /* Insert the bits to be preserved */
304
305 ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS,
306 read_value);
307
308 /* Now we can write the data */
309
310 status = acpi_write(value, &acpi_gbl_FADT.xpm1a_control_block);
311 if (ACPI_FAILURE(status)) {
312 goto exit;
313 }
314
315 status = acpi_write(value, &acpi_gbl_FADT.xpm1b_control_block);
316 break;
317
318 case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */
319
320 status = acpi_write(value, &acpi_gbl_FADT.xpm1a_control_block);
321 break;
322
323 case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */
324
325 status = acpi_write(value, &acpi_gbl_FADT.xpm1b_control_block);
326 break;
327
328 case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
329
330 status = acpi_write(value, &acpi_gbl_FADT.xpm2_control_block);
331 break;
332
333 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
334
335 status = acpi_write(value, &acpi_gbl_FADT.xpm_timer_block);
336 break;
337
338 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
339
340 /* SMI_CMD is currently always in IO space */
341
342 status =
343 acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8);
344 break;
345
346 default:
347 status = AE_BAD_PARAMETER;
348 break;
349 }
350
351 exit:
352 return_ACPI_STATUS(status);
353}
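
The new hwregs.c keeps the internal register-ID layer: acpi_hw_register_read()/write() dispatch on an ACPI_REGISTER_* ID, transparently OR-ing or mirroring the optional PM1B block, and the write path reads first so the bits named in the NOTE (SCI_EN and friends) keep their hardware values via ACPI_INSERT_BITS(). A small sketch of the read side; the helper name is made up:

	/* Hypothetical helper: reads the 32-bit PM timer through the ID layer.
	 * Unknown IDs come back as AE_BAD_PARAMETER; on success the value has
	 * already been merged across PM1A/PM1B where that applies.
	 */
	static acpi_status my_read_pm_timer(u32 *ticks)
	{
		return (acpi_hw_register_read(ACPI_REGISTER_PM_TIMER, ticks));
	}
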
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 25dccdf179b9..a2af2a4f2f26 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -43,7 +43,8 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/actables.h> 46#include "accommon.h"
47#include "actables.h"
47 48
48#define _COMPONENT ACPI_HARDWARE 49#define _COMPONENT ACPI_HARDWARE
49ACPI_MODULE_NAME("hwsleep") 50ACPI_MODULE_NAME("hwsleep")
@@ -52,31 +53,19 @@ ACPI_MODULE_NAME("hwsleep")
52 * 53 *
53 * FUNCTION: acpi_set_firmware_waking_vector 54 * FUNCTION: acpi_set_firmware_waking_vector
54 * 55 *
55 * PARAMETERS: physical_address - Physical address of ACPI real mode 56 * PARAMETERS: physical_address - 32-bit physical address of ACPI real mode
56 * entry point. 57 * entry point.
57 * 58 *
58 * RETURN: Status 59 * RETURN: Status
59 * 60 *
60 * DESCRIPTION: Access function for the firmware_waking_vector field in FACS 61 * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
61 * 62 *
62 ******************************************************************************/ 63 ******************************************************************************/
63acpi_status 64acpi_status
64acpi_set_firmware_waking_vector(acpi_physical_address physical_address) 65acpi_set_firmware_waking_vector(u32 physical_address)
65{ 66{
66 struct acpi_table_facs *facs;
67 acpi_status status;
68
69 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector); 67 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
70 68
71 /* Get the FACS */
72
73 status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
74 ACPI_CAST_INDIRECT_PTR(struct
75 acpi_table_header,
76 &facs));
77 if (ACPI_FAILURE(status)) {
78 return_ACPI_STATUS(status);
79 }
80 69
81 /* 70 /*
82 * According to the ACPI specification 2.0c and later, the 64-bit 71 * According to the ACPI specification 2.0c and later, the 64-bit
@@ -85,10 +74,16 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
85 * Protected Mode. Some systems (for example HP dv5-1004nr) are known 74 * Protected Mode. Some systems (for example HP dv5-1004nr) are known
86 * to fail to resume if the 64-bit vector is used. 75 * to fail to resume if the 64-bit vector is used.
87 */ 76 */
88 if (facs->version >= 1)
89 facs->xfirmware_waking_vector = 0;
90 77
91 facs->firmware_waking_vector = (u32)physical_address; 78 /* Set the 32-bit vector */
79
80 acpi_gbl_FACS->firmware_waking_vector = physical_address;
81
82 /* Clear the 64-bit vector if it exists */
83
84 if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
85 acpi_gbl_FACS->xfirmware_waking_vector = 0;
86 }
92 87
93 return_ACPI_STATUS(AE_OK); 88 return_ACPI_STATUS(AE_OK);
94} 89}
@@ -97,48 +92,39 @@ ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
97 92
98/******************************************************************************* 93/*******************************************************************************
99 * 94 *
100 * FUNCTION: acpi_get_firmware_waking_vector 95 * FUNCTION: acpi_set_firmware_waking_vector64
101 * 96 *
102 * PARAMETERS: *physical_address - Where the contents of 97 * PARAMETERS: physical_address - 64-bit physical address of ACPI protected
103 * the firmware_waking_vector field of 98 * mode entry point.
104 * the FACS will be returned.
105 * 99 *
106 * RETURN: Status, vector 100 * RETURN: Status
107 * 101 *
108 * DESCRIPTION: Access function for the firmware_waking_vector field in FACS 102 * DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if
103 * it exists in the table.
109 * 104 *
110 ******************************************************************************/ 105 ******************************************************************************/
111#ifdef ACPI_FUTURE_USAGE
112acpi_status 106acpi_status
113acpi_get_firmware_waking_vector(acpi_physical_address * physical_address) 107acpi_set_firmware_waking_vector64(u64 physical_address)
114{ 108{
115 struct acpi_table_facs *facs; 109 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
116 acpi_status status;
117 110
118 ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector);
119
120 if (!physical_address) {
121 return_ACPI_STATUS(AE_BAD_PARAMETER);
122 }
123 111
124 /* Get the FACS */ 112 /* Determine if the 64-bit vector actually exists */
125 113
126 status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, 114 if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
127 ACPI_CAST_INDIRECT_PTR(struct 115 return_ACPI_STATUS(AE_NOT_EXIST);
128 acpi_table_header,
129 &facs));
130 if (ACPI_FAILURE(status)) {
131 return_ACPI_STATUS(status);
132 } 116 }
133 117
134 /* Get the vector */ 118 /* Clear 32-bit vector, set the 64-bit X_ vector */
135 *physical_address = (acpi_physical_address)facs->firmware_waking_vector; 119
120 acpi_gbl_FACS->firmware_waking_vector = 0;
121 acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
136 122
137 return_ACPI_STATUS(AE_OK); 123 return_ACPI_STATUS(AE_OK);
138} 124}
139 125
140ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector) 126ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
141#endif 127
142/******************************************************************************* 128/*******************************************************************************
143 * 129 *
144 * FUNCTION: acpi_enter_sleep_state_prep 130 * FUNCTION: acpi_enter_sleep_state_prep
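
The hwsleep.c hunks replace the single acpi_physical_address setter (and the ACPI_FUTURE_USAGE getter) with an explicit 32/64-bit pair, reaching the FACS through the cached acpi_gbl_FACS pointer instead of a per-call table lookup. The 64-bit variant returns AE_NOT_EXIST when the FACS is too old or too short (version < 1 or length <= 32) to carry the X_ field. A hedged sketch of a caller; the wrapper name is invented:

	/* Hypothetical caller: per the dv5-1004nr comment above, the 32-bit
	 * vector is the safe choice, and setting it also zeroes any 64-bit
	 * X_ vector so firmware cannot take the path known to break resume
	 * on some machines.
	 */
	static acpi_status my_arm_wake_vector(u32 vec32)
	{
		return (acpi_set_firmware_waking_vector(vec32));
	}
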
diff --git a/drivers/acpi/hardware/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index b53d575491b9..b7f522c8f023 100644
--- a/drivers/acpi/hardware/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -43,6 +43,7 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include "accommon.h"
46 47
47#define _COMPONENT ACPI_HARDWARE 48#define _COMPONENT ACPI_HARDWARE
48ACPI_MODULE_NAME("hwtimer") 49ACPI_MODULE_NAME("hwtimer")
diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/acpica/hwxface.c
index ddf792adcf96..ae597c0ab53f 100644
--- a/drivers/acpi/hardware/hwregs.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -1,10 +1,9 @@
1 1
2/******************************************************************************* 2/******************************************************************************
3 * 3 *
4 * Module Name: hwregs - Read/write access functions for the various ACPI 4 * Module Name: hwxface - Public ACPICA hardware interfaces
5 * control and status registers.
6 * 5 *
7 ******************************************************************************/ 6 *****************************************************************************/
8 7
9/* 8/*
10 * Copyright (C) 2000 - 2008, Intel Corp. 9 * Copyright (C) 2000 - 2008, Intel Corp.
@@ -44,209 +43,208 @@
44 */ 43 */
45 44
46#include <acpi/acpi.h> 45#include <acpi/acpi.h>
47#include <acpi/acnamesp.h> 46#include "accommon.h"
48#include <acpi/acevents.h> 47#include "acnamesp.h"
49 48
50#define _COMPONENT ACPI_HARDWARE 49#define _COMPONENT ACPI_HARDWARE
51ACPI_MODULE_NAME("hwregs") 50ACPI_MODULE_NAME("hwxface")
52 51
53/******************************************************************************* 52/******************************************************************************
54 * 53 *
55 * FUNCTION: acpi_hw_clear_acpi_status 54 * FUNCTION: acpi_reset
56 * 55 *
57 * PARAMETERS: None 56 * PARAMETERS: None
58 * 57 *
59 * RETURN: None 58 * RETURN: Status
60 * 59 *
61 * DESCRIPTION: Clears all fixed and general purpose status bits 60 * DESCRIPTION: Set reset register in memory or IO space. Note: Does not
62 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED 61 * support reset register in PCI config space, this must be
62 * handled separately.
63 * 63 *
64 ******************************************************************************/ 64 ******************************************************************************/
65acpi_status acpi_hw_clear_acpi_status(void) 65acpi_status acpi_reset(void)
66{ 66{
67 struct acpi_generic_address *reset_reg;
67 acpi_status status; 68 acpi_status status;
68 acpi_cpu_flags lock_flags = 0;
69 69
70 ACPI_FUNCTION_TRACE(hw_clear_acpi_status); 70 ACPI_FUNCTION_TRACE(acpi_reset);
71 71
72 ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n", 72 reset_reg = &acpi_gbl_FADT.reset_register;
73 ACPI_BITMASK_ALL_FIXED_STATUS,
74 (u16) acpi_gbl_FADT.xpm1a_event_block.address));
75 73
76 lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); 74 /* Check if the reset register is supported */
77 75
78 status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, 76 if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ||
79 ACPI_BITMASK_ALL_FIXED_STATUS); 77 !reset_reg->address) {
80 if (ACPI_FAILURE(status)) { 78 return_ACPI_STATUS(AE_NOT_EXIST);
81 goto unlock_and_exit;
82 } 79 }
83 80
84 /* Clear the fixed events */ 81 /* Write the reset value to the reset register */
85
86 if (acpi_gbl_FADT.xpm1b_event_block.address) {
87 status =
88 acpi_hw_low_level_write(16, ACPI_BITMASK_ALL_FIXED_STATUS,
89 &acpi_gbl_FADT.xpm1b_event_block);
90 if (ACPI_FAILURE(status)) {
91 goto unlock_and_exit;
92 }
93 }
94
95 /* Clear the GPE Bits in all GPE registers in all GPE blocks */
96
97 status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
98 82
99 unlock_and_exit: 83 status = acpi_write(acpi_gbl_FADT.reset_value, reset_reg);
100 acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
101 return_ACPI_STATUS(status); 84 return_ACPI_STATUS(status);
102} 85}
103 86
104/******************************************************************************* 87ACPI_EXPORT_SYMBOL(acpi_reset)
88
89/******************************************************************************
105 * 90 *
106 * FUNCTION: acpi_get_sleep_type_data 91 * FUNCTION: acpi_read
107 * 92 *
108 * PARAMETERS: sleep_state - Numeric sleep state 93 * PARAMETERS: Value - Where the value is returned
109 * *sleep_type_a - Where SLP_TYPa is returned 94 * Reg - GAS register structure
110 * *sleep_type_b - Where SLP_TYPb is returned
111 * 95 *
112 * RETURN: Status - ACPI status 96 * RETURN: Status
113 * 97 *
114 * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep 98 * DESCRIPTION: Read from either memory or IO space.
115 * state.
116 * 99 *
117 ******************************************************************************/ 100 ******************************************************************************/
118 101acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg)
119acpi_status
120acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
121{ 102{
122 acpi_status status = AE_OK; 103 u32 width;
123 struct acpi_evaluate_info *info; 104 u64 address;
124 105 acpi_status status;
125 ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);
126
127 /* Validate parameters */
128
129 if ((sleep_state > ACPI_S_STATES_MAX) || !sleep_type_a || !sleep_type_b) {
130 return_ACPI_STATUS(AE_BAD_PARAMETER);
131 }
132 106
133 /* Allocate the evaluation information block */ 107 ACPI_FUNCTION_NAME(acpi_read);
134 108
135 info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); 109 /*
136 if (!info) { 110 * Must have a valid pointer to a GAS structure, and
137 return_ACPI_STATUS(AE_NO_MEMORY); 111 * a non-zero address within. However, don't return an error
112 * because the PM1A/B code must not fail if B isn't present.
113 */
114 if (!reg) {
115 return (AE_OK);
138 } 116 }
139 117
140 info->pathname = 118 /* Get a local copy of the address. Handles possible alignment issues */
141 ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
142
143 /* Evaluate the namespace object containing the values for this state */
144
145 status = acpi_ns_evaluate(info);
146 if (ACPI_FAILURE(status)) {
147 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
148 "%s while evaluating SleepState [%s]\n",
149 acpi_format_exception(status),
150 info->pathname));
151 119
152 goto cleanup; 120 ACPI_MOVE_64_TO_64(&address, &reg->address);
121 if (!address) {
122 return (AE_OK);
153 } 123 }
154 124
155 /* Must have a return object */ 125 /* Supported widths are 8/16/32 */
156 126
157 if (!info->return_object) { 127 width = reg->bit_width;
158 ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]", 128 if ((width != 8) && (width != 16) && (width != 32)) {
159 info->pathname)); 129 return (AE_SUPPORT);
160 status = AE_NOT_EXIST;
161 } 130 }
162 131
163 /* It must be of type Package */ 132 /* Initialize entire 32-bit return value to zero */
164 133
165 else if (ACPI_GET_OBJECT_TYPE(info->return_object) != ACPI_TYPE_PACKAGE) { 134 *value = 0;
166 ACPI_ERROR((AE_INFO,
167 "Sleep State return object is not a Package"));
168 status = AE_AML_OPERAND_TYPE;
169 }
170 135
171 /* 136 /*
172 * The package must have at least two elements. NOTE (March 2005): This 137 * Two address spaces supported: Memory or IO.
173 * goes against the current ACPI spec which defines this object as a 138 * PCI_Config is not supported here because the GAS struct is insufficient
174 * package with one encoded DWORD element. However, existing practice
175 * by BIOS vendors seems to be to have 2 or more elements, at least
176 * one per sleep type (A/B).
177 */ 139 */
178 else if (info->return_object->package.count < 2) { 140 switch (reg->space_id) {
179 ACPI_ERROR((AE_INFO, 141 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
180 "Sleep State return package does not have at least two elements"));
181 status = AE_AML_NO_OPERAND;
182 }
183 142
184 /* The first two elements must both be of type Integer */ 143 status = acpi_os_read_memory((acpi_physical_address) address,
144 value, width);
145 break;
185 146
186 else if ((ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[0]) 147 case ACPI_ADR_SPACE_SYSTEM_IO:
187 != ACPI_TYPE_INTEGER) ||
188 (ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[1])
189 != ACPI_TYPE_INTEGER)) {
190 ACPI_ERROR((AE_INFO,
191 "Sleep State return package elements are not both Integers (%s, %s)",
192 acpi_ut_get_object_type_name(info->return_object->
193 package.elements[0]),
194 acpi_ut_get_object_type_name(info->return_object->
195 package.elements[1])));
196 status = AE_AML_OPERAND_TYPE;
197 } else {
198 /* Valid _Sx_ package size, type, and value */
199 148
200 *sleep_type_a = (u8) 149 status =
201 (info->return_object->package.elements[0])->integer.value; 150 acpi_os_read_port((acpi_io_address) address, value, width);
202 *sleep_type_b = (u8) 151 break;
203 (info->return_object->package.elements[1])->integer.value;
204 }
205 152
206 if (ACPI_FAILURE(status)) { 153 default:
207 ACPI_EXCEPTION((AE_INFO, status, 154 ACPI_ERROR((AE_INFO,
208 "While evaluating SleepState [%s], bad Sleep object %p type %s", 155 "Unsupported address space: %X", reg->space_id));
209 info->pathname, info->return_object, 156 return (AE_BAD_PARAMETER);
210 acpi_ut_get_object_type_name(info->
211 return_object)));
212 } 157 }
213 158
214 acpi_ut_remove_reference(info->return_object); 159 ACPI_DEBUG_PRINT((ACPI_DB_IO,
160 "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
161 *value, width, ACPI_FORMAT_UINT64(address),
162 acpi_ut_get_region_name(reg->space_id)));
215 163
216 cleanup: 164 return (status);
217 ACPI_FREE(info);
218 return_ACPI_STATUS(status);
219} 165}
220 166
221ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data) 167ACPI_EXPORT_SYMBOL(acpi_read)
222 168
223/******************************************************************************* 169/******************************************************************************
224 * 170 *
225 * FUNCTION: acpi_hw_get_register_bit_mask 171 * FUNCTION: acpi_write
226 * 172 *
227 * PARAMETERS: register_id - Index of ACPI Register to access 173 * PARAMETERS: Value - To be written
174 * Reg - GAS register structure
228 * 175 *
229 * RETURN: The bitmask to be used when accessing the register 176 * RETURN: Status
230 * 177 *
231 * DESCRIPTION: Map register_id into a register bitmask. 178 * DESCRIPTION: Write to either memory or IO space.
232 * 179 *
233 ******************************************************************************/ 180 ******************************************************************************/
234struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id) 181acpi_status acpi_write(u32 value, struct acpi_generic_address *reg)
235{ 182{
236 ACPI_FUNCTION_ENTRY(); 183 u32 width;
184 u64 address;
185 acpi_status status;
237 186
238 if (register_id > ACPI_BITREG_MAX) { 187 ACPI_FUNCTION_NAME(acpi_write);
239 ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X", 188
240 register_id)); 189 /*
241 return (NULL); 190 * Must have a valid pointer to a GAS structure, and
191 * a non-zero address within. However, don't return an error
192 * because the PM1A/B code must not fail if B isn't present.
193 */
194 if (!reg) {
195 return (AE_OK);
242 } 196 }
243 197
244 return (&acpi_gbl_bit_register_info[register_id]); 198 /* Get a local copy of the address. Handles possible alignment issues */
199
200 ACPI_MOVE_64_TO_64(&address, &reg->address);
201 if (!address) {
202 return (AE_OK);
203 }
204
205 /* Supported widths are 8/16/32 */
206
207 width = reg->bit_width;
208 if ((width != 8) && (width != 16) && (width != 32)) {
209 return (AE_SUPPORT);
210 }
211
212 /*
213 * Two address spaces supported: Memory or IO.
214 * PCI_Config is not supported here because the GAS struct is insufficient
215 */
216 switch (reg->space_id) {
217 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
218
219 status = acpi_os_write_memory((acpi_physical_address) address,
220 value, width);
221 break;
222
223 case ACPI_ADR_SPACE_SYSTEM_IO:
224
225 status = acpi_os_write_port((acpi_io_address) address, value,
226 width);
227 break;
228
229 default:
230 ACPI_ERROR((AE_INFO,
231 "Unsupported address space: %X", reg->space_id));
232 return (AE_BAD_PARAMETER);
233 }
234
235 ACPI_DEBUG_PRINT((ACPI_DB_IO,
236 "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
237 value, width, ACPI_FORMAT_UINT64(address),
238 acpi_ut_get_region_name(reg->space_id)));
239
240 return (status);
245} 241}
246 242
243ACPI_EXPORT_SYMBOL(acpi_write)
244
247/******************************************************************************* 245/*******************************************************************************
248 * 246 *
249 * FUNCTION: acpi_get_register 247 * FUNCTION: acpi_get_register_unlocked
250 * 248 *
251 * PARAMETERS: register_id - ID of ACPI bit_register to access 249 * PARAMETERS: register_id - ID of ACPI bit_register to access
252 * return_value - Value that was read from the register 250 * return_value - Value that was read from the register
@@ -254,17 +252,16 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
254 * RETURN: Status and the value read from specified Register. Value 252 * RETURN: Status and the value read from specified Register. Value
255 * returned is normalized to bit0 (is shifted all the way right) 253 * returned is normalized to bit0 (is shifted all the way right)
256 * 254 *
257 * DESCRIPTION: ACPI bit_register read function. 255 * DESCRIPTION: ACPI bit_register read function. Does not acquire the HW lock.
258 * 256 *
259 ******************************************************************************/ 257 ******************************************************************************/
260 258acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value)
261acpi_status acpi_get_register_unlocked(u32 register_id, u32 * return_value)
262{ 259{
263 u32 register_value = 0; 260 u32 register_value = 0;
264 struct acpi_bit_register_info *bit_reg_info; 261 struct acpi_bit_register_info *bit_reg_info;
265 acpi_status status; 262 acpi_status status;
266 263
267 ACPI_FUNCTION_TRACE(acpi_get_register); 264 ACPI_FUNCTION_TRACE(acpi_get_register_unlocked);
268 265
269 /* Get the info structure corresponding to the requested ACPI Register */ 266 /* Get the info structure corresponding to the requested ACPI Register */
270 267
@@ -296,14 +293,31 @@ acpi_status acpi_get_register_unlocked(u32 register_id, u32 * return_value)
296 return_ACPI_STATUS(status); 293 return_ACPI_STATUS(status);
297} 294}
298 295
299acpi_status acpi_get_register(u32 register_id, u32 * return_value) 296ACPI_EXPORT_SYMBOL(acpi_get_register_unlocked)
297
298/*******************************************************************************
299 *
300 * FUNCTION: acpi_get_register
301 *
302 * PARAMETERS: register_id - ID of ACPI bit_register to access
303 * return_value - Value that was read from the register
304 *
305 * RETURN: Status and the value read from specified Register. Value
306 * returned is normalized to bit0 (is shifted all the way right)
307 *
308 * DESCRIPTION: ACPI bit_register read function.
309 *
310 ******************************************************************************/
311acpi_status acpi_get_register(u32 register_id, u32 *return_value)
300{ 312{
301 acpi_status status; 313 acpi_status status;
302 acpi_cpu_flags flags; 314 acpi_cpu_flags flags;
315
303 flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); 316 flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
304 status = acpi_get_register_unlocked(register_id, return_value); 317 status = acpi_get_register_unlocked(register_id, return_value);
305 acpi_os_release_lock(acpi_gbl_hardware_lock, flags); 318 acpi_os_release_lock(acpi_gbl_hardware_lock, flags);
306 return status; 319
320 return (status);
307} 321}
308 322
309ACPI_EXPORT_SYMBOL(acpi_get_register) 323ACPI_EXPORT_SYMBOL(acpi_get_register)
@@ -370,8 +384,9 @@ acpi_status acpi_set_register(u32 register_id, u32 value)
370 bit_reg_info-> 384 bit_reg_info->
371 access_bit_mask); 385 access_bit_mask);
372 if (value) { 386 if (value) {
373 status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, 387 status =
374 (u16) value); 388 acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
389 (u16) value);
375 register_value = 0; 390 register_value = 0;
376 } 391 }
377 break; 392 break;
@@ -459,399 +474,120 @@ acpi_status acpi_set_register(u32 register_id, u32 value)
459 474
460ACPI_EXPORT_SYMBOL(acpi_set_register) 475ACPI_EXPORT_SYMBOL(acpi_set_register)
461 476
462/****************************************************************************** 477/*******************************************************************************
463 * 478 *
464 * FUNCTION: acpi_hw_register_read 479 * FUNCTION: acpi_get_sleep_type_data
465 * 480 *
466 * PARAMETERS: register_id - ACPI Register ID 481 * PARAMETERS: sleep_state - Numeric sleep state
467 * return_value - Where the register value is returned 482 * *sleep_type_a - Where SLP_TYPa is returned
483 * *sleep_type_b - Where SLP_TYPb is returned
468 * 484 *
469 * RETURN: Status and the value read. 485 * RETURN: Status - ACPI status
470 * 486 *
471 * DESCRIPTION: Read from the specified ACPI register 487 * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep
488 * state.
472 * 489 *
473 ******************************************************************************/ 490 ******************************************************************************/
474acpi_status 491acpi_status
475acpi_hw_register_read(u32 register_id, u32 * return_value) 492acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
476{ 493{
477 u32 value1 = 0; 494 acpi_status status = AE_OK;
478 u32 value2 = 0; 495 struct acpi_evaluate_info *info;
479 acpi_status status;
480
481 ACPI_FUNCTION_TRACE(hw_register_read);
482
483 switch (register_id) {
484 case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */
485
486 status =
487 acpi_hw_low_level_read(16, &value1,
488 &acpi_gbl_FADT.xpm1a_event_block);
489 if (ACPI_FAILURE(status)) {
490 goto exit;
491 }
492
493 /* PM1B is optional */
494
495 status =
496 acpi_hw_low_level_read(16, &value2,
497 &acpi_gbl_FADT.xpm1b_event_block);
498 value1 |= value2;
499 break;
500
501 case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */
502
503 status =
504 acpi_hw_low_level_read(16, &value1, &acpi_gbl_xpm1a_enable);
505 if (ACPI_FAILURE(status)) {
506 goto exit;
507 }
508
509 /* PM1B is optional */
510
511 status =
512 acpi_hw_low_level_read(16, &value2, &acpi_gbl_xpm1b_enable);
513 value1 |= value2;
514 break;
515
516 case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */
517
518 status =
519 acpi_hw_low_level_read(16, &value1,
520 &acpi_gbl_FADT.xpm1a_control_block);
521 if (ACPI_FAILURE(status)) {
522 goto exit;
523 }
524
525 status =
526 acpi_hw_low_level_read(16, &value2,
527 &acpi_gbl_FADT.xpm1b_control_block);
528 value1 |= value2;
529 break;
530
531 case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
532
533 status =
534 acpi_hw_low_level_read(8, &value1,
535 &acpi_gbl_FADT.xpm2_control_block);
536 break;
537
538 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
539
540 status =
541 acpi_hw_low_level_read(32, &value1,
542 &acpi_gbl_FADT.xpm_timer_block);
543 break;
544 496
545 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ 497 ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);
546 498
547 status = 499 /* Validate parameters */
548 acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8);
549 break;
550 500
551 default: 501 if ((sleep_state > ACPI_S_STATES_MAX) || !sleep_type_a || !sleep_type_b) {
552 ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id)); 502 return_ACPI_STATUS(AE_BAD_PARAMETER);
553 status = AE_BAD_PARAMETER;
554 break;
555 } 503 }
556 504
557 exit: 505 /* Allocate the evaluation information block */
558 506
559 if (ACPI_SUCCESS(status)) { 507 info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
560 *return_value = value1; 508 if (!info) {
509 return_ACPI_STATUS(AE_NO_MEMORY);
561 } 510 }
562 511
563 return_ACPI_STATUS(status); 512 info->pathname =
564} 513 ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
565
566/******************************************************************************
567 *
568 * FUNCTION: acpi_hw_register_write
569 *
570 * PARAMETERS: register_id - ACPI Register ID
571 * Value - The value to write
572 *
573 * RETURN: Status
574 *
575 * DESCRIPTION: Write to the specified ACPI register
576 *
577 * NOTE: In accordance with the ACPI specification, this function automatically
578 * preserves the value of the following bits, meaning that these bits cannot be
579 * changed via this interface:
580 *
581 * PM1_CONTROL[0] = SCI_EN
582 * PM1_CONTROL[9]
583 * PM1_STATUS[11]
584 *
585 * ACPI References:
586 * 1) Hardware Ignored Bits: When software writes to a register with ignored
587 * bit fields, it preserves the ignored bit fields
588 * 2) SCI_EN: OSPM always preserves this bit position
589 *
590 ******************************************************************************/
591
592acpi_status acpi_hw_register_write(u32 register_id, u32 value)
593{
594 acpi_status status;
595 u32 read_value;
596
597 ACPI_FUNCTION_TRACE(hw_register_write);
598
599 switch (register_id) {
600 case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */
601
602 /* Perform a read first to preserve certain bits (per ACPI spec) */
603
604 status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS,
605 &read_value);
606 if (ACPI_FAILURE(status)) {
607 goto exit;
608 }
609
610 /* Insert the bits to be preserved */
611
612 ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS,
613 read_value);
614
615 /* Now we can write the data */
616
617 status =
618 acpi_hw_low_level_write(16, value,
619 &acpi_gbl_FADT.xpm1a_event_block);
620 if (ACPI_FAILURE(status)) {
621 goto exit;
622 }
623
624 /* PM1B is optional */
625
626 status =
627 acpi_hw_low_level_write(16, value,
628 &acpi_gbl_FADT.xpm1b_event_block);
629 break;
630
631 case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */
632
633 status =
634 acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1a_enable);
635 if (ACPI_FAILURE(status)) {
636 goto exit;
637 }
638
639 /* PM1B is optional */
640
641 status =
642 acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1b_enable);
643 break;
644
645 case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */
646
647 /*
648 * Perform a read first to preserve certain bits (per ACPI spec)
649 */
650 status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
651 &read_value);
652 if (ACPI_FAILURE(status)) {
653 goto exit;
654 }
655
656 /* Insert the bits to be preserved */
657
658 ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS,
659 read_value);
660
661 /* Now we can write the data */
662
663 status =
664 acpi_hw_low_level_write(16, value,
665 &acpi_gbl_FADT.xpm1a_control_block);
666 if (ACPI_FAILURE(status)) {
667 goto exit;
668 }
669
670 status =
671 acpi_hw_low_level_write(16, value,
672 &acpi_gbl_FADT.xpm1b_control_block);
673 break;
674
675 case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */
676
677 status =
678 acpi_hw_low_level_write(16, value,
679 &acpi_gbl_FADT.xpm1a_control_block);
680 break;
681
682 case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */
683
684 status =
685 acpi_hw_low_level_write(16, value,
686 &acpi_gbl_FADT.xpm1b_control_block);
687 break;
688
689 case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
690
691 status =
692 acpi_hw_low_level_write(8, value,
693 &acpi_gbl_FADT.xpm2_control_block);
694 break;
695
696 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
697
698 status =
699 acpi_hw_low_level_write(32, value,
700 &acpi_gbl_FADT.xpm_timer_block);
701 break;
702
703 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
704 514
705 /* SMI_CMD is currently always in IO space */ 515 /* Evaluate the namespace object containing the values for this state */
706 516
707 status = 517 status = acpi_ns_evaluate(info);
708 acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8); 518 if (ACPI_FAILURE(status)) {
709 break; 519 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
520 "%s while evaluating SleepState [%s]\n",
521 acpi_format_exception(status),
522 info->pathname));
710 523
711 default: 524 goto cleanup;
712 status = AE_BAD_PARAMETER;
713 break;
714 } 525 }
715 526
716 exit: 527 /* Must have a return object */
717 return_ACPI_STATUS(status);
718}
719
720/******************************************************************************
721 *
722 * FUNCTION: acpi_hw_low_level_read
723 *
724 * PARAMETERS: Width - 8, 16, or 32
725 * Value - Where the value is returned
726 * Reg - GAS register structure
727 *
728 * RETURN: Status
729 *
730 * DESCRIPTION: Read from either memory or IO space.
731 *
732 ******************************************************************************/
733
734acpi_status
735acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
736{
737 u64 address;
738 acpi_status status;
739
740 ACPI_FUNCTION_NAME(hw_low_level_read);
741 528
742 /* 529 if (!info->return_object) {
743 * Must have a valid pointer to a GAS structure, and 530 ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
744 * a non-zero address within. However, don't return an error 531 info->pathname));
745 * because the PM1A/B code must not fail if B isn't present. 532 status = AE_NOT_EXIST;
746 */
747 if (!reg) {
748 return (AE_OK);
749 } 533 }
750 534
751 /* Get a local copy of the address. Handles possible alignment issues */ 535 /* It must be of type Package */
752 536
753 ACPI_MOVE_64_TO_64(&address, &reg->address); 537 else if (ACPI_GET_OBJECT_TYPE(info->return_object) != ACPI_TYPE_PACKAGE) {
754 if (!address) { 538 ACPI_ERROR((AE_INFO,
755 return (AE_OK); 539 "Sleep State return object is not a Package"));
540 status = AE_AML_OPERAND_TYPE;
756 } 541 }
757 *value = 0;
758 542
759 /* 543 /*
760 * Two address spaces supported: Memory or IO. 544 * The package must have at least two elements. NOTE (March 2005): This
761 * PCI_Config is not supported here because the GAS struct is insufficient 545 * goes against the current ACPI spec which defines this object as a
546 * package with one encoded DWORD element. However, existing practice
+547 * by BIOS vendors seems to be to have 2 or more elements, at least
+548 * one per sleep type (A/B).
+549 */
+550 else if (info->return_object->package.count < 2) {
+551 ACPI_ERROR((AE_INFO,
+552 "Sleep State return package does not have at least two elements"));
+553 status = AE_AML_NO_OPERAND;
+554 }
+555
+556 /* The first two elements must both be of type Integer */
+557
+558 else if ((ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[0])
+559 != ACPI_TYPE_INTEGER) ||
+560 (ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[1])
+561 != ACPI_TYPE_INTEGER)) {
+562 ACPI_ERROR((AE_INFO,
+563 "Sleep State return package elements are not both Integers (%s, %s)",
+564 acpi_ut_get_object_type_name(info->return_object->
+565 package.elements[0]),
+566 acpi_ut_get_object_type_name(info->return_object->
+567 package.elements[1])));
+568 status = AE_AML_OPERAND_TYPE;
+569 } else {
+570 /* Valid _Sx_ package size, type, and value */
+571
+572 *sleep_type_a = (u8)
+573 (info->return_object->package.elements[0])->integer.value;
+574 *sleep_type_b = (u8)
+575 (info->return_object->package.elements[1])->integer.value;
+576 }
+577
+578 if (ACPI_FAILURE(status)) {
+579 ACPI_EXCEPTION((AE_INFO, status,
+580 "While evaluating SleepState [%s], bad Sleep object %p type %s",
+581 info->pathname, info->return_object,
+582 acpi_ut_get_object_type_name(info->
+583 return_object)));
+584 }
+585
+586 acpi_ut_remove_reference(info->return_object);
+587
+588 cleanup:
+589 ACPI_FREE(info);
+590 return_ACPI_STATUS(status);
+591 }
+592
+593 ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data)
-762 */
-763 switch (reg->space_id) {
-764 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-765
-766 status = acpi_os_read_memory((acpi_physical_address) address,
-767 value, width);
-768 break;
-769
-770 case ACPI_ADR_SPACE_SYSTEM_IO:
-771
-772 status =
-773 acpi_os_read_port((acpi_io_address) address, value, width);
-774 break;
-775
-776 default:
-777 ACPI_ERROR((AE_INFO,
-778 "Unsupported address space: %X", reg->space_id));
-779 return (AE_BAD_PARAMETER);
-780 }
-781
-782 ACPI_DEBUG_PRINT((ACPI_DB_IO,
-783 "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
-784 *value, width, ACPI_FORMAT_UINT64(address),
-785 acpi_ut_get_region_name(reg->space_id)));
-786
-787 return (status);
-788}
-789
-790/******************************************************************************
-791 *
-792 * FUNCTION: acpi_hw_low_level_write
-793 *
-794 * PARAMETERS: Width - 8, 16, or 32
-795 * Value - To be written
-796 * Reg - GAS register structure
-797 *
-798 * RETURN: Status
-799 *
-800 * DESCRIPTION: Write to either memory or IO space.
-801 *
-802 ******************************************************************************/
-803
-804acpi_status
-805acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
-806{
-807 u64 address;
-808 acpi_status status;
-809
-810 ACPI_FUNCTION_NAME(hw_low_level_write);
-811
-812 /*
-813 * Must have a valid pointer to a GAS structure, and
-814 * a non-zero address within. However, don't return an error
-815 * because the PM1A/B code must not fail if B isn't present.
-816 */
-817 if (!reg) {
-818 return (AE_OK);
-819 }
-820
-821 /* Get a local copy of the address. Handles possible alignment issues */
-822
-823 ACPI_MOVE_64_TO_64(&address, &reg->address);
-824 if (!address) {
-825 return (AE_OK);
-826 }
-827
-828 /*
-829 * Two address spaces supported: Memory or IO.
-830 * PCI_Config is not supported here because the GAS struct is insufficient
-831 */
-832 switch (reg->space_id) {
-833 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-834
-835 status = acpi_os_write_memory((acpi_physical_address) address,
-836 value, width);
-837 break;
-838
-839 case ACPI_ADR_SPACE_SYSTEM_IO:
-840
-841 status = acpi_os_write_port((acpi_io_address) address, value,
-842 width);
-843 break;
-844
-845 default:
-846 ACPI_ERROR((AE_INFO,
-847 "Unsupported address space: %X", reg->space_id));
-848 return (AE_BAD_PARAMETER);
-849 }
-850
-851 ACPI_DEBUG_PRINT((ACPI_DB_IO,
-852 "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
-853 value, width, ACPI_FORMAT_UINT64(address),
-854 acpi_ut_get_region_name(reg->space_id)));
-855
-856 return (status);
-857}
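For reference, the validation above backs the public acpi_get_sleep_type_data() interface, which is how OSPM obtains the SLP_TYPa/SLP_TYPb values for a given sleep state before programming the PM1 control registers. A minimal caller sketch (the helper name and error handling are hypothetical, not part of this patch):

    #include <acpi/acpi.h>

    /* Hypothetical helper: fetch the S3 sleep type values */
    static acpi_status get_s3_sleep_types(u8 *type_a, u8 *type_b)
    {
            acpi_status status;

            /* Evaluates \_S3_; elements 0 and 1 must be Integers (checked above) */
            status = acpi_get_sleep_type_data(ACPI_STATE_S3, type_a, type_b);
            if (ACPI_FAILURE(status)) {
                    /* _S3_ missing, or a malformed package was rejected */
                    return status;
            }
            return AE_OK;
    }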
diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index c39a7f68b889..88303ebe924c 100644
--- a/drivers/acpi/namespace/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -42,9 +42,10 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/amlcode.h> 45#include "accommon.h"
46#include <acpi/acnamesp.h> 46#include "amlcode.h"
47#include <acpi/acdispat.h> 47#include "acnamesp.h"
48#include "acdispat.h"
48 49
49#define _COMPONENT ACPI_NAMESPACE 50#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nsaccess") 51ACPI_MODULE_NAME("nsaccess")
@@ -165,12 +166,9 @@ acpi_status acpi_ns_root_initialize(void)
165 166
166 obj_desc->method.method_flags = 167 obj_desc->method.method_flags =
167 AML_METHOD_INTERNAL_ONLY; 168 AML_METHOD_INTERNAL_ONLY;
168
169#ifndef ACPI_DUMP_APP
170 obj_desc->method.implementation = 169 obj_desc->method.implementation =
171 acpi_ut_osi_implementation; 170 acpi_ut_osi_implementation;
172#endif 171#endif
173#endif
174 break; 172 break;
175 173
176 case ACPI_TYPE_INTEGER: 174 case ACPI_TYPE_INTEGER:
@@ -521,11 +519,11 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
521 } 519 }
522 520
523 /* 521 /*
524 * Search namespace for each segment of the name. Loop through and 522 * Search namespace for each segment of the name. Loop through and
525 * verify (or add to the namespace) each name segment. 523 * verify (or add to the namespace) each name segment.
526 * 524 *
527 * The object type is significant only at the last name 525 * The object type is significant only at the last name
528 * segment. (We don't care about the types along the path, only 526 * segment. (We don't care about the types along the path, only
529 * the type of the final target object.) 527 * the type of the final target object.)
530 */ 528 */
531 this_search_type = ACPI_TYPE_ANY; 529 this_search_type = ACPI_TYPE_ANY;
@@ -591,6 +589,10 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
591 * segments). 589 * segments).
592 */ 590 */
593 if (this_node->type == ACPI_TYPE_LOCAL_ALIAS) { 591 if (this_node->type == ACPI_TYPE_LOCAL_ALIAS) {
592 if (!this_node->object) {
593 return_ACPI_STATUS(AE_NOT_EXIST);
594 }
595
594 if (acpi_ns_opens_scope 596 if (acpi_ns_opens_scope
595 (((struct acpi_namespace_node *)this_node-> 597 (((struct acpi_namespace_node *)this_node->
596 object)->type)) { 598 object)->type)) {
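The lookup loop in this hunk consumes the pathname one name segment at a time; only the last segment's object type matters. A rough standalone sketch of that segmentation, using hypothetical names outside the ACPICA sources (the internal ACPICA form actually packs fixed 4-character segments without separators):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *path = "\\_SB_.PCI0.SBRG";   /* external form */
            char seg[5];
            const char *p = path + 1;                /* skip the root prefix */

            while (*p) {
                    memcpy(seg, p, 4);               /* segments are 4 chars */
                    seg[4] = '\0';
                    printf("segment: %s\n", seg);
                    p += 4;
                    if (*p == '.')                   /* external separator */
                            p++;
            }
            return 0;
    }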
diff --git a/drivers/acpi/namespace/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 3a1740ac2edc..f976d848fe82 100644
--- a/drivers/acpi/namespace/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include "acnamesp.h"
46 47
47#define _COMPONENT ACPI_NAMESPACE 48#define _COMPONENT ACPI_NAMESPACE
48ACPI_MODULE_NAME("nsalloc") 49ACPI_MODULE_NAME("nsalloc")
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/acpica/nsdump.c
index cc0ae39440e4..0da33c8e9ba2 100644
--- a/drivers/acpi/namespace/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include "acnamesp.h"
46 47
47#define _COMPONENT ACPI_NAMESPACE 48#define _COMPONENT ACPI_NAMESPACE
48ACPI_MODULE_NAME("nsdump") 49ACPI_MODULE_NAME("nsdump")
diff --git a/drivers/acpi/namespace/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 428f50fde11a..41994fe7fbb8 100644
--- a/drivers/acpi/namespace/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -42,6 +42,7 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h"
45 46
46/* TBD: This entire module is apparently obsolete and should be removed */ 47/* TBD: This entire module is apparently obsolete and should be removed */
47 48
@@ -49,7 +50,7 @@
49ACPI_MODULE_NAME("nsdumpdv") 50ACPI_MODULE_NAME("nsdumpdv")
50#ifdef ACPI_OBSOLETE_FUNCTIONS 51#ifdef ACPI_OBSOLETE_FUNCTIONS
51#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) 52#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
52#include <acpi/acnamesp.h> 53#include "acnamesp.h"
53/******************************************************************************* 54/*******************************************************************************
54 * 55 *
55 * FUNCTION: acpi_ns_dump_one_device 56 * FUNCTION: acpi_ns_dump_one_device
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/acpica/nseval.c
index 4cdf03ac2b46..0f3d5f9b5966 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -42,9 +42,10 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h> 45#include "accommon.h"
46#include <acpi/acinterp.h> 46#include "acparser.h"
47#include <acpi/acnamesp.h> 47#include "acinterp.h"
48#include "acnamesp.h"
48 49
49#define _COMPONENT ACPI_NAMESPACE 50#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nseval") 51ACPI_MODULE_NAME("nseval")
@@ -89,6 +90,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
89 /* Initialize the return value to an invalid object */ 90 /* Initialize the return value to an invalid object */
90 91
91 info->return_object = NULL; 92 info->return_object = NULL;
93 info->param_count = 0;
92 94
93 /* 95 /*
94 * Get the actual namespace node for the target object. Handles these cases: 96 * Get the actual namespace node for the target object. Handles these cases:
@@ -141,41 +143,17 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
141 return_ACPI_STATUS(AE_NULL_OBJECT); 143 return_ACPI_STATUS(AE_NULL_OBJECT);
142 } 144 }
143 145
144 /* 146 /* Count the number of arguments being passed to the method */
145 * Calculate the number of arguments being passed to the method
146 */
147 147
148 info->param_count = 0;
149 if (info->parameters) { 148 if (info->parameters) {
150 while (info->parameters[info->param_count]) 149 while (info->parameters[info->param_count]) {
150 if (info->param_count > ACPI_METHOD_MAX_ARG) {
151 return_ACPI_STATUS(AE_LIMIT);
152 }
151 info->param_count++; 153 info->param_count++;
154 }
152 } 155 }
153 156
154 /*
155 * Warning if too few or too many arguments have been passed by the
156 * caller. We don't want to abort here with an error because an
157 * incorrect number of arguments may not cause the method to fail.
158 * However, the method will fail if there are too few arguments passed
159 * and the method attempts to use one of the missing ones.
160 */
161
162 if (info->param_count < info->obj_desc->method.param_count) {
163 ACPI_WARNING((AE_INFO,
164 "Insufficient arguments - "
165 "method [%4.4s] needs %d, found %d",
166 acpi_ut_get_node_name(info->resolved_node),
167 info->obj_desc->method.param_count,
168 info->param_count));
169 } else if (info->param_count >
170 info->obj_desc->method.param_count) {
171 ACPI_WARNING((AE_INFO,
172 "Excess arguments - "
173 "method [%4.4s] needs %d, found %d",
174 acpi_ut_get_node_name(info->
175 resolved_node),
176 info->obj_desc->method.param_count,
177 info->param_count));
178 }
179 157
180 ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:", 158 ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:",
181 ACPI_LV_INFO, _COMPONENT); 159 ACPI_LV_INFO, _COMPONENT);
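The rewritten loop above bounds the scan of the caller's NULL-terminated argument array, so a missing terminator can no longer run past ACPI_METHOD_MAX_ARG. The same pattern in isolation, with generic types (the constant value 7 is an assumption mirroring ACPI_METHOD_MAX_ARG):

    #define METHOD_MAX_ARG 7     /* assumption: mirrors ACPI_METHOD_MAX_ARG */

    /* Count a NULL-terminated pointer array, rejecting overlong lists */
    static int count_args(void **params, unsigned int *count_out)
    {
            unsigned int count = 0;

            if (params) {
                    while (params[count]) {
                            if (count > METHOD_MAX_ARG)
                                    return -1;       /* maps to AE_LIMIT above */
                            count++;
                    }
            }
            *count_out = count;
            return 0;
    }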
@@ -264,32 +242,13 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
264 } 242 }
265 } 243 }
266 244
267 /* Validation of return values for ACPI-predefined methods and objects */ 245 /*
268 246 * Check input argument count against the ASL-defined count for a method.
269 if ((status == AE_OK) || (status == AE_CTRL_RETURN_VALUE)) { 247 * Also check predefined names: argument count and return value against
270 /* 248 * the ACPI specification. Some incorrect return value types are repaired.
271 * If this is the first evaluation, check the return value. This 249 */
272 * ensures that any warnings will only be emitted during the very 250 (void)acpi_ns_check_predefined_names(node, info->param_count,
273 * first evaluation of the object. 251 status, &info->return_object);
274 */
275 if (!(node->flags & ANOBJ_EVALUATED)) {
276 /*
277 * Check for a predefined ACPI name. If found, validate the
278 * returned object.
279 *
280 * Note: Ignore return status for now, emit warnings if there are
281 * problems with the returned object. May change later to abort
282 * the method on invalid return object.
283 */
284 (void)acpi_ns_check_predefined_names(node,
285 info->
286 return_object);
287 }
288
289 /* Mark the node as having been evaluated */
290
291 node->flags |= ANOBJ_EVALUATED;
292 }
293 252
294 /* Check if there is a return value that must be dealt with */ 253 /* Check if there is a return value that must be dealt with */
295 254
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/acpica/nsinit.c
index e4c57510d798..13501cb81863 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -42,9 +42,10 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include <acpi/acdispat.h> 46#include "acnamesp.h"
47#include <acpi/acinterp.h> 47#include "acdispat.h"
48#include "acinterp.h"
48#include <linux/nmi.h> 49#include <linux/nmi.h>
49 50
50#define _COMPONENT ACPI_NAMESPACE 51#define _COMPONENT ACPI_NAMESPACE
diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/acpica/nsload.c
index a4a412b7c029..a0ba9e12379e 100644
--- a/drivers/acpi/namespace/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -42,9 +42,10 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include <acpi/acdispat.h> 46#include "acnamesp.h"
47#include <acpi/actables.h> 47#include "acdispat.h"
48#include "actables.h"
48 49
49#define _COMPONENT ACPI_NAMESPACE 50#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nsload") 51ACPI_MODULE_NAME("nsload")
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 42a39a7c96e9..ae3dc10a7e81 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -42,8 +42,9 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/amlcode.h> 45#include "accommon.h"
46#include <acpi/acnamesp.h> 46#include "amlcode.h"
47#include "acnamesp.h"
47 48
48#define _COMPONENT ACPI_NAMESPACE 49#define _COMPONENT ACPI_NAMESPACE
49ACPI_MODULE_NAME("nsnames") 50ACPI_MODULE_NAME("nsnames")
diff --git a/drivers/acpi/namespace/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 15fe09e24f71..08a97a57f8f9 100644
--- a/drivers/acpi/namespace/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -43,7 +43,8 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acnamesp.h> 46#include "accommon.h"
47#include "acnamesp.h"
47 48
48#define _COMPONENT ACPI_NAMESPACE 49#define _COMPONENT ACPI_NAMESPACE
49ACPI_MODULE_NAME("nsobject") 50ACPI_MODULE_NAME("nsobject")
diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/acpica/nsparse.c
index a82271a9dbb3..b9e8d0070b6f 100644
--- a/drivers/acpi/namespace/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -42,10 +42,11 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include <acpi/acparser.h> 46#include "acnamesp.h"
47#include <acpi/acdispat.h> 47#include "acparser.h"
48#include <acpi/actables.h> 48#include "acdispat.h"
49#include "actables.h"
49 50
50#define _COMPONENT ACPI_NAMESPACE 51#define _COMPONENT ACPI_NAMESPACE
51ACPI_MODULE_NAME("nsparse") 52ACPI_MODULE_NAME("nsparse")
diff --git a/drivers/acpi/namespace/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 0f17cf0898c9..452703290d35 100644
--- a/drivers/acpi/namespace/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -43,8 +43,9 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acnamesp.h> 46#include "accommon.h"
47#include <acpi/acpredef.h> 47#include "acnamesp.h"
48#include "acpredef.h"
48 49
49#define _COMPONENT ACPI_NAMESPACE 50#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nspredef") 51ACPI_MODULE_NAME("nspredef")
@@ -72,7 +73,7 @@ ACPI_MODULE_NAME("nspredef")
72/* Local prototypes */ 73/* Local prototypes */
73static acpi_status 74static acpi_status
74acpi_ns_check_package(char *pathname, 75acpi_ns_check_package(char *pathname,
75 union acpi_operand_object *return_object, 76 union acpi_operand_object **return_object_ptr,
76 const union acpi_predefined_info *predefined); 77 const union acpi_predefined_info *predefined);
77 78
78static acpi_status 79static acpi_status
@@ -82,13 +83,18 @@ acpi_ns_check_package_elements(char *pathname,
82 83
83static acpi_status 84static acpi_status
84acpi_ns_check_object_type(char *pathname, 85acpi_ns_check_object_type(char *pathname,
85 union acpi_operand_object *return_object, 86 union acpi_operand_object **return_object_ptr,
86 u32 expected_btypes, u32 package_index); 87 u32 expected_btypes, u32 package_index);
87 88
88static acpi_status 89static acpi_status
89acpi_ns_check_reference(char *pathname, 90acpi_ns_check_reference(char *pathname,
90 union acpi_operand_object *return_object); 91 union acpi_operand_object *return_object);
91 92
93static acpi_status
94acpi_ns_repair_object(u32 expected_btypes,
95 u32 package_index,
96 union acpi_operand_object **return_object_ptr);
97
92/* 98/*
93 * Names for the types that can be returned by the predefined objects. 99 * Names for the types that can be returned by the predefined objects.
94 * Used for warning messages. Must be in the same order as the ACPI_RTYPEs 100 * Used for warning messages. Must be in the same order as the ACPI_RTYPEs
@@ -108,8 +114,8 @@ static const char *acpi_rtype_names[] = {
108 * FUNCTION: acpi_ns_check_predefined_names 114 * FUNCTION: acpi_ns_check_predefined_names
109 * 115 *
110 * PARAMETERS: Node - Namespace node for the method/object 116 * PARAMETERS: Node - Namespace node for the method/object
111 * return_object - Object returned from the evaluation of this 117 * return_object_ptr - Pointer to the object returned from the
112 * method/object 118 * evaluation of a method or object
113 * 119 *
114 * RETURN: Status 120 * RETURN: Status
115 * 121 *
@@ -119,8 +125,11 @@ static const char *acpi_rtype_names[] = {
119 125
120acpi_status 126acpi_status
121acpi_ns_check_predefined_names(struct acpi_namespace_node *node, 127acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
122 union acpi_operand_object *return_object) 128 u32 user_param_count,
129 acpi_status return_status,
130 union acpi_operand_object **return_object_ptr)
123{ 131{
132 union acpi_operand_object *return_object = *return_object_ptr;
124 acpi_status status = AE_OK; 133 acpi_status status = AE_OK;
125 const union acpi_predefined_info *predefined; 134 const union acpi_predefined_info *predefined;
126 char *pathname; 135 char *pathname;
@@ -128,12 +137,6 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
128 /* Match the name for this method/object against the predefined list */ 137 /* Match the name for this method/object against the predefined list */
129 138
130 predefined = acpi_ns_check_for_predefined_name(node); 139 predefined = acpi_ns_check_for_predefined_name(node);
131 if (!predefined) {
132
133 /* Name was not one of the predefined names */
134
135 return (AE_OK);
136 }
137 140
138 /* Get the full pathname to the object, for use in error messages */ 141 /* Get the full pathname to the object, for use in error messages */
139 142
@@ -143,10 +146,37 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
143 } 146 }
144 147
145 /* 148 /*
146 * Check that the parameter count for this method is in accordance 149 * Check that the parameter count for this method matches the ASL
147 * with the ACPI specification. 150 * definition. For predefined names, ensure that both the caller and
151 * the method itself are in accordance with the ACPI specification.
148 */ 152 */
149 acpi_ns_check_parameter_count(pathname, node, predefined); 153 acpi_ns_check_parameter_count(pathname, node, user_param_count,
154 predefined);
155
156 /* If not a predefined name, we cannot validate the return object */
157
158 if (!predefined) {
159 goto exit;
160 }
161
162 /* If the method failed, we cannot validate the return object */
163
164 if ((return_status != AE_OK) && (return_status != AE_CTRL_RETURN_VALUE)) {
165 goto exit;
166 }
167
168 /*
169 * Only validate the return value on the first successful evaluation of
170 * the method. This ensures that any warnings will only be emitted during
171 * the very first evaluation of the method/object.
172 */
173 if (node->flags & ANOBJ_EVALUATED) {
174 goto exit;
175 }
176
177 /* Mark the node as having been successfully evaluated */
178
179 node->flags |= ANOBJ_EVALUATED;
150 180
151 /* 181 /*
152 * If there is no return value, check if we require a return value for 182 * If there is no return value, check if we require a return value for
@@ -171,7 +201,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
171 * We have a return value, but if one wasn't expected, just exit, this is 201 * We have a return value, but if one wasn't expected, just exit, this is
172 * not a problem 202 * not a problem
173 * 203 *
174 * For example, if "Implicit return value" is enabled, methods will 204 * For example, if the "Implicit Return" feature is enabled, methods will
175 * always return a value 205 * always return a value
176 */ 206 */
177 if (!predefined->info.expected_btypes) { 207 if (!predefined->info.expected_btypes) {
@@ -182,7 +212,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
182 * Check that the type of the return object is what is expected for 212 * Check that the type of the return object is what is expected for
183 * this predefined name 213 * this predefined name
184 */ 214 */
185 status = acpi_ns_check_object_type(pathname, return_object, 215 status = acpi_ns_check_object_type(pathname, return_object_ptr,
186 predefined->info.expected_btypes, 216 predefined->info.expected_btypes,
187 ACPI_NOT_PACKAGE); 217 ACPI_NOT_PACKAGE);
188 if (ACPI_FAILURE(status)) { 218 if (ACPI_FAILURE(status)) {
@@ -193,11 +223,12 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
193 223
194 if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_PACKAGE) { 224 if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_PACKAGE) {
195 status = 225 status =
196 acpi_ns_check_package(pathname, return_object, predefined); 226 acpi_ns_check_package(pathname, return_object_ptr,
227 predefined);
197 } 228 }
198 229
199 exit: 230 exit:
200 if (pathname) { 231 if (pathname != predefined->info.name) {
201 ACPI_FREE(pathname); 232 ACPI_FREE(pathname);
202 } 233 }
203 234
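Note why these signatures changed from union acpi_operand_object * to a double pointer: a validator that only receives the object can complain, but one that receives the caller's pointer can substitute a repaired object in place. A toy illustration of the pattern (hypothetical types, not the ACPICA API):

    #include <stdlib.h>

    struct obj { int type; };

    /* Validator that may replace *obj_ptr with a repaired object */
    static int check_and_repair(struct obj **obj_ptr, int expected_type)
    {
            struct obj *repaired;

            if ((*obj_ptr)->type == expected_type)
                    return 0;                 /* already correct */

            repaired = malloc(sizeof(*repaired));
            if (!repaired)
                    return -1;
            repaired->type = expected_type;   /* build the replacement */

            free(*obj_ptr);                   /* drop the original object */
            *obj_ptr = repaired;              /* install the new one */
            return 0;
    }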
@@ -210,6 +241,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
210 * 241 *
211 * PARAMETERS: Pathname - Full pathname to the node (for error msgs) 242 * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
212 * Node - Namespace node for the method/object 243 * Node - Namespace node for the method/object
244 * user_param_count - Number of args passed in by the caller
213 * Predefined - Pointer to entry in predefined name table 245 * Predefined - Pointer to entry in predefined name table
214 * 246 *
215 * RETURN: None 247 * RETURN: None
@@ -223,32 +255,76 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
223void 255void
224acpi_ns_check_parameter_count(char *pathname, 256acpi_ns_check_parameter_count(char *pathname,
225 struct acpi_namespace_node *node, 257 struct acpi_namespace_node *node,
258 u32 user_param_count,
226 const union acpi_predefined_info *predefined) 259 const union acpi_predefined_info *predefined)
227{ 260{
228 u32 param_count; 261 u32 param_count;
229 u32 required_params_current; 262 u32 required_params_current;
230 u32 required_params_old; 263 u32 required_params_old;
231 264
232 /* 265 /* Methods have 0-7 parameters. All other types have zero. */
233 * Check that the ASL-defined parameter count is what is expected for 266
234 * this predefined name.
235 *
236 * Methods have 0-7 parameters. All other types have zero.
237 */
238 param_count = 0; 267 param_count = 0;
239 if (node->type == ACPI_TYPE_METHOD) { 268 if (node->type == ACPI_TYPE_METHOD) {
240 param_count = node->object->method.param_count; 269 param_count = node->object->method.param_count;
241 } 270 }
242 271
243 /* Validate parameter count - allow two different legal counts (_SCP) */ 272 /* Argument count check for non-predefined methods/objects */
273
274 if (!predefined) {
275 /*
276 * Warning if too few or too many arguments have been passed by the
277 * caller. An incorrect number of arguments may not cause the method
278 * to fail. However, the method will fail if there are too few
279 * arguments and the method attempts to use one of the missing ones.
280 */
281 if (user_param_count < param_count) {
282 ACPI_WARNING((AE_INFO,
283 "%s: Insufficient arguments - needs %d, found %d",
284 pathname, param_count, user_param_count));
285 } else if (user_param_count > param_count) {
286 ACPI_WARNING((AE_INFO,
287 "%s: Excess arguments - needs %d, found %d",
288 pathname, param_count, user_param_count));
289 }
290 return;
291 }
292
293 /* Allow two different legal argument counts (_SCP, etc.) */
244 294
245 required_params_current = predefined->info.param_count & 0x0F; 295 required_params_current = predefined->info.param_count & 0x0F;
246 required_params_old = predefined->info.param_count >> 4; 296 required_params_old = predefined->info.param_count >> 4;
247 297
298 if (user_param_count != ACPI_UINT32_MAX) {
299
300 /* Validate the user-supplied parameter count */
301
302 if ((user_param_count != required_params_current) &&
303 (user_param_count != required_params_old)) {
304 ACPI_WARNING((AE_INFO,
305 "%s: Parameter count mismatch - caller passed %d, ACPI requires %d",
306 pathname, user_param_count,
307 required_params_current));
308 }
309 }
310
311 /*
312 * Only validate the argument count on the first successful evaluation of
313 * the method. This ensures that any warnings will only be emitted during
314 * the very first evaluation of the method/object.
315 */
316 if (node->flags & ANOBJ_EVALUATED) {
317 return;
318 }
319
320 /*
321 * Check that the ASL-defined parameter count is what is expected for
322 * this predefined name.
323 */
248 if ((param_count != required_params_current) && 324 if ((param_count != required_params_current) &&
249 (param_count != required_params_old)) { 325 (param_count != required_params_old)) {
250 ACPI_WARNING((AE_INFO, 326 ACPI_WARNING((AE_INFO,
251 "%s: Parameter count mismatch - ASL declared %d, expected %d", 327 "%s: Parameter count mismatch - ASL declared %d, ACPI requires %d",
252 pathname, param_count, required_params_current)); 328 pathname, param_count, required_params_current));
253 } 329 }
254} 330}
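The predefined-name table packs both legal argument counts into a single byte: the current count in the low nibble and the legacy count (for names such as _SCP that gained arguments in later ACPI revisions) in the high nibble. Decoding it, as the code above does:

    unsigned char packed = 0x13;    /* hypothetical entry: old = 1, current = 3 */
    unsigned int required_params_current = packed & 0x0F;
    unsigned int required_params_old = packed >> 4;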
@@ -307,8 +383,8 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
307 * FUNCTION: acpi_ns_check_package 383 * FUNCTION: acpi_ns_check_package
308 * 384 *
309 * PARAMETERS: Pathname - Full pathname to the node (for error msgs) 385 * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
310 * return_object - Object returned from the evaluation of a 386 * return_object_ptr - Pointer to the object returned from the
311 * method or object 387 * evaluation of a method or object
312 * Predefined - Pointer to entry in predefined name table 388 * Predefined - Pointer to entry in predefined name table
313 * 389 *
314 * RETURN: Status 390 * RETURN: Status
@@ -320,9 +396,10 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
320 396
321static acpi_status 397static acpi_status
322acpi_ns_check_package(char *pathname, 398acpi_ns_check_package(char *pathname,
323 union acpi_operand_object *return_object, 399 union acpi_operand_object **return_object_ptr,
324 const union acpi_predefined_info *predefined) 400 const union acpi_predefined_info *predefined)
325{ 401{
402 union acpi_operand_object *return_object = *return_object_ptr;
326 const union acpi_predefined_info *package; 403 const union acpi_predefined_info *package;
327 union acpi_operand_object *sub_package; 404 union acpi_operand_object *sub_package;
328 union acpi_operand_object **elements; 405 union acpi_operand_object **elements;
@@ -408,7 +485,7 @@ acpi_ns_check_package(char *pathname,
408 * elements must be of the same type 485 * elements must be of the same type
409 */ 486 */
410 for (i = 0; i < count; i++) { 487 for (i = 0; i < count; i++) {
411 status = acpi_ns_check_object_type(pathname, *elements, 488 status = acpi_ns_check_object_type(pathname, elements,
412 package->ret_info. 489 package->ret_info.
413 object_type1, i); 490 object_type1, i);
414 if (ACPI_FAILURE(status)) { 491 if (ACPI_FAILURE(status)) {
@@ -441,7 +518,7 @@ acpi_ns_check_package(char *pathname,
441 518
442 status = 519 status =
443 acpi_ns_check_object_type(pathname, 520 acpi_ns_check_object_type(pathname,
444 *elements, 521 elements,
445 package-> 522 package->
446 ret_info3. 523 ret_info3.
447 object_type[i], 524 object_type[i],
@@ -454,7 +531,7 @@ acpi_ns_check_package(char *pathname,
454 531
455 status = 532 status =
456 acpi_ns_check_object_type(pathname, 533 acpi_ns_check_object_type(pathname,
457 *elements, 534 elements,
458 package-> 535 package->
459 ret_info3. 536 ret_info3.
460 tail_object_type, 537 tail_object_type,
@@ -471,7 +548,7 @@ acpi_ns_check_package(char *pathname,
471 548
472 /* First element is the (Integer) count of sub-packages to follow */ 549 /* First element is the (Integer) count of sub-packages to follow */
473 550
474 status = acpi_ns_check_object_type(pathname, *elements, 551 status = acpi_ns_check_object_type(pathname, elements,
475 ACPI_RTYPE_INTEGER, 0); 552 ACPI_RTYPE_INTEGER, 0);
476 if (ACPI_FAILURE(status)) { 553 if (ACPI_FAILURE(status)) {
477 return (status); 554 return (status);
@@ -509,7 +586,7 @@ acpi_ns_check_package(char *pathname,
509 /* Each sub-object must be of type Package */ 586 /* Each sub-object must be of type Package */
510 587
511 status = 588 status =
512 acpi_ns_check_object_type(pathname, sub_package, 589 acpi_ns_check_object_type(pathname, &sub_package,
513 ACPI_RTYPE_PACKAGE, i); 590 ACPI_RTYPE_PACKAGE, i);
514 if (ACPI_FAILURE(status)) { 591 if (ACPI_FAILURE(status)) {
515 return (status); 592 return (status);
@@ -567,12 +644,8 @@ acpi_ns_check_package(char *pathname,
567 for (j = 0; j < expected_count; j++) { 644 for (j = 0; j < expected_count; j++) {
568 status = 645 status =
569 acpi_ns_check_object_type(pathname, 646 acpi_ns_check_object_type(pathname,
570 sub_elements 647 &sub_elements[j],
571 [j], 648 package->ret_info2.object_type[j], j);
572 package->
573 ret_info2.
574 object_type
575 [j], j);
576 if (ACPI_FAILURE(status)) { 649 if (ACPI_FAILURE(status)) {
577 return (status); 650 return (status);
578 } 651 }
@@ -611,7 +684,7 @@ acpi_ns_check_package(char *pathname,
611 684
612 status = 685 status =
613 acpi_ns_check_object_type(pathname, 686 acpi_ns_check_object_type(pathname,
614 *sub_elements, 687 sub_elements,
615 ACPI_RTYPE_INTEGER, 688 ACPI_RTYPE_INTEGER,
616 0); 689 0);
617 if (ACPI_FAILURE(status)) { 690 if (ACPI_FAILURE(status)) {
@@ -708,7 +781,7 @@ acpi_ns_check_package_elements(char *pathname,
708 * The second group can have a count of zero. 781 * The second group can have a count of zero.
709 */ 782 */
710 for (i = 0; i < count1; i++) { 783 for (i = 0; i < count1; i++) {
711 status = acpi_ns_check_object_type(pathname, *this_element, 784 status = acpi_ns_check_object_type(pathname, this_element,
712 type1, i); 785 type1, i);
713 if (ACPI_FAILURE(status)) { 786 if (ACPI_FAILURE(status)) {
714 return (status); 787 return (status);
@@ -717,7 +790,7 @@ acpi_ns_check_package_elements(char *pathname,
717 } 790 }
718 791
719 for (i = 0; i < count2; i++) { 792 for (i = 0; i < count2; i++) {
720 status = acpi_ns_check_object_type(pathname, *this_element, 793 status = acpi_ns_check_object_type(pathname, this_element,
721 type2, (i + count1)); 794 type2, (i + count1));
722 if (ACPI_FAILURE(status)) { 795 if (ACPI_FAILURE(status)) {
723 return (status); 796 return (status);
@@ -733,8 +806,8 @@ acpi_ns_check_package_elements(char *pathname,
733 * FUNCTION: acpi_ns_check_object_type 806 * FUNCTION: acpi_ns_check_object_type
734 * 807 *
735 * PARAMETERS: Pathname - Full pathname to the node (for error msgs) 808 * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
736 * return_object - Object return from the execution of this 809 * return_object_ptr - Pointer to the object returned from the
737 * method/object 810 * evaluation of a method or object
738 * expected_btypes - Bitmap of expected return type(s) 811 * expected_btypes - Bitmap of expected return type(s)
739 * package_index - Index of object within parent package (if 812 * package_index - Index of object within parent package (if
740 * applicable - ACPI_NOT_PACKAGE otherwise) 813 * applicable - ACPI_NOT_PACKAGE otherwise)
@@ -748,9 +821,10 @@ acpi_ns_check_package_elements(char *pathname,
748 821
749static acpi_status 822static acpi_status
750acpi_ns_check_object_type(char *pathname, 823acpi_ns_check_object_type(char *pathname,
751 union acpi_operand_object *return_object, 824 union acpi_operand_object **return_object_ptr,
752 u32 expected_btypes, u32 package_index) 825 u32 expected_btypes, u32 package_index)
753{ 826{
827 union acpi_operand_object *return_object = *return_object_ptr;
754 acpi_status status = AE_OK; 828 acpi_status status = AE_OK;
755 u32 return_btype; 829 u32 return_btype;
756 char type_buffer[48]; /* Room for 5 types */ 830 char type_buffer[48]; /* Room for 5 types */
@@ -814,6 +888,14 @@ acpi_ns_check_object_type(char *pathname,
814 /* Is the object one of the expected types? */ 888 /* Is the object one of the expected types? */
815 889
816 if (!(return_btype & expected_btypes)) { 890 if (!(return_btype & expected_btypes)) {
891
892 /* Type mismatch -- attempt repair of the returned object */
893
894 status = acpi_ns_repair_object(expected_btypes, package_index,
895 return_object_ptr);
896 if (ACPI_SUCCESS(status)) {
897 return (status);
898 }
817 goto type_error_exit; 899 goto type_error_exit;
818 } 900 }
819 901
@@ -898,3 +980,86 @@ acpi_ns_check_reference(char *pathname,
898 980
899 return (AE_AML_OPERAND_TYPE); 981 return (AE_AML_OPERAND_TYPE);
900} 982}
983
984/*******************************************************************************
985 *
986 * FUNCTION: acpi_ns_repair_object
987 *
 988 * PARAMETERS: expected_btypes - Object types expected for this name
989 * package_index - Used to determine if target is in a package
990 * return_object_ptr - Pointer to the object returned from the
991 * evaluation of a method or object
992 *
993 * RETURN: Status. AE_OK if repair was successful.
994 *
995 * DESCRIPTION: Attempt to repair/convert a return object of a type that was
996 * not expected.
997 *
998 ******************************************************************************/
999
1000static acpi_status
1001acpi_ns_repair_object(u32 expected_btypes,
1002 u32 package_index,
1003 union acpi_operand_object **return_object_ptr)
1004{
1005 union acpi_operand_object *return_object = *return_object_ptr;
1006 union acpi_operand_object *new_object;
1007 acpi_size length;
1008
1009 switch (ACPI_GET_OBJECT_TYPE(return_object)) {
1010 case ACPI_TYPE_BUFFER:
1011
1012 if (!(expected_btypes & ACPI_RTYPE_STRING)) {
1013 return (AE_AML_OPERAND_TYPE);
1014 }
1015
1016 /*
1017 * Have a Buffer, expected a String, convert. Use a to_string
1018 * conversion, no transform performed on the buffer data. The best
1019 * example of this is the _BIF method, where the string data from
1020 * the battery is often (incorrectly) returned as buffer object(s).
1021 */
1022 length = 0;
1023 while ((length < return_object->buffer.length) &&
1024 (return_object->buffer.pointer[length])) {
1025 length++;
1026 }
1027
1028 /* Allocate a new string object */
1029
1030 new_object = acpi_ut_create_string_object(length);
1031 if (!new_object) {
1032 return (AE_NO_MEMORY);
1033 }
1034
1035 /*
1036 * Copy the raw buffer data with no transform. String is already NULL
1037 * terminated at Length+1.
1038 */
1039 ACPI_MEMCPY(new_object->string.pointer,
1040 return_object->buffer.pointer, length);
1041
1042 /* Install the new return object */
1043
1044 acpi_ut_remove_reference(return_object);
1045 *return_object_ptr = new_object;
1046
1047 /*
1048 * If the object is a package element, we need to:
1049 * 1. Decrement the reference count of the original object, it was
1050 * incremented when building the package
1051 * 2. Increment the reference count of the new object, it will be
1052 * decremented when releasing the package
1053 */
1054 if (package_index != ACPI_NOT_PACKAGE) {
1055 acpi_ut_remove_reference(return_object);
1056 acpi_ut_add_reference(new_object);
1057 }
1058 return (AE_OK);
1059
1060 default:
1061 break;
1062 }
1063
1064 return (AE_AML_OPERAND_TYPE);
1065}
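The Buffer-to-String repair is the classic _BIF fixup: determine the effective string length by scanning up to the first NUL byte (or the end of the buffer), then copy the raw bytes into a freshly allocated, NUL-terminated string. A standalone sketch of the conversion, assuming plain libc allocation rather than ACPICA's object machinery:

    #include <stdlib.h>
    #include <string.h>

    static char *buffer_to_string(const unsigned char *buf, size_t buf_len)
    {
            size_t length = 0;
            char *str;

            while (length < buf_len && buf[length])  /* effective length */
                    length++;

            str = malloc(length + 1);
            if (!str)
                    return NULL;

            memcpy(str, buf, length);                /* raw copy, no transform */
            str[length] = '\0';
            return str;
    }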
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/acpica/nssearch.c
index a9a80bf811b3..6fea13f3f52d 100644
--- a/drivers/acpi/namespace/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include "acnamesp.h"
46 47
47#define _COMPONENT ACPI_NAMESPACE 48#define _COMPONENT ACPI_NAMESPACE
48ACPI_MODULE_NAME("nssearch") 49ACPI_MODULE_NAME("nssearch")
diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/acpica/nsutils.c
index b0817e1127b1..3e1149bf4aa5 100644
--- a/drivers/acpi/namespace/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -43,9 +43,10 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acnamesp.h> 46#include "accommon.h"
47#include <acpi/amlcode.h> 47#include "acnamesp.h"
48#include <acpi/actables.h> 48#include "amlcode.h"
49#include "actables.h"
49 50
50#define _COMPONENT ACPI_NAMESPACE 51#define _COMPONENT ACPI_NAMESPACE
51ACPI_MODULE_NAME("nsutils") 52ACPI_MODULE_NAME("nsutils")
@@ -314,9 +315,15 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
314 * 315 *
315 * strlen() + 1 covers the first name_seg, which has no path separator 316 * strlen() + 1 covers the first name_seg, which has no path separator
316 */ 317 */
317 if (acpi_ns_valid_root_prefix(next_external_char[0])) { 318 if (acpi_ns_valid_root_prefix(*next_external_char)) {
318 info->fully_qualified = TRUE; 319 info->fully_qualified = TRUE;
319 next_external_char++; 320 next_external_char++;
321
322 /* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */
323
324 while (acpi_ns_valid_root_prefix(*next_external_char)) {
325 next_external_char++;
326 }
320 } else { 327 } else {
321 /* 328 /*
 322 * Handle Caret prefixes 329
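The added loop tolerates firmware that writes a doubled root prefix, as in \\_SB.PCI0.SBRG.EC0: the first backslash marks the name as fully qualified, and every redundant one after it is simply consumed. In isolation:

    #include <stdio.h>

    int main(void)
    {
            const char *name = "\\\\_SB.PCI0.SBRG.EC0";  /* doubled root prefix */

            if (*name == '\\') {
                    name++;                   /* the meaningful root prefix */
                    while (*name == '\\')     /* skip redundant prefixes */
                            name++;
            }
            printf("%s\n", name);             /* prints _SB.PCI0.SBRG.EC0 */
            return 0;
    }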
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 3c905ce26d7d..200895fa2728 100644
--- a/drivers/acpi/namespace/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include "acnamesp.h"
46 47
47#define _COMPONENT ACPI_NAMESPACE 48#define _COMPONENT ACPI_NAMESPACE
48ACPI_MODULE_NAME("nswalk") 49ACPI_MODULE_NAME("nswalk")
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index a085cc39c055..22a7171ac1ed 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -43,8 +43,9 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acnamesp.h> 46#include "accommon.h"
47#include <acpi/acinterp.h> 47#include "acnamesp.h"
48#include "acinterp.h"
48 49
49#define _COMPONENT ACPI_NAMESPACE 50#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nsxfeval") 51ACPI_MODULE_NAME("nsxfeval")
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 5efa4e7ddb0b..9589fea24997 100644
--- a/drivers/acpi/namespace/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -43,7 +43,8 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acnamesp.h> 46#include "accommon.h"
47#include "acnamesp.h"
47 48
48#define _COMPONENT ACPI_NAMESPACE 49#define _COMPONENT ACPI_NAMESPACE
49ACPI_MODULE_NAME("nsxfname") 50ACPI_MODULE_NAME("nsxfname")
diff --git a/drivers/acpi/namespace/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 2b375ee80cef..1c7efc15225f 100644
--- a/drivers/acpi/namespace/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -43,7 +43,8 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acnamesp.h> 46#include "accommon.h"
47#include "acnamesp.h"
47 48
48#define _COMPONENT ACPI_NAMESPACE 49#define _COMPONENT ACPI_NAMESPACE
49ACPI_MODULE_NAME("nsxfobj") 50ACPI_MODULE_NAME("nsxfobj")
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/acpica/psargs.c
index d830b29b85b1..b161f3544b51 100644
--- a/drivers/acpi/parser/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -42,10 +42,11 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h> 45#include "accommon.h"
46#include <acpi/amlcode.h> 46#include "acparser.h"
47#include <acpi/acnamesp.h> 47#include "amlcode.h"
48#include <acpi/acdispat.h> 48#include "acnamesp.h"
49#include "acdispat.h"
49 50
50#define _COMPONENT ACPI_PARSER 51#define _COMPONENT ACPI_PARSER
51ACPI_MODULE_NAME("psargs") 52ACPI_MODULE_NAME("psargs")
diff --git a/drivers/acpi/parser/psloop.c b/drivers/acpi/acpica/psloop.c
index 4647039a0d8a..c5f6ce19a401 100644
--- a/drivers/acpi/parser/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -50,9 +50,10 @@
50 */ 50 */
51 51
52#include <acpi/acpi.h> 52#include <acpi/acpi.h>
53#include <acpi/acparser.h> 53#include "accommon.h"
54#include <acpi/acdispat.h> 54#include "acparser.h"
55#include <acpi/amlcode.h> 55#include "acdispat.h"
56#include "amlcode.h"
56 57
57#define _COMPONENT ACPI_PARSER 58#define _COMPONENT ACPI_PARSER
58ACPI_MODULE_NAME("psloop") 59ACPI_MODULE_NAME("psloop")
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/acpica/psopcode.c
index f425ab30eae8..3bc3a60194d6 100644
--- a/drivers/acpi/parser/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -42,9 +42,10 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h> 45#include "accommon.h"
46#include <acpi/acopcode.h> 46#include "acparser.h"
47#include <acpi/amlcode.h> 47#include "acopcode.h"
48#include "amlcode.h"
48 49
49#define _COMPONENT ACPI_PARSER 50#define _COMPONENT ACPI_PARSER
50ACPI_MODULE_NAME("psopcode") 51ACPI_MODULE_NAME("psopcode")
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/acpica/psparse.c
index 68e932f215ea..70838e9b608c 100644
--- a/drivers/acpi/parser/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -51,11 +51,12 @@
51 */ 51 */
52 52
53#include <acpi/acpi.h> 53#include <acpi/acpi.h>
54#include <acpi/acparser.h> 54#include "accommon.h"
55#include <acpi/acdispat.h> 55#include "acparser.h"
56#include <acpi/amlcode.h> 56#include "acdispat.h"
57#include <acpi/acnamesp.h> 57#include "amlcode.h"
58#include <acpi/acinterp.h> 58#include "acnamesp.h"
59#include "acinterp.h"
59 60
60#define _COMPONENT ACPI_PARSER 61#define _COMPONENT ACPI_PARSER
61ACPI_MODULE_NAME("psparse") 62ACPI_MODULE_NAME("psparse")
@@ -447,10 +448,22 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
447 walk_state, walk_state->parser_state.aml, 448 walk_state, walk_state->parser_state.aml,
448 walk_state->parser_state.aml_size)); 449 walk_state->parser_state.aml_size));
449 450
451 if (!walk_state->parser_state.aml) {
452 return_ACPI_STATUS(AE_NULL_OBJECT);
453 }
454
450 /* Create and initialize a new thread state */ 455 /* Create and initialize a new thread state */
451 456
452 thread = acpi_ut_create_thread_state(); 457 thread = acpi_ut_create_thread_state();
453 if (!thread) { 458 if (!thread) {
459 if (walk_state->method_desc) {
460
461 /* Executing a control method - additional cleanup */
462
463 acpi_ds_terminate_control_method(
464 walk_state->method_desc, walk_state);
465 }
466
454 acpi_ds_delete_walk_state(walk_state); 467 acpi_ds_delete_walk_state(walk_state);
455 return_ACPI_STATUS(AE_NO_MEMORY); 468 return_ACPI_STATUS(AE_NO_MEMORY);
456 } 469 }
diff --git a/drivers/acpi/parser/psscope.c b/drivers/acpi/acpica/psscope.c
index ee50e67c9443..2feca5ca9581 100644
--- a/drivers/acpi/parser/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h> 45#include "accommon.h"
46#include "acparser.h"
46 47
47#define _COMPONENT ACPI_PARSER 48#define _COMPONENT ACPI_PARSER
48ACPI_MODULE_NAME("psscope") 49ACPI_MODULE_NAME("psscope")
diff --git a/drivers/acpi/parser/pstree.c b/drivers/acpi/acpica/pstree.c
index 1dd355ddd182..4d3389118ec3 100644
--- a/drivers/acpi/parser/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -42,8 +42,9 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h> 45#include "accommon.h"
46#include <acpi/amlcode.h> 46#include "acparser.h"
47#include "amlcode.h"
47 48
48#define _COMPONENT ACPI_PARSER 49#define _COMPONENT ACPI_PARSER
49ACPI_MODULE_NAME("pstree") 50ACPI_MODULE_NAME("pstree")
diff --git a/drivers/acpi/parser/psutils.c b/drivers/acpi/acpica/psutils.c
index 7cf1f65cd5bb..e636e078ad3d 100644
--- a/drivers/acpi/parser/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -42,8 +42,9 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h> 45#include "accommon.h"
46#include <acpi/amlcode.h> 46#include "acparser.h"
47#include "amlcode.h"
47 48
48#define _COMPONENT ACPI_PARSER 49#define _COMPONENT ACPI_PARSER
49ACPI_MODULE_NAME("psutils") 50ACPI_MODULE_NAME("psutils")
diff --git a/drivers/acpi/parser/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 8b86ad5a3201..78b8b791f2ae 100644
--- a/drivers/acpi/parser/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h> 45#include "accommon.h"
46#include "acparser.h"
46 47
47#define _COMPONENT ACPI_PARSER 48#define _COMPONENT ACPI_PARSER
48ACPI_MODULE_NAME("pswalk") 49ACPI_MODULE_NAME("pswalk")
diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/acpica/psxface.c
index 270469aae842..ff06032c0f06 100644
--- a/drivers/acpi/parser/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -42,9 +42,11 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h> 45#include "accommon.h"
46#include <acpi/acdispat.h> 46#include "acparser.h"
47#include <acpi/acinterp.h> 47#include "acdispat.h"
48#include "acinterp.h"
49#include "amlcode.h"
48 50
49#define _COMPONENT ACPI_PARSER 51#define _COMPONENT ACPI_PARSER
50ACPI_MODULE_NAME("psxface") 52ACPI_MODULE_NAME("psxface")
@@ -278,6 +280,38 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
278 goto cleanup; 280 goto cleanup;
279 } 281 }
280 282
283 /* Invoke an internal method if necessary */
284
285 if (info->obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
286 status = info->obj_desc->method.implementation(walk_state);
287 info->return_object = walk_state->return_desc;
288
289 /* Cleanup states */
290
291 acpi_ds_scope_stack_clear(walk_state);
292 acpi_ps_cleanup_scope(&walk_state->parser_state);
293 acpi_ds_terminate_control_method(walk_state->method_desc,
294 walk_state);
295 acpi_ds_delete_walk_state(walk_state);
296 goto cleanup;
297 }
298
299 /*
300 * Start method evaluation with an implicit return of zero.
301 * This is done for Windows compatibility.
302 */
303 if (acpi_gbl_enable_interpreter_slack) {
304 walk_state->implicit_return_obj =
305 acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
306 if (!walk_state->implicit_return_obj) {
307 status = AE_NO_MEMORY;
308 acpi_ds_delete_walk_state(walk_state);
309 goto cleanup;
310 }
311
312 walk_state->implicit_return_obj->integer.value = 0;
313 }
314
281 /* Parse the AML */ 315 /* Parse the AML */
282 316
283 status = acpi_ps_parse_aml(walk_state); 317 status = acpi_ps_parse_aml(walk_state);
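With interpreter slack enabled, a control method that terminates without an explicit Return now produces Integer 0 rather than no return object, matching Windows behavior. Roughly what a caller through the public interface would observe (the handle, method name, and surrounding code are hypothetical):

    #include <linux/slab.h>
    #include <acpi/acpi.h>

    static void demo(acpi_handle handle)
    {
            struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
            union acpi_object *obj;

            /* Assume "DEMO" has no Return statement in its ASL */
            if (ACPI_FAILURE(acpi_evaluate_object(handle, "DEMO", NULL, &out)))
                    return;

            obj = out.pointer;
            if (obj && obj->type == ACPI_TYPE_INTEGER)
                    ;       /* with slack enabled: obj->integer.value == 0 */

            kfree(out.pointer);
    }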
diff --git a/drivers/acpi/resources/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 7f96332822bf..1e437bfd8db5 100644
--- a/drivers/acpi/resources/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include "accommon.h"
46#include "acresrc.h"
46 47
47#define _COMPONENT ACPI_RESOURCES 48#define _COMPONENT ACPI_RESOURCES
48ACPI_MODULE_NAME("rsaddr") 49ACPI_MODULE_NAME("rsaddr")
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 8eaaecf92009..52865ee6bc77 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -42,8 +42,9 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include "accommon.h"
46#include <acpi/acnamesp.h> 46#include "acresrc.h"
47#include "acnamesp.h"
47 48
48#define _COMPONENT ACPI_RESOURCES 49#define _COMPONENT ACPI_RESOURCES
49ACPI_MODULE_NAME("rscalc") 50ACPI_MODULE_NAME("rscalc")
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 08b8d73e6ee5..61566b1a0616 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -42,8 +42,9 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include "accommon.h"
46#include <acpi/acnamesp.h> 46#include "acresrc.h"
47#include "acnamesp.h"
47 48
48#define _COMPONENT ACPI_RESOURCES 49#define _COMPONENT ACPI_RESOURCES
49ACPI_MODULE_NAME("rscreate") 50ACPI_MODULE_NAME("rscreate")
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 6bbbb7b8941a..3f0ca5a12d34 100644
--- a/drivers/acpi/resources/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include "accommon.h"
46#include "acresrc.h"
46 47
47#define _COMPONENT ACPI_RESOURCES 48#define _COMPONENT ACPI_RESOURCES
48ACPI_MODULE_NAME("rsdump") 49ACPI_MODULE_NAME("rsdump")
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 3f0a1fedbe0e..77b25fdb459c 100644
--- a/drivers/acpi/resources/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include "accommon.h"
46#include "acresrc.h"
46 47
47#define _COMPONENT ACPI_RESOURCES 48#define _COMPONENT ACPI_RESOURCES
48ACPI_MODULE_NAME("rsinfo") 49ACPI_MODULE_NAME("rsinfo")
diff --git a/drivers/acpi/resources/rsio.c b/drivers/acpi/acpica/rsio.c
index b66d42e7402e..35a49aa95609 100644
--- a/drivers/acpi/resources/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include "accommon.h"
46#include "acresrc.h"
46 47
47#define _COMPONENT ACPI_RESOURCES 48#define _COMPONENT ACPI_RESOURCES
48ACPI_MODULE_NAME("rsio") 49ACPI_MODULE_NAME("rsio")
diff --git a/drivers/acpi/resources/rsirq.c b/drivers/acpi/acpica/rsirq.c
index a8805efc0366..2e0256983aa6 100644
--- a/drivers/acpi/resources/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include "accommon.h"
46#include "acresrc.h"
46 47
47#define _COMPONENT ACPI_RESOURCES 48#define _COMPONENT ACPI_RESOURCES
48ACPI_MODULE_NAME("rsirq") 49ACPI_MODULE_NAME("rsirq")
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/acpica/rslist.c
index b78c7e797a19..1b1dbc69f087 100644
--- a/drivers/acpi/resources/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include "accommon.h"
46#include "acresrc.h"
46 47
47#define _COMPONENT ACPI_RESOURCES 48#define _COMPONENT ACPI_RESOURCES
48ACPI_MODULE_NAME("rslist") 49ACPI_MODULE_NAME("rslist")
diff --git a/drivers/acpi/resources/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 63b21abd90bb..ddc76cebdc92 100644
--- a/drivers/acpi/resources/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include "accommon.h"
46#include "acresrc.h"
46 47
47#define _COMPONENT ACPI_RESOURCES 48#define _COMPONENT ACPI_RESOURCES
48ACPI_MODULE_NAME("rsmemory") 49ACPI_MODULE_NAME("rsmemory")
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 96a6c0353255..5bc49a553284 100644
--- a/drivers/acpi/resources/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include "accommon.h"
46#include "acresrc.h"
46 47
47#define _COMPONENT ACPI_RESOURCES 48#define _COMPONENT ACPI_RESOURCES
48ACPI_MODULE_NAME("rsmisc") 49ACPI_MODULE_NAME("rsmisc")
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/acpica/rsutils.c
index f7b3bcd59ba7..bc03d5966829 100644
--- a/drivers/acpi/resources/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -42,8 +42,9 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include <acpi/acresrc.h> 46#include "acnamesp.h"
47#include "acresrc.h"
47 48
48#define _COMPONENT ACPI_RESOURCES 49#define _COMPONENT ACPI_RESOURCES
49ACPI_MODULE_NAME("rsutils") 50ACPI_MODULE_NAME("rsutils")
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/acpica/rsxface.c
index f59f4c4e034c..69a2aa5b5d83 100644
--- a/drivers/acpi/resources/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -42,8 +42,9 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acresrc.h> 45#include "accommon.h"
46#include <acpi/acnamesp.h> 46#include "acresrc.h"
47#include "acnamesp.h"
47 48
48#define _COMPONENT ACPI_RESOURCES 49#define _COMPONENT ACPI_RESOURCES
49ACPI_MODULE_NAME("rsxface") 50ACPI_MODULE_NAME("rsxface")
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 2817158fb6a1..3636e4f8fb73 100644
--- a/drivers/acpi/tables/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -42,15 +42,16 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/actables.h> 45#include "accommon.h"
46#include "actables.h"
46 47
47#define _COMPONENT ACPI_TABLES 48#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbfadt") 49ACPI_MODULE_NAME("tbfadt")
49 50
50/* Local prototypes */ 51/* Local prototypes */
51static void inline 52static inline void
52acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, 53acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
53 u8 byte_width, u64 address); 54 u8 space_id, u8 byte_width, u64 address);
54 55
55static void acpi_tb_convert_fadt(void); 56static void acpi_tb_convert_fadt(void);
56 57
@@ -60,9 +61,10 @@ static void acpi_tb_validate_fadt(void);
60 61
61typedef struct acpi_fadt_info { 62typedef struct acpi_fadt_info {
62 char *name; 63 char *name;
63 u8 target; 64 u8 address64;
64 u8 source; 65 u8 address32;
65 u8 length; 66 u8 length;
67 u8 default_length;
66 u8 type; 68 u8 type;
67 69
68} acpi_fadt_info; 70} acpi_fadt_info;
@@ -71,37 +73,61 @@ typedef struct acpi_fadt_info {
71#define ACPI_FADT_SEPARATE_LENGTH 2 73#define ACPI_FADT_SEPARATE_LENGTH 2
72 74
73static struct acpi_fadt_info fadt_info_table[] = { 75static struct acpi_fadt_info fadt_info_table[] = {
74 {"Pm1aEventBlock", ACPI_FADT_OFFSET(xpm1a_event_block), 76 {"Pm1aEventBlock",
77 ACPI_FADT_OFFSET(xpm1a_event_block),
75 ACPI_FADT_OFFSET(pm1a_event_block), 78 ACPI_FADT_OFFSET(pm1a_event_block),
76 ACPI_FADT_OFFSET(pm1_event_length), ACPI_FADT_REQUIRED}, 79 ACPI_FADT_OFFSET(pm1_event_length),
80 ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */
81 ACPI_FADT_REQUIRED},
77 82
78 {"Pm1bEventBlock", ACPI_FADT_OFFSET(xpm1b_event_block), 83 {"Pm1bEventBlock",
84 ACPI_FADT_OFFSET(xpm1b_event_block),
79 ACPI_FADT_OFFSET(pm1b_event_block), 85 ACPI_FADT_OFFSET(pm1b_event_block),
80 ACPI_FADT_OFFSET(pm1_event_length), 0}, 86 ACPI_FADT_OFFSET(pm1_event_length),
87 ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */
88 0},
81 89
82 {"Pm1aControlBlock", ACPI_FADT_OFFSET(xpm1a_control_block), 90 {"Pm1aControlBlock",
91 ACPI_FADT_OFFSET(xpm1a_control_block),
83 ACPI_FADT_OFFSET(pm1a_control_block), 92 ACPI_FADT_OFFSET(pm1a_control_block),
84 ACPI_FADT_OFFSET(pm1_control_length), ACPI_FADT_REQUIRED}, 93 ACPI_FADT_OFFSET(pm1_control_length),
94 ACPI_PM1_REGISTER_WIDTH,
95 ACPI_FADT_REQUIRED},
85 96
86 {"Pm1bControlBlock", ACPI_FADT_OFFSET(xpm1b_control_block), 97 {"Pm1bControlBlock",
98 ACPI_FADT_OFFSET(xpm1b_control_block),
87 ACPI_FADT_OFFSET(pm1b_control_block), 99 ACPI_FADT_OFFSET(pm1b_control_block),
88 ACPI_FADT_OFFSET(pm1_control_length), 0}, 100 ACPI_FADT_OFFSET(pm1_control_length),
101 ACPI_PM1_REGISTER_WIDTH,
102 0},
89 103
90 {"Pm2ControlBlock", ACPI_FADT_OFFSET(xpm2_control_block), 104 {"Pm2ControlBlock",
105 ACPI_FADT_OFFSET(xpm2_control_block),
91 ACPI_FADT_OFFSET(pm2_control_block), 106 ACPI_FADT_OFFSET(pm2_control_block),
92 ACPI_FADT_OFFSET(pm2_control_length), ACPI_FADT_SEPARATE_LENGTH}, 107 ACPI_FADT_OFFSET(pm2_control_length),
108 ACPI_PM2_REGISTER_WIDTH,
109 ACPI_FADT_SEPARATE_LENGTH},
93 110
94 {"PmTimerBlock", ACPI_FADT_OFFSET(xpm_timer_block), 111 {"PmTimerBlock",
112 ACPI_FADT_OFFSET(xpm_timer_block),
95 ACPI_FADT_OFFSET(pm_timer_block), 113 ACPI_FADT_OFFSET(pm_timer_block),
96 ACPI_FADT_OFFSET(pm_timer_length), ACPI_FADT_REQUIRED}, 114 ACPI_FADT_OFFSET(pm_timer_length),
115 ACPI_PM_TIMER_WIDTH,
116 ACPI_FADT_REQUIRED},
97 117
98 {"Gpe0Block", ACPI_FADT_OFFSET(xgpe0_block), 118 {"Gpe0Block",
119 ACPI_FADT_OFFSET(xgpe0_block),
99 ACPI_FADT_OFFSET(gpe0_block), 120 ACPI_FADT_OFFSET(gpe0_block),
100 ACPI_FADT_OFFSET(gpe0_block_length), ACPI_FADT_SEPARATE_LENGTH}, 121 ACPI_FADT_OFFSET(gpe0_block_length),
122 0,
123 ACPI_FADT_SEPARATE_LENGTH},
101 124
102 {"Gpe1Block", ACPI_FADT_OFFSET(xgpe1_block), 125 {"Gpe1Block",
126 ACPI_FADT_OFFSET(xgpe1_block),
103 ACPI_FADT_OFFSET(gpe1_block), 127 ACPI_FADT_OFFSET(gpe1_block),
104 ACPI_FADT_OFFSET(gpe1_block_length), ACPI_FADT_SEPARATE_LENGTH} 128 ACPI_FADT_OFFSET(gpe1_block_length),
129 0,
130 ACPI_FADT_SEPARATE_LENGTH}
105}; 131};
106 132
107#define ACPI_FADT_INFO_ENTRIES (sizeof (fadt_info_table) / sizeof (struct acpi_fadt_info)) 133#define ACPI_FADT_INFO_ENTRIES (sizeof (fadt_info_table) / sizeof (struct acpi_fadt_info))
@@ -122,9 +148,9 @@ static struct acpi_fadt_info fadt_info_table[] = {
122 * 148 *
123 ******************************************************************************/ 149 ******************************************************************************/
124 150
125static void inline 151static inline void
126acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, 152acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
127 u8 byte_width, u64 address) 153 u8 space_id, u8 byte_width, u64 address)
128{ 154{
129 155
130 /* 156 /*
@@ -135,10 +161,10 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
135 161
136 /* All other fields are byte-wide */ 162 /* All other fields are byte-wide */
137 163
138 generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO; 164 generic_address->space_id = space_id;
139 generic_address->bit_width = byte_width << 3; 165 generic_address->bit_width = (u8)ACPI_MUL_8(byte_width);
140 generic_address->bit_offset = 0; 166 generic_address->bit_offset = 0;
141 generic_address->access_width = 0; 167 generic_address->access_width = 0; /* Access width ANY */
142} 168}
143 169
144/******************************************************************************* 170/*******************************************************************************
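acpi_tb_init_generic_address() converts a legacy byte-wide FADT register description into a GAS structure: the byte width becomes a bit width (ACPI_MUL_8 is a multiply by 8), the bit offset is zero, and the access width is left at zero, meaning any access size. The equivalent conversion with plain types, field names assumed from the struct above:

    #include <stdint.h>

    struct gas {                  /* simplified acpi_generic_address */
            uint8_t  space_id;
            uint8_t  bit_width;
            uint8_t  bit_offset;
            uint8_t  access_width;
            uint64_t address;
    };

    static void init_gas(struct gas *g, uint8_t space_id,
                         uint8_t byte_width, uint64_t address)
    {
            g->address = address;
            g->space_id = space_id;
            g->bit_width = (uint8_t)(byte_width << 3);  /* bytes to bits */
            g->bit_offset = 0;
            g->access_width = 0;                        /* access width ANY */
    }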
@@ -225,7 +251,8 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
225 */ 251 */
226 if (length > sizeof(struct acpi_table_fadt)) { 252 if (length > sizeof(struct acpi_table_fadt)) {
227 ACPI_WARNING((AE_INFO, 253 ACPI_WARNING((AE_INFO,
228 "FADT (revision %u) is longer than ACPI 2.0 version, truncating length 0x%X to 0x%zX", 254 "FADT (revision %u) is longer than ACPI 2.0 version, "
255 "truncating length 0x%X to 0x%zX",
229 table->revision, (unsigned)length, 256 table->revision, (unsigned)length,
230 sizeof(struct acpi_table_fadt))); 257 sizeof(struct acpi_table_fadt)));
231 } 258 }
@@ -244,7 +271,6 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
244 * 2) Validate some of the important values within the FADT 271 * 2) Validate some of the important values within the FADT
245 */ 272 */
246 acpi_tb_convert_fadt(); 273 acpi_tb_convert_fadt();
247 acpi_tb_validate_fadt();
248} 274}
249 275
250/******************************************************************************* 276/*******************************************************************************
@@ -278,22 +304,36 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
278 304
279static void acpi_tb_convert_fadt(void) 305static void acpi_tb_convert_fadt(void)
280{ 306{
281 u8 pm1_register_length; 307 u8 pm1_register_bit_width;
282 struct acpi_generic_address *target; 308 u8 pm1_register_byte_width;
309 struct acpi_generic_address *target64;
283 u32 i; 310 u32 i;
284 311
285 /* Update the local FADT table header length */ 312 /* Update the local FADT table header length */
286 313
287 acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt); 314 acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
288 315
289 /* Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary */ 316 /*
290 317 * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
318 * Later code will always use the X 64-bit field. Also, check for an
319 * address mismatch between the 32-bit and 64-bit address fields
320 * (FIRMWARE_CTRL/X_FIRMWARE_CTRL, DSDT/X_DSDT) which would indicate
321 * the presence of two FACS or two DSDT tables.
322 */
291 if (!acpi_gbl_FADT.Xfacs) { 323 if (!acpi_gbl_FADT.Xfacs) {
292 acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs; 324 acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs;
325 } else if (acpi_gbl_FADT.facs &&
326 (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
327 ACPI_WARNING((AE_INFO,
328 "32/64 FACS address mismatch in FADT - two FACS tables!"));
293 } 329 }
294 330
295 if (!acpi_gbl_FADT.Xdsdt) { 331 if (!acpi_gbl_FADT.Xdsdt) {
296 acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt; 332 acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
333 } else if (acpi_gbl_FADT.dsdt &&
334 (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
335 ACPI_WARNING((AE_INFO,
336 "32/64 DSDT address mismatch in FADT - two DSDT tables!"));
297 } 337 }
298 338
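Both the FACS and DSDT expansions above follow the same expand-or-warn rule; condensed into a sketch (fadt and warn are hypothetical stand-ins for acpi_gbl_FADT and ACPI_WARNING):

	/* Prefer the 64-bit X field; populate it from the 32-bit field when
	 * empty, and warn when both are set but disagree (two tables). */
	if (!fadt->Xdsdt)
		fadt->Xdsdt = (u64)fadt->dsdt;
	else if (fadt->dsdt && fadt->Xdsdt != (u64)fadt->dsdt)
		warn("32/64 DSDT address mismatch in FADT - two DSDT tables!");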
299 /* 339 /*
@@ -312,18 +352,23 @@ static void acpi_tb_convert_fadt(void)
312 } 352 }
313 353
314 /* 354 /*
315 * Expand the ACPI 1.0 32-bit V1.0 addresses to the ACPI 2.0 64-bit "X" 355 * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X"
316 * generic address structures as necessary. 356 * generic address structures as necessary. Later code will always use
357 * the 64-bit address structures.
317 */ 358 */
318 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { 359 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
319 target = 360 target64 =
320 ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT, 361 ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT,
321 fadt_info_table[i].target); 362 fadt_info_table[i].address64);
322 363
323 /* Expand only if the X target is null */ 364 /* Expand only if the 64-bit X target is null */
324 365
325 if (!target->address) { 366 if (!target64->address) {
326 acpi_tb_init_generic_address(target, 367
368 /* The space_id is always I/O for the 32-bit legacy address fields */
369
370 acpi_tb_init_generic_address(target64,
371 ACPI_ADR_SPACE_SYSTEM_IO,
327 *ACPI_ADD_PTR(u8, 372 *ACPI_ADD_PTR(u8,
328 &acpi_gbl_FADT, 373 &acpi_gbl_FADT,
329 fadt_info_table 374 fadt_info_table
@@ -332,11 +377,64 @@ static void acpi_tb_convert_fadt(void)
332 &acpi_gbl_FADT, 377 &acpi_gbl_FADT,
333 fadt_info_table 378 fadt_info_table
334 [i]. 379 [i].
335 source)); 380 address32));
381 }
382 }
383
384 /* Validate FADT values now, before we make any changes */
385
386 acpi_tb_validate_fadt();
387
388 /*
389 * Optionally check all register lengths against the default values and
390 * update them if they are incorrect.
391 */
392 if (acpi_gbl_use_default_register_widths) {
393 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
394 target64 =
395 ACPI_ADD_PTR(struct acpi_generic_address,
396 &acpi_gbl_FADT,
397 fadt_info_table[i].address64);
398
399 /*
 400			 * If this register is valid (Address != 0) and default_length > 0
 401			 * (that is, not a GPE register), check its width against the default.
402 */
403 if ((target64->address) &&
404 (fadt_info_table[i].default_length > 0) &&
405 (fadt_info_table[i].default_length !=
406 target64->bit_width)) {
407 ACPI_WARNING((AE_INFO,
408 "Invalid length for %s: %d, using default %d",
409 fadt_info_table[i].name,
410 target64->bit_width,
411 fadt_info_table[i].
412 default_length));
413
414 /* Incorrect size, set width to the default */
415
416 target64->bit_width =
417 fadt_info_table[i].default_length;
418 }
336 } 419 }
337 } 420 }
338 421
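Stripped of the pointer plumbing, the optional width fix-up in that loop reduces to the following check (hypothetical names; a default_length of 0 marks the variable-length GPE blocks, which are skipped):

	if (gas->address &&				/* register is present */
	    info->default_length > 0 &&			/* not a GPE block */
	    gas->bit_width != info->default_length) {
		warn("Invalid length for %s: %d, using default %d",
		     info->name, gas->bit_width, info->default_length);
		gas->bit_width = info->default_length;	/* force the default */
	}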
339 /* 422 /*
423 * Get the length of the individual PM1 registers (enable and status).
424 * Each register is defined to be (event block length / 2).
425 */
426 pm1_register_bit_width =
427 (u8)ACPI_DIV_2(acpi_gbl_FADT.xpm1a_event_block.bit_width);
428 pm1_register_byte_width = (u8)ACPI_DIV_8(pm1_register_bit_width);
429
430 /*
431 * Adjust the lengths of the PM1 Event Blocks so that they can be used to
432 * access the PM1 status register(s). Use (width / 2)
433 */
434 acpi_gbl_FADT.xpm1a_event_block.bit_width = pm1_register_bit_width;
435 acpi_gbl_FADT.xpm1b_event_block.bit_width = pm1_register_bit_width;
436
437 /*
340 * Calculate separate GAS structs for the PM1 Enable registers. 438 * Calculate separate GAS structs for the PM1 Enable registers.
341 * These addresses do not appear (directly) in the FADT, so it is 439 * These addresses do not appear (directly) in the FADT, so it is
342 * useful to calculate them once, here. 440 * useful to calculate them once, here.
@@ -356,14 +454,14 @@ static void acpi_tb_convert_fadt(void)
356 " PM1_EVT_LEN (%u)\n", 454 " PM1_EVT_LEN (%u)\n",
357 acpi_gbl_FADT.xpm1a_event_block.bit_width, 455 acpi_gbl_FADT.xpm1a_event_block.bit_width,
358 acpi_gbl_FADT.pm1_event_length); 456 acpi_gbl_FADT.pm1_event_length);
359 pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length);
360 457
361 /* The PM1A register block is required */ 458 /* The PM1A register block is required */
362 459
363 acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable, 460 acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
364 pm1_register_length, 461 acpi_gbl_FADT.xpm1a_event_block.space_id,
462 pm1_register_byte_width,
365 (acpi_gbl_FADT.xpm1a_event_block.address + 463 (acpi_gbl_FADT.xpm1a_event_block.address +
366 pm1_register_length)); 464 pm1_register_byte_width));
367 /* Don't forget to copy space_id of the GAS */ 465 /* Don't forget to copy space_id of the GAS */
368 acpi_gbl_xpm1a_enable.space_id = 466 acpi_gbl_xpm1a_enable.space_id =
369 acpi_gbl_FADT.xpm1a_event_block.space_id; 467 acpi_gbl_FADT.xpm1a_event_block.space_id;
@@ -379,9 +477,10 @@ static void acpi_tb_convert_fadt(void)
379 acpi_gbl_FADT.xpm1b_event_block.bit_width, 477 acpi_gbl_FADT.xpm1b_event_block.bit_width,
380 acpi_gbl_FADT.pm1_event_length); 478 acpi_gbl_FADT.pm1_event_length);
381 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable, 479 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
382 pm1_register_length, 480 acpi_gbl_FADT.xpm1b_event_block.space_id,
481 pm1_register_byte_width,
383 (acpi_gbl_FADT.xpm1b_event_block. 482 (acpi_gbl_FADT.xpm1b_event_block.
384 address + pm1_register_length)); 483 address + pm1_register_byte_width));
385 /* Don't forget to copy space_id of the GAS */ 484 /* Don't forget to copy space_id of the GAS */
386 acpi_gbl_xpm1b_enable.space_id = 485 acpi_gbl_xpm1b_enable.space_id =
387 acpi_gbl_FADT.xpm1b_event_block.space_id; 486 acpi_gbl_FADT.xpm1b_event_block.space_id;
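A worked example of the PM1 arithmetic above, assuming a hypothetical PM1a event block of bit_width 32 at I/O address 0x400: the block holds the status register followed by the enable register, each half the block wide, so

	pm1_register_bit_width  = 32 / 2 = 16;	/* ACPI_DIV_2(bit_width) */
	pm1_register_byte_width = 16 / 8 = 2;	/* ACPI_DIV_8(bit_width) */
	/* status register: 0x400, 16 bits (event block width halved above)   */
	/* enable register: 0x400 + 2 = 0x402, built as the separate GAS here */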
@@ -411,26 +510,63 @@ static void acpi_tb_convert_fadt(void)
411 510
412static void acpi_tb_validate_fadt(void) 511static void acpi_tb_validate_fadt(void)
413{ 512{
513 char *name;
414 u32 *address32; 514 u32 *address32;
415 struct acpi_generic_address *address64; 515 struct acpi_generic_address *address64;
416 u8 length; 516 u8 length;
417 u32 i; 517 u32 i;
418 518
419 /* Examine all of the 64-bit extended address fields (X fields) */ 519 /*
520 * Check for FACS and DSDT address mismatches. An address mismatch between
521 * the 32-bit and 64-bit address fields (FIRMWARE_CTRL/X_FIRMWARE_CTRL and
522 * DSDT/X_DSDT) would indicate the presence of two FACS or two DSDT tables.
523 */
524 if (acpi_gbl_FADT.facs &&
525 (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
526 ACPI_WARNING((AE_INFO,
527 "32/64X FACS address mismatch in FADT - "
528 "two FACS tables! %8.8X/%8.8X%8.8X",
529 acpi_gbl_FADT.facs,
530 ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
531 }
420 532
421 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { 533 if (acpi_gbl_FADT.dsdt &&
534 (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
535 ACPI_WARNING((AE_INFO,
536 "32/64X DSDT address mismatch in FADT - "
537 "two DSDT tables! %8.8X/%8.8X%8.8X",
538 acpi_gbl_FADT.dsdt,
539 ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
540 }
422 541
423 /* Generate pointers to the 32-bit and 64-bit addresses and get the length */ 542 /* Examine all of the 64-bit extended address fields (X fields) */
424 543
425 address64 = 544 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
426 ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT, 545 /*
427 fadt_info_table[i].target); 546 * Generate pointers to the 32-bit and 64-bit addresses, get the
547 * register length (width), and the register name
548 */
549 address64 = ACPI_ADD_PTR(struct acpi_generic_address,
550 &acpi_gbl_FADT,
551 fadt_info_table[i].address64);
428 address32 = 552 address32 =
429 ACPI_ADD_PTR(u32, &acpi_gbl_FADT, 553 ACPI_ADD_PTR(u32, &acpi_gbl_FADT,
430 fadt_info_table[i].source); 554 fadt_info_table[i].address32);
431 length = 555 length =
432 *ACPI_ADD_PTR(u8, &acpi_gbl_FADT, 556 *ACPI_ADD_PTR(u8, &acpi_gbl_FADT,
433 fadt_info_table[i].length); 557 fadt_info_table[i].length);
558 name = fadt_info_table[i].name;
559
560 /*
561 * For each extended field, check for length mismatch between the
562 * legacy length field and the corresponding 64-bit X length field.
563 */
564 if (address64 && (address64->bit_width != ACPI_MUL_8(length))) {
565 ACPI_WARNING((AE_INFO,
566 "32/64X length mismatch in %s: %d/%d",
567 name, ACPI_MUL_8(length),
568 address64->bit_width));
569 }
434 570
435 if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) { 571 if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
436 /* 572 /*
@@ -439,8 +575,8 @@ static void acpi_tb_validate_fadt(void)
439 */ 575 */
440 if (!address64->address || !length) { 576 if (!address64->address || !length) {
441 ACPI_ERROR((AE_INFO, 577 ACPI_ERROR((AE_INFO,
442 "Required field \"%s\" has zero address and/or length: %8.8X%8.8X/%X", 578 "Required field %s has zero address and/or length: %8.8X%8.8X/%X",
443 fadt_info_table[i].name, 579 name,
444 ACPI_FORMAT_UINT64(address64-> 580 ACPI_FORMAT_UINT64(address64->
445 address), 581 address),
446 length)); 582 length));
@@ -453,8 +589,8 @@ static void acpi_tb_validate_fadt(void)
453 if ((address64->address && !length) 589 if ((address64->address && !length)
454 || (!address64->address && length)) { 590 || (!address64->address && length)) {
455 ACPI_WARNING((AE_INFO, 591 ACPI_WARNING((AE_INFO,
456 "Optional field \"%s\" has zero address or length: %8.8X%8.8X/%X", 592 "Optional field %s has zero address or length: %8.8X%8.8X/%X",
457 fadt_info_table[i].name, 593 name,
458 ACPI_FORMAT_UINT64(address64-> 594 ACPI_FORMAT_UINT64(address64->
459 address), 595 address),
460 length)); 596 length));
@@ -466,8 +602,8 @@ static void acpi_tb_validate_fadt(void)
466 if (address64->address && *address32 && 602 if (address64->address && *address32 &&
467 (address64->address != (u64) * address32)) { 603 (address64->address != (u64) * address32)) {
468 ACPI_ERROR((AE_INFO, 604 ACPI_ERROR((AE_INFO,
469 "32/64X address mismatch in \"%s\": [%8.8X] [%8.8X%8.8X], using 64X", 605 "32/64X address mismatch in %s: %8.8X/%8.8X%8.8X, using 64X",
470 fadt_info_table[i].name, *address32, 606 name, *address32,
471 ACPI_FORMAT_UINT64(address64->address))); 607 ACPI_FORMAT_UINT64(address64->address)));
472 } 608 }
473 } 609 }
diff --git a/drivers/acpi/tables/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 531584defbb8..1054dfd49207 100644
--- a/drivers/acpi/tables/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/actables.h> 45#include "accommon.h"
46#include "actables.h"
46 47
47#define _COMPONENT ACPI_TABLES 48#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbfind") 49ACPI_MODULE_NAME("tbfind")
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 18747ce8dd2f..37374b21969d 100644
--- a/drivers/acpi/tables/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -42,8 +42,9 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include <acpi/actables.h> 46#include "acnamesp.h"
47#include "actables.h"
47 48
48#define _COMPONENT ACPI_TABLES 49#define _COMPONENT ACPI_TABLES
49ACPI_MODULE_NAME("tbinstal") 50ACPI_MODULE_NAME("tbinstal")
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 0cc92ef5236f..9684cc827930 100644
--- a/drivers/acpi/tables/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/actables.h> 45#include "accommon.h"
46#include "actables.h"
46 47
47#define _COMPONENT ACPI_TABLES 48#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbutils") 49ACPI_MODULE_NAME("tbutils")
@@ -113,6 +114,30 @@ acpi_tb_check_xsdt(acpi_physical_address address)
113 114
114/******************************************************************************* 115/*******************************************************************************
115 * 116 *
117 * FUNCTION: acpi_tb_initialize_facs
118 *
119 * PARAMETERS: None
120 *
121 * RETURN: Status
122 *
 123 * DESCRIPTION: Create a permanent mapping for the FACS and save it in a global
124 * for accessing the Global Lock and Firmware Waking Vector
125 *
126 ******************************************************************************/
127
128acpi_status acpi_tb_initialize_facs(void)
129{
130 acpi_status status;
131
132 status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
133 ACPI_CAST_INDIRECT_PTR(struct
134 acpi_table_header,
135 &acpi_gbl_FACS));
136 return status;
137}
138
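A sketch of the intended call pattern for the new helper (this mirrors the utxface.c hunk later in this patch; error handling abbreviated):

	/* Map the FACS once during init, then use the cached global. */
	status = acpi_tb_initialize_facs();
	if (ACPI_FAILURE(status))
		return_ACPI_STATUS(status);
	/* acpi_gbl_FACS->global_lock and ->firmware_waking_vector are
	 * now reachable through the permanent mapping. */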
139/*******************************************************************************
140 *
116 * FUNCTION: acpi_tb_tables_loaded 141 * FUNCTION: acpi_tb_tables_loaded
117 * 142 *
118 * PARAMETERS: None 143 * PARAMETERS: None
@@ -420,7 +445,8 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
420 445
421 /* Differentiate between RSDT and XSDT root tables */ 446 /* Differentiate between RSDT and XSDT root tables */
422 447
423 if (rsdp->revision > 1 && rsdp->xsdt_physical_address) { 448 if (rsdp->revision > 1 && rsdp->xsdt_physical_address
449 && !acpi_rsdt_forced) {
424 /* 450 /*
425 * Root table is an XSDT (64-bit physical addresses). We must use the 451 * Root table is an XSDT (64-bit physical addresses). We must use the
426 * XSDT if the revision is > 1 and the XSDT pointer is present, as per 452 * XSDT if the revision is > 1 and the XSDT pointer is present, as per
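The root-table choice after this change can be summarized as below (acpi_rsdt_forced is a new global, presumably driven by a boot-time override; a hedged sketch, not the literal code):

	int use_xsdt = rsdp->revision > 1 &&
		       rsdp->xsdt_physical_address &&
		       !acpi_rsdt_forced;
	/* use_xsdt: walk the XSDT (64-bit entries);
	 * otherwise: fall back to the RSDT (32-bit entries). */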
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/acpica/tbxface.c
index fd7770aa1061..c3e841f3cde9 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -43,8 +43,9 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acnamesp.h> 46#include "accommon.h"
47#include <acpi/actables.h> 47#include "acnamesp.h"
48#include "actables.h"
48 49
49#define _COMPONENT ACPI_TABLES 50#define _COMPONENT ACPI_TABLES
50ACPI_MODULE_NAME("tbxface") 51ACPI_MODULE_NAME("tbxface")
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 2d157e0f98d2..b7fc8dd43341 100644
--- a/drivers/acpi/tables/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/actables.h> 45#include "accommon.h"
46#include "actables.h"
46 47
47#define _COMPONENT ACPI_TABLES 48#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbxfroot") 49ACPI_MODULE_NAME("tbxfroot")
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 241c535c1753..7580f6b3069e 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acdebug.h> 45#include "accommon.h"
46#include "acdebug.h"
46 47
47#define _COMPONENT ACPI_UTILITIES 48#define _COMPONENT ACPI_UTILITIES
48ACPI_MODULE_NAME("utalloc") 49ACPI_MODULE_NAME("utalloc")
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 5b2f7c27b705..b0dcfd3c872a 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include "acnamesp.h"
46 47
47 48
48#define _COMPONENT ACPI_UTILITIES 49#define _COMPONENT ACPI_UTILITIES
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/acpica/utdebug.c
index fd66ecb6741e..38821f53042c 100644
--- a/drivers/acpi/utilities/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -42,6 +42,7 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h"
45 46
46#define _COMPONENT ACPI_UTILITIES 47#define _COMPONENT ACPI_UTILITIES
47ACPI_MODULE_NAME("utdebug") 48ACPI_MODULE_NAME("utdebug")
@@ -136,7 +137,7 @@ static const char *acpi_ut_trim_function_name(const char *function_name)
136 137
137/******************************************************************************* 138/*******************************************************************************
138 * 139 *
139 * FUNCTION: acpi_ut_debug_print 140 * FUNCTION: acpi_debug_print
140 * 141 *
141 * PARAMETERS: requested_debug_level - Requested debug print level 142 * PARAMETERS: requested_debug_level - Requested debug print level
142 * line_number - Caller's line number (for error output) 143 * line_number - Caller's line number (for error output)
@@ -154,11 +155,11 @@ static const char *acpi_ut_trim_function_name(const char *function_name)
154 ******************************************************************************/ 155 ******************************************************************************/
155 156
156void ACPI_INTERNAL_VAR_XFACE 157void ACPI_INTERNAL_VAR_XFACE
157acpi_ut_debug_print(u32 requested_debug_level, 158acpi_debug_print(u32 requested_debug_level,
158 u32 line_number, 159 u32 line_number,
159 const char *function_name, 160 const char *function_name,
160 const char *module_name, 161 const char *module_name,
161 u32 component_id, const char *format, ...) 162 u32 component_id, const char *format, ...)
162{ 163{
163 acpi_thread_id thread_id; 164 acpi_thread_id thread_id;
164 va_list args; 165 va_list args;
@@ -205,11 +206,11 @@ acpi_ut_debug_print(u32 requested_debug_level,
205 va_end(args); 206 va_end(args);
206} 207}
207 208
208ACPI_EXPORT_SYMBOL(acpi_ut_debug_print) 209ACPI_EXPORT_SYMBOL(acpi_debug_print)
209 210
210/******************************************************************************* 211/*******************************************************************************
211 * 212 *
212 * FUNCTION: acpi_ut_debug_print_raw 213 * FUNCTION: acpi_debug_print_raw
213 * 214 *
214 * PARAMETERS: requested_debug_level - Requested debug print level 215 * PARAMETERS: requested_debug_level - Requested debug print level
215 * line_number - Caller's line number 216 * line_number - Caller's line number
@@ -226,11 +227,11 @@ ACPI_EXPORT_SYMBOL(acpi_ut_debug_print)
226 * 227 *
227 ******************************************************************************/ 228 ******************************************************************************/
228void ACPI_INTERNAL_VAR_XFACE 229void ACPI_INTERNAL_VAR_XFACE
229acpi_ut_debug_print_raw(u32 requested_debug_level, 230acpi_debug_print_raw(u32 requested_debug_level,
230 u32 line_number, 231 u32 line_number,
231 const char *function_name, 232 const char *function_name,
232 const char *module_name, 233 const char *module_name,
233 u32 component_id, const char *format, ...) 234 u32 component_id, const char *format, ...)
234{ 235{
235 va_list args; 236 va_list args;
236 237
@@ -244,7 +245,7 @@ acpi_ut_debug_print_raw(u32 requested_debug_level,
244 va_end(args); 245 va_end(args);
245} 246}
246 247
247ACPI_EXPORT_SYMBOL(acpi_ut_debug_print_raw) 248ACPI_EXPORT_SYMBOL(acpi_debug_print_raw)
248 249
249/******************************************************************************* 250/*******************************************************************************
250 * 251 *
@@ -270,9 +271,9 @@ acpi_ut_trace(u32 line_number,
270 acpi_gbl_nesting_level++; 271 acpi_gbl_nesting_level++;
271 acpi_ut_track_stack_ptr(); 272 acpi_ut_track_stack_ptr();
272 273
273 acpi_ut_debug_print(ACPI_LV_FUNCTIONS, 274 acpi_debug_print(ACPI_LV_FUNCTIONS,
274 line_number, function_name, module_name, 275 line_number, function_name, module_name, component_id,
275 component_id, "%s\n", acpi_gbl_fn_entry_str); 276 "%s\n", acpi_gbl_fn_entry_str);
276} 277}
277 278
278ACPI_EXPORT_SYMBOL(acpi_ut_trace) 279ACPI_EXPORT_SYMBOL(acpi_ut_trace)
@@ -301,10 +302,9 @@ acpi_ut_trace_ptr(u32 line_number,
301 acpi_gbl_nesting_level++; 302 acpi_gbl_nesting_level++;
302 acpi_ut_track_stack_ptr(); 303 acpi_ut_track_stack_ptr();
303 304
304 acpi_ut_debug_print(ACPI_LV_FUNCTIONS, 305 acpi_debug_print(ACPI_LV_FUNCTIONS,
305 line_number, function_name, module_name, 306 line_number, function_name, module_name, component_id,
306 component_id, "%s %p\n", acpi_gbl_fn_entry_str, 307 "%s %p\n", acpi_gbl_fn_entry_str, pointer);
307 pointer);
308} 308}
309 309
310/******************************************************************************* 310/*******************************************************************************
@@ -333,10 +333,9 @@ acpi_ut_trace_str(u32 line_number,
333 acpi_gbl_nesting_level++; 333 acpi_gbl_nesting_level++;
334 acpi_ut_track_stack_ptr(); 334 acpi_ut_track_stack_ptr();
335 335
336 acpi_ut_debug_print(ACPI_LV_FUNCTIONS, 336 acpi_debug_print(ACPI_LV_FUNCTIONS,
337 line_number, function_name, module_name, 337 line_number, function_name, module_name, component_id,
338 component_id, "%s %s\n", acpi_gbl_fn_entry_str, 338 "%s %s\n", acpi_gbl_fn_entry_str, string);
339 string);
340} 339}
341 340
342/******************************************************************************* 341/*******************************************************************************
@@ -365,10 +364,9 @@ acpi_ut_trace_u32(u32 line_number,
365 acpi_gbl_nesting_level++; 364 acpi_gbl_nesting_level++;
366 acpi_ut_track_stack_ptr(); 365 acpi_ut_track_stack_ptr();
367 366
368 acpi_ut_debug_print(ACPI_LV_FUNCTIONS, 367 acpi_debug_print(ACPI_LV_FUNCTIONS,
369 line_number, function_name, module_name, 368 line_number, function_name, module_name, component_id,
370 component_id, "%s %08X\n", acpi_gbl_fn_entry_str, 369 "%s %08X\n", acpi_gbl_fn_entry_str, integer);
371 integer);
372} 370}
373 371
374/******************************************************************************* 372/*******************************************************************************
@@ -393,9 +391,9 @@ acpi_ut_exit(u32 line_number,
393 const char *module_name, u32 component_id) 391 const char *module_name, u32 component_id)
394{ 392{
395 393
396 acpi_ut_debug_print(ACPI_LV_FUNCTIONS, 394 acpi_debug_print(ACPI_LV_FUNCTIONS,
397 line_number, function_name, module_name, 395 line_number, function_name, module_name, component_id,
398 component_id, "%s\n", acpi_gbl_fn_exit_str); 396 "%s\n", acpi_gbl_fn_exit_str);
399 397
400 acpi_gbl_nesting_level--; 398 acpi_gbl_nesting_level--;
401} 399}
@@ -426,17 +424,16 @@ acpi_ut_status_exit(u32 line_number,
426{ 424{
427 425
428 if (ACPI_SUCCESS(status)) { 426 if (ACPI_SUCCESS(status)) {
429 acpi_ut_debug_print(ACPI_LV_FUNCTIONS, 427 acpi_debug_print(ACPI_LV_FUNCTIONS,
430 line_number, function_name, module_name, 428 line_number, function_name, module_name,
431 component_id, "%s %s\n", 429 component_id, "%s %s\n", acpi_gbl_fn_exit_str,
432 acpi_gbl_fn_exit_str, 430 acpi_format_exception(status));
433 acpi_format_exception(status));
434 } else { 431 } else {
435 acpi_ut_debug_print(ACPI_LV_FUNCTIONS, 432 acpi_debug_print(ACPI_LV_FUNCTIONS,
436 line_number, function_name, module_name, 433 line_number, function_name, module_name,
437 component_id, "%s ****Exception****: %s\n", 434 component_id, "%s ****Exception****: %s\n",
438 acpi_gbl_fn_exit_str, 435 acpi_gbl_fn_exit_str,
439 acpi_format_exception(status)); 436 acpi_format_exception(status));
440 } 437 }
441 438
442 acpi_gbl_nesting_level--; 439 acpi_gbl_nesting_level--;
@@ -467,10 +464,10 @@ acpi_ut_value_exit(u32 line_number,
467 u32 component_id, acpi_integer value) 464 u32 component_id, acpi_integer value)
468{ 465{
469 466
470 acpi_ut_debug_print(ACPI_LV_FUNCTIONS, 467 acpi_debug_print(ACPI_LV_FUNCTIONS,
471 line_number, function_name, module_name, 468 line_number, function_name, module_name, component_id,
472 component_id, "%s %8.8X%8.8X\n", 469 "%s %8.8X%8.8X\n", acpi_gbl_fn_exit_str,
473 acpi_gbl_fn_exit_str, ACPI_FORMAT_UINT64(value)); 470 ACPI_FORMAT_UINT64(value));
474 471
475 acpi_gbl_nesting_level--; 472 acpi_gbl_nesting_level--;
476} 473}
@@ -499,9 +496,9 @@ acpi_ut_ptr_exit(u32 line_number,
499 const char *module_name, u32 component_id, u8 *ptr) 496 const char *module_name, u32 component_id, u8 *ptr)
500{ 497{
501 498
502 acpi_ut_debug_print(ACPI_LV_FUNCTIONS, 499 acpi_debug_print(ACPI_LV_FUNCTIONS,
503 line_number, function_name, module_name, 500 line_number, function_name, module_name, component_id,
504 component_id, "%s %p\n", acpi_gbl_fn_exit_str, ptr); 501 "%s %p\n", acpi_gbl_fn_exit_str, ptr);
505 502
506 acpi_gbl_nesting_level--; 503 acpi_gbl_nesting_level--;
507} 504}
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/acpica/utdelete.c
index d197c6b29e17..a0be9e39531e 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -42,9 +42,10 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acinterp.h> 45#include "accommon.h"
46#include <acpi/acnamesp.h> 46#include "acinterp.h"
47#include <acpi/acevents.h> 47#include "acnamesp.h"
48#include "acevents.h"
48 49
49#define _COMPONENT ACPI_UTILITIES 50#define _COMPONENT ACPI_UTILITIES
50ACPI_MODULE_NAME("utdelete") 51ACPI_MODULE_NAME("utdelete")
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/acpica/uteval.c
index 352747e49c7a..da9450bc60f7 100644
--- a/drivers/acpi/utilities/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -42,8 +42,9 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include <acpi/acinterp.h> 46#include "acnamesp.h"
47#include "acinterp.h"
47 48
48#define _COMPONENT ACPI_UTILITIES 49#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("uteval") 50ACPI_MODULE_NAME("uteval")
@@ -129,7 +130,7 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
129 130
130 /* The interface is supported */ 131 /* The interface is supported */
131 132
132 return_ACPI_STATUS(AE_CTRL_TERMINATE); 133 return_ACPI_STATUS(AE_OK);
133 } 134 }
134 } 135 }
135 136
@@ -143,13 +144,13 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
143 144
144 /* The interface is supported */ 145 /* The interface is supported */
145 146
146 return_ACPI_STATUS(AE_CTRL_TERMINATE); 147 return_ACPI_STATUS(AE_OK);
147 } 148 }
148 149
149 /* The interface is not supported */ 150 /* The interface is not supported */
150 151
151 return_desc->integer.value = 0; 152 return_desc->integer.value = 0;
152 return_ACPI_STATUS(AE_CTRL_TERMINATE); 153 return_ACPI_STATUS(AE_OK);
153} 154}
154 155
155/******************************************************************************* 156/*******************************************************************************
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 17ed5ac840f7..a3ab9d9da299 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -44,11 +44,11 @@
44#define DEFINE_ACPI_GLOBALS 44#define DEFINE_ACPI_GLOBALS
45 45
46#include <acpi/acpi.h> 46#include <acpi/acpi.h>
47#include <acpi/acnamesp.h> 47#include "accommon.h"
48#include "acnamesp.h"
48 49
49ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
50#define _COMPONENT ACPI_UTILITIES 50#define _COMPONENT ACPI_UTILITIES
51 ACPI_MODULE_NAME("utglobal") 51ACPI_MODULE_NAME("utglobal")
52 52
53/******************************************************************************* 53/*******************************************************************************
54 * 54 *
@@ -352,7 +352,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
352 "PCI_Config", 352 "PCI_Config",
353 "EmbeddedControl", 353 "EmbeddedControl",
354 "SMBus", 354 "SMBus",
355 "CMOS", 355 "SystemCMOS",
356 "PCIBARTarget", 356 "PCIBARTarget",
357 "DataTable" 357 "DataTable"
358}; 358};
@@ -756,6 +756,7 @@ acpi_status acpi_ut_init_globals(void)
756 acpi_gbl_gpe_xrupt_list_head = NULL; 756 acpi_gbl_gpe_xrupt_list_head = NULL;
757 acpi_gbl_gpe_fadt_blocks[0] = NULL; 757 acpi_gbl_gpe_fadt_blocks[0] = NULL;
758 acpi_gbl_gpe_fadt_blocks[1] = NULL; 758 acpi_gbl_gpe_fadt_blocks[1] = NULL;
759 acpi_current_gpe_count = 0;
759 760
760 /* Global handlers */ 761 /* Global handlers */
761 762
@@ -771,6 +772,7 @@ acpi_status acpi_ut_init_globals(void)
771 acpi_gbl_global_lock_mutex = NULL; 772 acpi_gbl_global_lock_mutex = NULL;
772 acpi_gbl_global_lock_acquired = FALSE; 773 acpi_gbl_global_lock_acquired = FALSE;
773 acpi_gbl_global_lock_handle = 0; 774 acpi_gbl_global_lock_handle = 0;
775 acpi_gbl_global_lock_present = FALSE;
774 776
775 /* Miscellaneous variables */ 777 /* Miscellaneous variables */
776 778
@@ -815,5 +817,7 @@ acpi_status acpi_ut_init_globals(void)
815 return_ACPI_STATUS(AE_OK); 817 return_ACPI_STATUS(AE_OK);
816} 818}
817 819
820ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
818ACPI_EXPORT_SYMBOL(acpi_dbg_level) 821ACPI_EXPORT_SYMBOL(acpi_dbg_level)
819ACPI_EXPORT_SYMBOL(acpi_dbg_layer) 822ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
823ACPI_EXPORT_SYMBOL(acpi_current_gpe_count)
diff --git a/drivers/acpi/utilities/utinit.c b/drivers/acpi/acpica/utinit.c
index cae515fc02d3..a54ca84eb362 100644
--- a/drivers/acpi/utilities/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -42,9 +42,10 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include <acpi/acevents.h> 46#include "acnamesp.h"
47#include <acpi/actables.h> 47#include "acevents.h"
48#include "actables.h"
48 49
49#define _COMPONENT ACPI_UTILITIES 50#define _COMPONENT ACPI_UTILITIES
50ACPI_MODULE_NAME("utinit") 51ACPI_MODULE_NAME("utinit")
diff --git a/drivers/acpi/utilities/utmath.c b/drivers/acpi/acpica/utmath.c
index c927324fdd26..c9f682d640ef 100644
--- a/drivers/acpi/utilities/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -42,6 +42,7 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h"
45 46
46#define _COMPONENT ACPI_UTILITIES 47#define _COMPONENT ACPI_UTILITIES
47ACPI_MODULE_NAME("utmath") 48ACPI_MODULE_NAME("utmath")
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 9089a158a874..c1f7f4e1a72d 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -44,7 +44,8 @@
44#include <linux/module.h> 44#include <linux/module.h>
45 45
46#include <acpi/acpi.h> 46#include <acpi/acpi.h>
47#include <acpi/acnamesp.h> 47#include "accommon.h"
48#include "acnamesp.h"
48 49
49#define _COMPONENT ACPI_UTILITIES 50#define _COMPONENT ACPI_UTILITIES
50ACPI_MODULE_NAME("utmisc") 51ACPI_MODULE_NAME("utmisc")
@@ -1016,7 +1017,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
1016 1017
1017/******************************************************************************* 1018/*******************************************************************************
1018 * 1019 *
1019 * FUNCTION: acpi_ut_error, acpi_ut_warning, acpi_ut_info 1020 * FUNCTION: acpi_error, acpi_exception, acpi_warning, acpi_info
1020 * 1021 *
1021 * PARAMETERS: module_name - Caller's module name (for error output) 1022 * PARAMETERS: module_name - Caller's module name (for error output)
1022 * line_number - Caller's line number (for error output) 1023 * line_number - Caller's line number (for error output)
@@ -1029,7 +1030,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
1029 ******************************************************************************/ 1030 ******************************************************************************/
1030 1031
1031void ACPI_INTERNAL_VAR_XFACE 1032void ACPI_INTERNAL_VAR_XFACE
1032acpi_ut_error(const char *module_name, u32 line_number, const char *format, ...) 1033acpi_error(const char *module_name, u32 line_number, const char *format, ...)
1033{ 1034{
1034 va_list args; 1035 va_list args;
1035 1036
@@ -1042,8 +1043,8 @@ acpi_ut_error(const char *module_name, u32 line_number, const char *format, ...)
1042} 1043}
1043 1044
1044void ACPI_INTERNAL_VAR_XFACE 1045void ACPI_INTERNAL_VAR_XFACE
1045acpi_ut_exception(const char *module_name, 1046acpi_exception(const char *module_name,
1046 u32 line_number, acpi_status status, const char *format, ...) 1047 u32 line_number, acpi_status status, const char *format, ...)
1047{ 1048{
1048 va_list args; 1049 va_list args;
1049 1050
@@ -1056,11 +1057,8 @@ acpi_ut_exception(const char *module_name,
1056 va_end(args); 1057 va_end(args);
1057} 1058}
1058 1059
1059EXPORT_SYMBOL(acpi_ut_exception);
1060
1061void ACPI_INTERNAL_VAR_XFACE 1060void ACPI_INTERNAL_VAR_XFACE
1062acpi_ut_warning(const char *module_name, 1061acpi_warning(const char *module_name, u32 line_number, const char *format, ...)
1063 u32 line_number, const char *format, ...)
1064{ 1062{
1065 va_list args; 1063 va_list args;
1066 1064
@@ -1073,7 +1071,7 @@ acpi_ut_warning(const char *module_name,
1073} 1071}
1074 1072
1075void ACPI_INTERNAL_VAR_XFACE 1073void ACPI_INTERNAL_VAR_XFACE
1076acpi_ut_info(const char *module_name, u32 line_number, const char *format, ...) 1074acpi_info(const char *module_name, u32 line_number, const char *format, ...)
1077{ 1075{
1078 va_list args; 1076 va_list args;
1079 1077
@@ -1088,3 +1086,8 @@ acpi_ut_info(const char *module_name, u32 line_number, const char *format, ...)
1088 acpi_os_printf("\n"); 1086 acpi_os_printf("\n");
1089 va_end(args); 1087 va_end(args);
1090} 1088}
1089
1090ACPI_EXPORT_SYMBOL(acpi_error)
1091ACPI_EXPORT_SYMBOL(acpi_exception)
1092ACPI_EXPORT_SYMBOL(acpi_warning)
1093ACPI_EXPORT_SYMBOL(acpi_info)
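These renamed helpers are normally reached through the ACPI_ERROR/ACPI_WARNING/ACPI_INFO wrapper macros rather than called directly; the double parentheses seen throughout this patch let a variadic format string travel through a single macro argument. A usage sketch:

	/* AE_INFO expands to the module name and line number that
	 * acpi_warning(module, line, fmt, ...) expects underneath. */
	ACPI_WARNING((AE_INFO, "Invalid length for %s: %d", name, length));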
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 7331dde9e1b3..14eb52c4d647 100644
--- a/drivers/acpi/utilities/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -42,6 +42,7 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h"
45 46
46#define _COMPONENT ACPI_UTILITIES 47#define _COMPONENT ACPI_UTILITIES
47ACPI_MODULE_NAME("utmutex") 48ACPI_MODULE_NAME("utmutex")
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/acpica/utobject.c
index 4bef3cfbaccb..fd5ea7543e5b 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include "accommon.h"
46#include "acnamesp.h"
46 47
47#define _COMPONENT ACPI_UTILITIES 48#define _COMPONENT ACPI_UTILITIES
48ACPI_MODULE_NAME("utobject") 49ACPI_MODULE_NAME("utobject")
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/acpica/utresrc.c
index c3e3e1308edc..91b7c00236f4 100644
--- a/drivers/acpi/utilities/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -42,7 +42,8 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/amlresrc.h> 45#include "accommon.h"
46#include "amlresrc.h"
46 47
47#define _COMPONENT ACPI_UTILITIES 48#define _COMPONENT ACPI_UTILITIES
48ACPI_MODULE_NAME("utresrc") 49ACPI_MODULE_NAME("utresrc")
diff --git a/drivers/acpi/utilities/utstate.c b/drivers/acpi/acpica/utstate.c
index 63a6d3d77d88..0440c958f5a4 100644
--- a/drivers/acpi/utilities/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -42,6 +42,7 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h"
45 46
46#define _COMPONENT ACPI_UTILITIES 47#define _COMPONENT ACPI_UTILITIES
47ACPI_MODULE_NAME("utstate") 48ACPI_MODULE_NAME("utstate")
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/acpica/utxface.c
index c198a4d40583..078a22728c6b 100644
--- a/drivers/acpi/utilities/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -42,9 +42,11 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acevents.h> 45#include "accommon.h"
46#include <acpi/acnamesp.h> 46#include "acevents.h"
47#include <acpi/acdebug.h> 47#include "acnamesp.h"
48#include "acdebug.h"
49#include "actables.h"
48 50
49#define _COMPONENT ACPI_UTILITIES 51#define _COMPONENT ACPI_UTILITIES
50ACPI_MODULE_NAME("utxface") 52ACPI_MODULE_NAME("utxface")
@@ -148,6 +150,16 @@ acpi_status acpi_enable_subsystem(u32 flags)
148 } 150 }
149 151
150 /* 152 /*
153 * Obtain a permanent mapping for the FACS. This is required for the
154 * Global Lock and the Firmware Waking Vector
155 */
156 status = acpi_tb_initialize_facs();
157 if (ACPI_FAILURE(status)) {
158 ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
159 return_ACPI_STATUS(status);
160 }
161
162 /*
151 * Install the default op_region handlers. These are installed unless 163 * Install the default op_region handlers. These are installed unless
152 * other handlers have already been installed via the 164 * other handlers have already been installed via the
153 * install_address_space_handler interface. 165 * install_address_space_handler interface.
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 1423b0c0cd2e..65132f920459 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -471,7 +471,7 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
471 471
472static int acpi_battery_update(struct acpi_battery *battery) 472static int acpi_battery_update(struct acpi_battery *battery)
473{ 473{
474 int result; 474 int result, old_present = acpi_battery_present(battery);
475 result = acpi_battery_get_status(battery); 475 result = acpi_battery_get_status(battery);
476 if (result) 476 if (result)
477 return result; 477 return result;
@@ -482,7 +482,8 @@ static int acpi_battery_update(struct acpi_battery *battery)
482 return 0; 482 return 0;
483 } 483 }
484#endif 484#endif
485 if (!battery->update_time) { 485 if (!battery->update_time ||
486 old_present != acpi_battery_present(battery)) {
486 result = acpi_battery_get_info(battery); 487 result = acpi_battery_get_info(battery);
487 if (result) 488 if (result)
488 return result; 489 return result;
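The battery change above amounts to: always do the cheap status read, but re-read the slow static info block not only on first use but whenever battery presence flips, so a swapped battery's data is refreshed. Consolidated as a sketch:

	int result, old_present = acpi_battery_present(battery);

	result = acpi_battery_get_status(battery);	/* cheap, every poll */
	if (result)
		return result;
	if (!battery->update_time ||			/* first read, or   */
	    old_present != acpi_battery_present(battery)) /* battery swapped */
		result = acpi_battery_get_info(battery);  /* slow static data */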
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
index 307963bd1043..332fe4b21708 100644
--- a/drivers/acpi/cm_sbs.c
+++ b/drivers/acpi/cm_sbs.c
@@ -27,9 +27,6 @@
27#include <linux/seq_file.h> 27#include <linux/seq_file.h>
28#include <acpi/acpi_bus.h> 28#include <acpi/acpi_bus.h>
29#include <acpi/acpi_drivers.h> 29#include <acpi/acpi_drivers.h>
30#include <acpi/acmacros.h>
31#include <acpi/actypes.h>
32#include <acpi/acutils.h>
33 30
34ACPI_MODULE_NAME("cm_sbs"); 31ACPI_MODULE_NAME("cm_sbs");
35#define ACPI_AC_CLASS "ac_adapter" 32#define ACPI_AC_CLASS "ac_adapter"
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index c48396892008..20223cbd0d1c 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -9,7 +9,6 @@
9#include <linux/moduleparam.h> 9#include <linux/moduleparam.h>
10#include <asm/uaccess.h> 10#include <asm/uaccess.h>
11#include <acpi/acpi_drivers.h> 11#include <acpi/acpi_drivers.h>
12#include <acpi/acglobal.h>
13 12
14#define _COMPONENT ACPI_SYSTEM_COMPONENT 13#define _COMPONENT ACPI_SYSTEM_COMPONENT
15ACPI_MODULE_NAME("debug"); 14ACPI_MODULE_NAME("debug");
diff --git a/drivers/acpi/dispatcher/Makefile b/drivers/acpi/dispatcher/Makefile
deleted file mode 100644
index eb7e602a83cd..000000000000
--- a/drivers/acpi/dispatcher/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1#
2# Makefile for all Linux ACPI interpreter subdirectories
3#
4
5obj-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
6 dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \
7 dsinit.o
8
9EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 30f3ef236ecb..8dfcbb8aff73 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -42,7 +42,6 @@
42#include <asm/io.h> 42#include <asm/io.h>
43#include <acpi/acpi_bus.h> 43#include <acpi/acpi_bus.h>
44#include <acpi/acpi_drivers.h> 44#include <acpi/acpi_drivers.h>
45#include <acpi/actypes.h>
46 45
47#define ACPI_EC_CLASS "embedded_controller" 46#define ACPI_EC_CLASS "embedded_controller"
48#define ACPI_EC_DEVICE_NAME "Embedded Controller" 47#define ACPI_EC_DEVICE_NAME "Embedded Controller"
@@ -370,7 +369,7 @@ unlock:
370 * Note: samsung nv5000 doesn't work with ec burst mode. 369 * Note: samsung nv5000 doesn't work with ec burst mode.
371 * http://bugzilla.kernel.org/show_bug.cgi?id=4980 370 * http://bugzilla.kernel.org/show_bug.cgi?id=4980
372 */ 371 */
373int acpi_ec_burst_enable(struct acpi_ec *ec) 372static int acpi_ec_burst_enable(struct acpi_ec *ec)
374{ 373{
375 u8 d; 374 u8 d;
376 struct transaction t = {.command = ACPI_EC_BURST_ENABLE, 375 struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
@@ -380,7 +379,7 @@ int acpi_ec_burst_enable(struct acpi_ec *ec)
380 return acpi_ec_transaction(ec, &t, 0); 379 return acpi_ec_transaction(ec, &t, 0);
381} 380}
382 381
383int acpi_ec_burst_disable(struct acpi_ec *ec) 382static int acpi_ec_burst_disable(struct acpi_ec *ec)
384{ 383{
385 struct transaction t = {.command = ACPI_EC_BURST_DISABLE, 384 struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
386 .wdata = NULL, .rdata = NULL, 385 .wdata = NULL, .rdata = NULL,
@@ -756,10 +755,15 @@ static acpi_status
756acpi_ec_register_query_methods(acpi_handle handle, u32 level, 755acpi_ec_register_query_methods(acpi_handle handle, u32 level,
757 void *context, void **return_value) 756 void *context, void **return_value)
758{ 757{
759 struct acpi_namespace_node *node = handle; 758 char node_name[5];
759 struct acpi_buffer buffer = { sizeof(node_name), node_name };
760 struct acpi_ec *ec = context; 760 struct acpi_ec *ec = context;
761 int value = 0; 761 int value = 0;
762 if (sscanf(node->name.ascii, "_Q%x", &value) == 1) { 762 acpi_status status;
763
764 status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
765
766 if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1) {
763 acpi_ec_add_query_handler(ec, value, handle, NULL, NULL); 767 acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
764 } 768 }
765 return AE_OK; 769 return AE_OK;
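The rewritten walker no longer dereferences struct acpi_namespace_node directly; it asks ACPICA for the single-segment name and then parses the _Qxx method number. Condensed:

	char node_name[5];	/* 4-character ACPI name plus NUL */
	struct acpi_buffer buffer = { sizeof(node_name), node_name };
	int value;

	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
	    sscanf(node_name, "_Q%x", &value) == 1)
		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);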
@@ -978,9 +982,9 @@ static const struct acpi_device_id ec_device_ids[] = {
978 982
979int __init acpi_ec_ecdt_probe(void) 983int __init acpi_ec_ecdt_probe(void)
980{ 984{
981 int ret;
982 acpi_status status; 985 acpi_status status;
983 struct acpi_table_ecdt *ecdt_ptr; 986 struct acpi_table_ecdt *ecdt_ptr;
987 acpi_handle dummy;
984 988
985 boot_ec = make_acpi_ec(); 989 boot_ec = make_acpi_ec();
986 if (!boot_ec) 990 if (!boot_ec)
@@ -1006,30 +1010,31 @@ int __init acpi_ec_ecdt_probe(void)
1006 boot_ec->gpe = ecdt_ptr->gpe; 1010 boot_ec->gpe = ecdt_ptr->gpe;
1007 boot_ec->handle = ACPI_ROOT_OBJECT; 1011 boot_ec->handle = ACPI_ROOT_OBJECT;
1008 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); 1012 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
1009 } else { 1013 /* Add some basic check against completely broken table */
1010 /* This workaround is needed only on some broken machines, 1014 if (boot_ec->data_addr != boot_ec->command_addr)
1011 * which require early EC, but fail to provide ECDT */ 1015 goto install;
1012 acpi_handle x; 1016 /* fall through */
1013 printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
1014 status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
1015 boot_ec, NULL);
1016		/* Check that acpi_get_devices actually found something */
1017 if (ACPI_FAILURE(status) || !boot_ec->handle)
1018 goto error;
1019		/* We really need to limit this workaround: the only ASUS machine
1020		 * that needs it has a fake EC._INI method, so use that as a flag.
1021		 * Keep the boot_ec struct as it will be needed soon.
1022 */
1023 if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &x)))
1024 return -ENODEV;
1025 } 1017 }
1026 1018 /* This workaround is needed only on some broken machines,
1027 ret = ec_install_handlers(boot_ec); 1019 * which require early EC, but fail to provide ECDT */
1028 if (!ret) { 1020 printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
1021 status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
1022 boot_ec, NULL);
 	1023	/* Check that acpi_get_devices actually found something */
1024 if (ACPI_FAILURE(status) || !boot_ec->handle)
1025 goto error;
 	1026	/* We really need to limit this workaround: the only ASUS machine
 	1027	 * that needs it has a fake EC._INI method, so use that as a flag.
 	1028	 * Keep the boot_ec struct as it will be needed soon.
1029 */
1030 if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &dummy)))
1031 return -ENODEV;
1032install:
1033 if (!ec_install_handlers(boot_ec)) {
1029 first_ec = boot_ec; 1034 first_ec = boot_ec;
1030 return 0; 1035 return 0;
1031 } 1036 }
1032 error: 1037error:
1033 kfree(boot_ec); 1038 kfree(boot_ec);
1034 boot_ec = NULL; 1039 boot_ec = NULL;
1035 return -ENODEV; 1040 return -ENODEV;
diff --git a/drivers/acpi/events/Makefile b/drivers/acpi/events/Makefile
deleted file mode 100644
index d29f2ee449cc..000000000000
--- a/drivers/acpi/events/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1#
2# Makefile for all Linux ACPI interpreter subdirectories
3#
4
5obj-y := evevent.o evregion.o evsci.o evxfevnt.o \
6 evmisc.o evrgnini.o evxface.o evxfregn.o \
7 evgpe.o evgpeblk.o
8
9EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/executer/Makefile b/drivers/acpi/executer/Makefile
deleted file mode 100644
index e09998aa012f..000000000000
--- a/drivers/acpi/executer/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
1#
2# Makefile for all Linux ACPI interpreter subdirectories
3#
4
5obj-y := exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
6 exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
7 excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
8 exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o
9
10EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/hardware/Makefile b/drivers/acpi/hardware/Makefile
deleted file mode 100644
index 438ad373b9ad..000000000000
--- a/drivers/acpi/hardware/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1#
2# Makefile for all Linux ACPI interpreter subdirectories
3#
4
5obj-y := hwacpi.o hwgpe.o hwregs.o hwsleep.o
6
7obj-$(ACPI_FUTURE_USAGE) += hwtimer.o
8
9EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/main.c
index 28a691cc625e..7e3c609cbef2 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/main.c
@@ -101,13 +101,26 @@ void __init acpi_old_suspend_ordering(void)
101 * cases. 101 * cases.
102 */ 102 */
103static bool set_sci_en_on_resume; 103static bool set_sci_en_on_resume;
104/*
105 * The ACPI specification wants us to save NVS memory regions during hibernation
106 * and to restore them during the subsequent resume. However, it is not certain
107 * if this mechanism is going to work on all machines, so we allow the user to
108 * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line
109 * option.
110 */
111static bool s4_no_nvs;
112
113void __init acpi_s4_no_nvs(void)
114{
115 s4_no_nvs = true;
116}
104 117
105/** 118/**
106 * acpi_pm_disable_gpes - Disable the GPEs. 119 * acpi_pm_disable_gpes - Disable the GPEs.
107 */ 120 */
108static int acpi_pm_disable_gpes(void) 121static int acpi_pm_disable_gpes(void)
109{ 122{
110 acpi_hw_disable_all_gpes(); 123 acpi_disable_all_gpes();
111 return 0; 124 return 0;
112} 125}
113 126
@@ -135,7 +148,7 @@ static int acpi_pm_prepare(void)
135 int error = __acpi_pm_prepare(); 148 int error = __acpi_pm_prepare();
136 149
137 if (!error) 150 if (!error)
138 acpi_hw_disable_all_gpes(); 151 acpi_disable_all_gpes();
139 return error; 152 return error;
140} 153}
141 154
@@ -267,7 +280,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
 267	 * (like wakeup GPE) have no handler; this avoids such GPE misfires.	 280	 * (like wakeup GPE) have no handler; this avoids such GPE misfires.
268 * acpi_leave_sleep_state will reenable specific GPEs later 281 * acpi_leave_sleep_state will reenable specific GPEs later
269 */ 282 */
270 acpi_hw_disable_all_gpes(); 283 acpi_disable_all_gpes();
271 284
272 local_irq_restore(flags); 285 local_irq_restore(flags);
273 printk(KERN_DEBUG "Back to C!\n"); 286 printk(KERN_DEBUG "Back to C!\n");
@@ -394,9 +407,25 @@ void __init acpi_no_s4_hw_signature(void)
394 407
395static int acpi_hibernation_begin(void) 408static int acpi_hibernation_begin(void)
396{ 409{
397 acpi_target_sleep_state = ACPI_STATE_S4; 410 int error;
398 acpi_sleep_tts_switch(acpi_target_sleep_state); 411
399 return 0; 412 error = s4_no_nvs ? 0 : hibernate_nvs_alloc();
413 if (!error) {
414 acpi_target_sleep_state = ACPI_STATE_S4;
415 acpi_sleep_tts_switch(acpi_target_sleep_state);
416 }
417
418 return error;
419}
420
421static int acpi_hibernation_pre_snapshot(void)
422{
423 int error = acpi_pm_prepare();
424
425 if (!error)
426 hibernate_nvs_save();
427
428 return error;
400} 429}
401 430
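The new pre_snapshot callback simply layers the NVS save on top of the existing prepare step (hibernate_nvs_save() is presumably a no-op when the NVS pages were not allocated, e.g. with 'acpi_sleep=s4_nonvs'); a sketch of the pattern:

	static int pre_snapshot(void)
	{
		int error = acpi_pm_prepare();	/* usual S4 prepare path */

		if (!error)
			hibernate_nvs_save();	/* copy firmware NVS area */
		return error;
	}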
402static int acpi_hibernation_enter(void) 431static int acpi_hibernation_enter(void)
@@ -417,6 +446,12 @@ static int acpi_hibernation_enter(void)
417 return ACPI_SUCCESS(status) ? 0 : -EFAULT; 446 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
418} 447}
419 448
449static void acpi_hibernation_finish(void)
450{
451 hibernate_nvs_free();
452 acpi_pm_finish();
453}
454
420static void acpi_hibernation_leave(void) 455static void acpi_hibernation_leave(void)
421{ 456{
422 /* 457 /*
@@ -432,18 +467,20 @@ static void acpi_hibernation_leave(void)
432 "cannot resume!\n"); 467 "cannot resume!\n");
433 panic("ACPI S4 hardware signature mismatch"); 468 panic("ACPI S4 hardware signature mismatch");
434 } 469 }
470 /* Restore the NVS memory area */
471 hibernate_nvs_restore();
435} 472}
436 473
437static void acpi_pm_enable_gpes(void) 474static void acpi_pm_enable_gpes(void)
438{ 475{
439 acpi_hw_enable_all_runtime_gpes(); 476 acpi_enable_all_runtime_gpes();
440} 477}
441 478
442static struct platform_hibernation_ops acpi_hibernation_ops = { 479static struct platform_hibernation_ops acpi_hibernation_ops = {
443 .begin = acpi_hibernation_begin, 480 .begin = acpi_hibernation_begin,
444 .end = acpi_pm_end, 481 .end = acpi_pm_end,
445 .pre_snapshot = acpi_pm_prepare, 482 .pre_snapshot = acpi_hibernation_pre_snapshot,
446 .finish = acpi_pm_finish, 483 .finish = acpi_hibernation_finish,
447 .prepare = acpi_pm_prepare, 484 .prepare = acpi_pm_prepare,
448 .enter = acpi_hibernation_enter, 485 .enter = acpi_hibernation_enter,
449 .leave = acpi_hibernation_leave, 486 .leave = acpi_hibernation_leave,
@@ -469,8 +506,22 @@ static int acpi_hibernation_begin_old(void)
469 506
470 error = acpi_sleep_prepare(ACPI_STATE_S4); 507 error = acpi_sleep_prepare(ACPI_STATE_S4);
471 508
509 if (!error) {
510 if (!s4_no_nvs)
511 error = hibernate_nvs_alloc();
512 if (!error)
513 acpi_target_sleep_state = ACPI_STATE_S4;
514 }
515 return error;
516}
517
518static int acpi_hibernation_pre_snapshot_old(void)
519{
520 int error = acpi_pm_disable_gpes();
521
472 if (!error) 522 if (!error)
473 acpi_target_sleep_state = ACPI_STATE_S4; 523 hibernate_nvs_save();
524
474 return error; 525 return error;
475} 526}
476 527
@@ -481,8 +532,8 @@ static int acpi_hibernation_begin_old(void)
481static struct platform_hibernation_ops acpi_hibernation_ops_old = { 532static struct platform_hibernation_ops acpi_hibernation_ops_old = {
482 .begin = acpi_hibernation_begin_old, 533 .begin = acpi_hibernation_begin_old,
483 .end = acpi_pm_end, 534 .end = acpi_pm_end,
484 .pre_snapshot = acpi_pm_disable_gpes, 535 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
485 .finish = acpi_pm_finish, 536 .finish = acpi_hibernation_finish,
486 .prepare = acpi_pm_disable_gpes, 537 .prepare = acpi_pm_disable_gpes,
487 .enter = acpi_hibernation_enter, 538 .enter = acpi_hibernation_enter,
488 .leave = acpi_hibernation_leave, 539 .leave = acpi_hibernation_leave,
@@ -622,7 +673,7 @@ static void acpi_power_off_prepare(void)
622{ 673{
623 /* Prepare to power off the system */ 674 /* Prepare to power off the system */
624 acpi_sleep_prepare(ACPI_STATE_S5); 675 acpi_sleep_prepare(ACPI_STATE_S5);
625 acpi_hw_disable_all_gpes(); 676 acpi_disable_all_gpes();
626} 677}
627 678
628static void acpi_power_off(void) 679static void acpi_power_off(void)
@@ -671,7 +722,7 @@ int __init acpi_sleep_init(void)
671 sleep_states[ACPI_STATE_S4] = 1; 722 sleep_states[ACPI_STATE_S4] = 1;
672 printk(" S4"); 723 printk(" S4");
673 if (!nosigcheck) { 724 if (!nosigcheck) {
674 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, 725 acpi_get_table(ACPI_SIG_FACS, 1,
675 (struct acpi_table_header **)&facs); 726 (struct acpi_table_header **)&facs);
676 if (facs) 727 if (facs)
677 s4_hardware_signature = 728 s4_hardware_signature =
diff --git a/drivers/acpi/namespace/Makefile b/drivers/acpi/namespace/Makefile
deleted file mode 100644
index 371a2daf837f..000000000000
--- a/drivers/acpi/namespace/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
1#
2# Makefile for all Linux ACPI interpreter subdirectories
3#
4
5obj-y := nsaccess.o nsload.o nssearch.o nsxfeval.o \
6 nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \
7 nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \
8 nsparse.o nspredef.o
9
10obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
11
12EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 25ceae9191ef..c5e292aab0e3 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -29,7 +29,6 @@
29#include <linux/errno.h> 29#include <linux/errno.h>
30#include <linux/acpi.h> 30#include <linux/acpi.h>
31#include <acpi/acpi_bus.h> 31#include <acpi/acpi_bus.h>
32#include <acpi/acmacros.h>
33 32
34#define ACPI_NUMA 0x80000000 33#define ACPI_NUMA 0x80000000
35#define _COMPONENT ACPI_NUMA 34#define _COMPONENT ACPI_NUMA
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c8111424dcb8..6729a4992f2b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -726,7 +726,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
726 726
727 dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); 727 dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
728 if (!dpc) 728 if (!dpc)
729 return_ACPI_STATUS(AE_NO_MEMORY); 729 return AE_NO_MEMORY;
730 730
731 dpc->function = function; 731 dpc->function = function;
732 dpc->context = context; 732 dpc->context = context;
@@ -747,7 +747,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
747 status = AE_ERROR; 747 status = AE_ERROR;
748 kfree(dpc); 748 kfree(dpc);
749 } 749 }
750 return_ACPI_STATUS(status); 750 return status;
751} 751}
752 752
753acpi_status acpi_os_execute(acpi_execute_type type, 753acpi_status acpi_os_execute(acpi_execute_type type,
diff --git a/drivers/acpi/parser/Makefile b/drivers/acpi/parser/Makefile
deleted file mode 100644
index db24ee09cf11..000000000000
--- a/drivers/acpi/parser/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
1#
2# Makefile for all Linux ACPI interpreter subdirectories
3#
4
5obj-y := psargs.o psparse.o psloop.o pstree.o pswalk.o \
6 psopcode.o psscope.o psutils.o psxface.o
7
8EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 4b252ea0e952..95650f83ce2e 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -99,7 +99,7 @@ acpi_status acpi_get_pci_id(acpi_handle handle, struct acpi_pci_id *id)
99 */ 99 */
100 100
101 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 101 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
102 "Device %s has PCI address %02x:%02x:%02x.%02x\n", 102 "Device %s has PCI address %04x:%02x:%02x.%d\n",
103 acpi_device_bid(device), id->segment, id->bus, 103 acpi_device_bid(device), id->segment, id->bus,
104 id->device, id->function)); 104 id->device, id->function));
105 105
@@ -111,12 +111,11 @@ EXPORT_SYMBOL(acpi_get_pci_id);
111int acpi_pci_bind(struct acpi_device *device) 111int acpi_pci_bind(struct acpi_device *device)
112{ 112{
113 int result = 0; 113 int result = 0;
114 acpi_status status = AE_OK; 114 acpi_status status;
115 struct acpi_pci_data *data = NULL; 115 struct acpi_pci_data *data;
116 struct acpi_pci_data *pdata = NULL; 116 struct acpi_pci_data *pdata;
117 char *pathname = NULL; 117 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
118 struct acpi_buffer buffer = { 0, NULL }; 118 acpi_handle handle;
119 acpi_handle handle = NULL;
120 struct pci_dev *dev; 119 struct pci_dev *dev;
121 struct pci_bus *bus; 120 struct pci_bus *bus;
122 121
@@ -124,21 +123,18 @@ int acpi_pci_bind(struct acpi_device *device)
124 if (!device || !device->parent) 123 if (!device || !device->parent)
125 return -EINVAL; 124 return -EINVAL;
126 125
127 pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL);
128 if (!pathname)
129 return -ENOMEM;
130 buffer.length = ACPI_PATHNAME_MAX;
131 buffer.pointer = pathname;
132
133 data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL); 126 data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
134 if (!data) { 127 if (!data)
135 kfree(pathname);
136 return -ENOMEM; 128 return -ENOMEM;
129
130 status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
131 if (ACPI_FAILURE(status)) {
132 kfree(data);
133 return -ENODEV;
137 } 134 }
138 135
139 acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
140 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI device [%s]...\n", 136 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI device [%s]...\n",
141 pathname)); 137 (char *)buffer.pointer));
142 138
143 /* 139 /*
144 * Segment & Bus 140 * Segment & Bus
@@ -166,7 +162,7 @@ int acpi_pci_bind(struct acpi_device *device)
166 data->id.device = device->pnp.bus_address >> 16; 162 data->id.device = device->pnp.bus_address >> 16;
167 data->id.function = device->pnp.bus_address & 0xFFFF; 163 data->id.function = device->pnp.bus_address & 0xFFFF;
168 164
169 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %02x:%02x:%02x.%02x\n", 165 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %04x:%02x:%02x.%d\n",
170 data->id.segment, data->id.bus, data->id.device, 166 data->id.segment, data->id.bus, data->id.device,
171 data->id.function)); 167 data->id.function));
172 168
@@ -196,7 +192,7 @@ int acpi_pci_bind(struct acpi_device *device)
196 } 192 }
197 if (!data->dev) { 193 if (!data->dev) {
198 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 194 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
199 "Device %02x:%02x:%02x.%02x not present in PCI namespace\n", 195 "Device %04x:%02x:%02x.%d not present in PCI namespace\n",
200 data->id.segment, data->id.bus, 196 data->id.segment, data->id.bus,
201 data->id.device, data->id.function)); 197 data->id.device, data->id.function));
202 result = -ENODEV; 198 result = -ENODEV;
@@ -204,7 +200,7 @@ int acpi_pci_bind(struct acpi_device *device)
204 } 200 }
205 if (!data->dev->bus) { 201 if (!data->dev->bus) {
206 printk(KERN_ERR PREFIX 202 printk(KERN_ERR PREFIX
207 "Device %02x:%02x:%02x.%02x has invalid 'bus' field\n", 203 "Device %04x:%02x:%02x.%d has invalid 'bus' field\n",
208 data->id.segment, data->id.bus, 204 data->id.segment, data->id.bus,
209 data->id.device, data->id.function); 205 data->id.device, data->id.function);
210 result = -ENODEV; 206 result = -ENODEV;
@@ -219,7 +215,7 @@ int acpi_pci_bind(struct acpi_device *device)
219 */ 215 */
220 if (data->dev->subordinate) { 216 if (data->dev->subordinate) {
221 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 217 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
222 "Device %02x:%02x:%02x.%02x is a PCI bridge\n", 218 "Device %04x:%02x:%02x.%d is a PCI bridge\n",
223 data->id.segment, data->id.bus, 219 data->id.segment, data->id.bus,
224 data->id.device, data->id.function)); 220 data->id.device, data->id.function));
225 data->bus = data->dev->subordinate; 221 data->bus = data->dev->subordinate;
@@ -262,7 +258,7 @@ int acpi_pci_bind(struct acpi_device *device)
262 } 258 }
263 259
264 end: 260 end:
265 kfree(pathname); 261 kfree(buffer.pointer);
266 if (result) 262 if (result)
267 kfree(data); 263 kfree(data);
268 264
@@ -272,25 +268,21 @@ int acpi_pci_bind(struct acpi_device *device)
272static int acpi_pci_unbind(struct acpi_device *device) 268static int acpi_pci_unbind(struct acpi_device *device)
273{ 269{
274 int result = 0; 270 int result = 0;
275 acpi_status status = AE_OK; 271 acpi_status status;
276 struct acpi_pci_data *data = NULL; 272 struct acpi_pci_data *data;
277 char *pathname = NULL; 273 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
278 struct acpi_buffer buffer = { 0, NULL };
279 274
280 275
281 if (!device || !device->parent) 276 if (!device || !device->parent)
282 return -EINVAL; 277 return -EINVAL;
283 278
284 pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL); 279 status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
285 if (!pathname) 280 if (ACPI_FAILURE(status))
286 return -ENOMEM; 281 return -ENODEV;
287 282
288 buffer.length = ACPI_PATHNAME_MAX;
289 buffer.pointer = pathname;
290 acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
291 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unbinding PCI device [%s]...\n", 283 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unbinding PCI device [%s]...\n",
292 pathname)); 284 (char *) buffer.pointer));
293 kfree(pathname); 285 kfree(buffer.pointer);
294 286
295 status = 287 status =
296 acpi_get_data(device->handle, acpi_pci_data_handler, 288 acpi_get_data(device->handle, acpi_pci_data_handler,
@@ -322,50 +314,44 @@ acpi_pci_bind_root(struct acpi_device *device,
322 struct acpi_pci_id *id, struct pci_bus *bus) 314 struct acpi_pci_id *id, struct pci_bus *bus)
323{ 315{
324 int result = 0; 316 int result = 0;
325 acpi_status status = AE_OK; 317 acpi_status status;
326 struct acpi_pci_data *data = NULL; 318 struct acpi_pci_data *data = NULL;
327 char *pathname = NULL; 319 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
328 struct acpi_buffer buffer = { 0, NULL };
329
330 pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL);
331 if (!pathname)
332 return -ENOMEM;
333
334 buffer.length = ACPI_PATHNAME_MAX;
335 buffer.pointer = pathname;
336 320
337 if (!device || !id || !bus) { 321 if (!device || !id || !bus) {
338 kfree(pathname);
339 return -EINVAL; 322 return -EINVAL;
340 } 323 }
341 324
342 data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL); 325 data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
343 if (!data) { 326 if (!data)
344 kfree(pathname);
345 return -ENOMEM; 327 return -ENOMEM;
346 }
347 328
348 data->id = *id; 329 data->id = *id;
349 data->bus = bus; 330 data->bus = bus;
350 device->ops.bind = acpi_pci_bind; 331 device->ops.bind = acpi_pci_bind;
351 device->ops.unbind = acpi_pci_unbind; 332 device->ops.unbind = acpi_pci_unbind;
352 333
353 acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer); 334 status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
335 if (ACPI_FAILURE(status)) {
336 kfree (data);
337 return -ENODEV;
338 }
354 339
355 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI root bridge [%s] to " 340 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI root bridge [%s] to "
356 "%02x:%02x\n", pathname, id->segment, id->bus)); 341 "%04x:%02x\n", (char *)buffer.pointer,
342 id->segment, id->bus));
357 343
358 status = acpi_attach_data(device->handle, acpi_pci_data_handler, data); 344 status = acpi_attach_data(device->handle, acpi_pci_data_handler, data);
359 if (ACPI_FAILURE(status)) { 345 if (ACPI_FAILURE(status)) {
360 ACPI_EXCEPTION((AE_INFO, status, 346 ACPI_EXCEPTION((AE_INFO, status,
361 "Unable to attach ACPI-PCI context to device %s", 347 "Unable to attach ACPI-PCI context to device %s",
362 pathname)); 348 (char *)buffer.pointer));
363 result = -ENODEV; 349 result = -ENODEV;
364 goto end; 350 goto end;
365 } 351 }
366 352
367 end: 353 end:
368 kfree(pathname); 354 kfree(buffer.pointer);
369 if (result != 0) 355 if (result != 0)
370 kfree(data); 356 kfree(data);
371 357
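
The recurring conversion in this file replaces a hand-allocated ACPI_PATHNAME_MAX buffer with ACPICA's self-sizing one. A minimal sketch of the resulting idiom (the function name and printk are illustrative, not from the patch):

	static int example_print_path(acpi_handle handle)
	{
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		acpi_status status;

		/* ACPICA sizes and allocates buffer.pointer itself */
		status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
		if (ACPI_FAILURE(status))
			return -ENODEV;

		printk(KERN_DEBUG "path: %s\n", (char *)buffer.pointer);
		kfree(buffer.pointer);	/* caller frees the ACPICA allocation */
		return 0;
	}

Beyond dropping the fixed-size guess, this also checks the status, which the old code ignored.
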
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index bf79d83bdfbb..891bdf6679f3 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -4,6 +4,8 @@
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de> 6 * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de>
7 * (c) Copyright 2008 Hewlett-Packard Development Company, L.P.
8 * Bjorn Helgaas <bjorn.helgaas@hp.com>
7 * 9 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 * 11 *
@@ -41,29 +43,36 @@
41#define _COMPONENT ACPI_PCI_COMPONENT 43#define _COMPONENT ACPI_PCI_COMPONENT
42ACPI_MODULE_NAME("pci_irq"); 44ACPI_MODULE_NAME("pci_irq");
43 45
44static struct acpi_prt_list acpi_prt; 46struct acpi_prt_entry {
47 struct list_head list;
48 struct acpi_pci_id id;
49 u8 pin;
50 acpi_handle link;
51 u32 index; /* GSI, or link _CRS index */
52};
53
54static LIST_HEAD(acpi_prt_list);
45static DEFINE_SPINLOCK(acpi_prt_lock); 55static DEFINE_SPINLOCK(acpi_prt_lock);
46 56
57static inline char pin_name(int pin)
58{
59 return 'A' + pin - 1;
60}
61
47/* -------------------------------------------------------------------------- 62/* --------------------------------------------------------------------------
48 PCI IRQ Routing Table (PRT) Support 63 PCI IRQ Routing Table (PRT) Support
49 -------------------------------------------------------------------------- */ 64 -------------------------------------------------------------------------- */
50 65
51static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(int segment, 66static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(struct pci_dev *dev,
52 int bus, 67 int pin)
53 int device, int pin)
54{ 68{
55 struct acpi_prt_entry *entry = NULL; 69 struct acpi_prt_entry *entry;
56 70 int segment = pci_domain_nr(dev->bus);
57 if (!acpi_prt.count) 71 int bus = dev->bus->number;
58 return NULL; 72 int device = PCI_SLOT(dev->devfn);
59 73
60 /*
61 * Parse through all PRT entries looking for a match on the specified
62 * PCI device's segment, bus, device, and pin (don't care about func).
63 *
64 */
65 spin_lock(&acpi_prt_lock); 74 spin_lock(&acpi_prt_lock);
66 list_for_each_entry(entry, &acpi_prt.entries, node) { 75 list_for_each_entry(entry, &acpi_prt_list, list) {
67 if ((segment == entry->id.segment) 76 if ((segment == entry->id.segment)
68 && (bus == entry->id.bus) 77 && (bus == entry->id.bus)
69 && (device == entry->id.device) 78 && (device == entry->id.device)
@@ -72,7 +81,6 @@ static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(int segment,
72 return entry; 81 return entry;
73 } 82 }
74 } 83 }
75
76 spin_unlock(&acpi_prt_lock); 84 spin_unlock(&acpi_prt_lock);
77 return NULL; 85 return NULL;
78} 86}
@@ -124,25 +132,27 @@ struct prt_quirk {
124 char *actual_source; 132 char *actual_source;
125}; 133};
126 134
135#define PCI_INTX_PIN(c) (c - 'A' + 1)
136
127/* 137/*
128 * These systems have incorrect _PRT entries. The BIOS claims the PCI 138 * These systems have incorrect _PRT entries. The BIOS claims the PCI
129 * interrupt at the listed segment/bus/device/pin is connected to the first 139 * interrupt at the listed segment/bus/device/pin is connected to the first
130 * link device, but it is actually connected to the second. 140 * link device, but it is actually connected to the second.
131 */ 141 */
132static struct prt_quirk prt_quirks[] = { 142static struct prt_quirk prt_quirks[] = {
133 { medion_md9580, 0, 0, 9, 'A', 143 { medion_md9580, 0, 0, 9, PCI_INTX_PIN('A'),
134 "\\_SB_.PCI0.ISA_.LNKA", 144 "\\_SB_.PCI0.ISA_.LNKA",
135 "\\_SB_.PCI0.ISA_.LNKB"}, 145 "\\_SB_.PCI0.ISA_.LNKB"},
136 { dell_optiplex, 0, 0, 0xd, 'A', 146 { dell_optiplex, 0, 0, 0xd, PCI_INTX_PIN('A'),
137 "\\_SB_.LNKB", 147 "\\_SB_.LNKB",
138 "\\_SB_.LNKA"}, 148 "\\_SB_.LNKA"},
139 { hp_t5710, 0, 0, 1, 'A', 149 { hp_t5710, 0, 0, 1, PCI_INTX_PIN('A'),
140 "\\_SB_.PCI0.LNK1", 150 "\\_SB_.PCI0.LNK1",
141 "\\_SB_.PCI0.LNK3"}, 151 "\\_SB_.PCI0.LNK3"},
142}; 152};
143 153
144static void 154static void do_prt_fixups(struct acpi_prt_entry *entry,
145do_prt_fixups(struct acpi_prt_entry *entry, struct acpi_pci_routing_table *prt) 155 struct acpi_pci_routing_table *prt)
146{ 156{
147 int i; 157 int i;
148 struct prt_quirk *quirk; 158 struct prt_quirk *quirk;
@@ -158,42 +168,43 @@ do_prt_fixups(struct acpi_prt_entry *entry, struct acpi_pci_routing_table *prt)
158 entry->id.segment == quirk->segment && 168 entry->id.segment == quirk->segment &&
159 entry->id.bus == quirk->bus && 169 entry->id.bus == quirk->bus &&
160 entry->id.device == quirk->device && 170 entry->id.device == quirk->device &&
161 entry->pin + 'A' == quirk->pin && 171 entry->pin == quirk->pin &&
162 !strcmp(prt->source, quirk->source) && 172 !strcmp(prt->source, quirk->source) &&
163 strlen(prt->source) >= strlen(quirk->actual_source)) { 173 strlen(prt->source) >= strlen(quirk->actual_source)) {
164 printk(KERN_WARNING PREFIX "firmware reports " 174 printk(KERN_WARNING PREFIX "firmware reports "
165 "%04x:%02x:%02x PCI INT %c connected to %s; " 175 "%04x:%02x:%02x PCI INT %c connected to %s; "
166 "changing to %s\n", 176 "changing to %s\n",
167 entry->id.segment, entry->id.bus, 177 entry->id.segment, entry->id.bus,
168 entry->id.device, 'A' + entry->pin, 178 entry->id.device, pin_name(entry->pin),
169 prt->source, quirk->actual_source); 179 prt->source, quirk->actual_source);
170 strcpy(prt->source, quirk->actual_source); 180 strcpy(prt->source, quirk->actual_source);
171 } 181 }
172 } 182 }
173} 183}
174 184
175static int 185static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
176acpi_pci_irq_add_entry(acpi_handle handle, 186 struct acpi_pci_routing_table *prt)
177 int segment, int bus, struct acpi_pci_routing_table *prt)
178{ 187{
179 struct acpi_prt_entry *entry = NULL; 188 struct acpi_prt_entry *entry;
180
181
182 if (!prt)
183 return -EINVAL;
184 189
185 entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL); 190 entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL);
186 if (!entry) 191 if (!entry)
187 return -ENOMEM; 192 return -ENOMEM;
188 193
194 /*
195 * Note that the _PRT uses 0=INTA, 1=INTB, etc, while PCI uses
196 * 1=INTA, 2=INTB. We use the PCI encoding throughout, so convert
197 * it here.
198 */
189 entry->id.segment = segment; 199 entry->id.segment = segment;
190 entry->id.bus = bus; 200 entry->id.bus = bus;
191 entry->id.device = (prt->address >> 16) & 0xFFFF; 201 entry->id.device = (prt->address >> 16) & 0xFFFF;
192 entry->id.function = prt->address & 0xFFFF; 202 entry->pin = prt->pin + 1;
193 entry->pin = prt->pin;
194 203
195 do_prt_fixups(entry, prt); 204 do_prt_fixups(entry, prt);
196 205
206 entry->index = prt->source_index;
207
197 /* 208 /*
198 * Type 1: Dynamic 209 * Type 1: Dynamic
199 * --------------- 210 * ---------------
@@ -207,10 +218,9 @@ acpi_pci_irq_add_entry(acpi_handle handle,
207 * (e.g. exists somewhere 'below' this _PRT entry in the ACPI 218 * (e.g. exists somewhere 'below' this _PRT entry in the ACPI
208 * namespace). 219 * namespace).
209 */ 220 */
210 if (prt->source[0]) { 221 if (prt->source[0])
211 acpi_get_handle(handle, prt->source, &entry->link.handle); 222 acpi_get_handle(handle, prt->source, &entry->link);
212 entry->link.index = prt->source_index; 223
213 }
214 /* 224 /*
215 * Type 2: Static 225 * Type 2: Static
216 * -------------- 226 * --------------
@@ -218,84 +228,38 @@ acpi_pci_irq_add_entry(acpi_handle handle,
218 * the IRQ value, which is hardwired to specific interrupt inputs on 228 * the IRQ value, which is hardwired to specific interrupt inputs on
219 * the interrupt controller. 229 * the interrupt controller.
220 */ 230 */
221 else
222 entry->link.index = prt->source_index;
223 231
224 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO, 232 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO,
225 " %02X:%02X:%02X[%c] -> %s[%d]\n", 233 " %04x:%02x:%02x[%c] -> %s[%d]\n",
226 entry->id.segment, entry->id.bus, 234 entry->id.segment, entry->id.bus,
227 entry->id.device, ('A' + entry->pin), prt->source, 235 entry->id.device, pin_name(entry->pin),
228 entry->link.index)); 236 prt->source, entry->index));
229 237
230 spin_lock(&acpi_prt_lock); 238 spin_lock(&acpi_prt_lock);
231 list_add_tail(&entry->node, &acpi_prt.entries); 239 list_add_tail(&entry->list, &acpi_prt_list);
232 acpi_prt.count++;
233 spin_unlock(&acpi_prt_lock); 240 spin_unlock(&acpi_prt_lock);
234 241
235 return 0; 242 return 0;
236} 243}
237 244
238static void
239acpi_pci_irq_del_entry(int segment, int bus, struct acpi_prt_entry *entry)
240{
241 if (segment == entry->id.segment && bus == entry->id.bus) {
242 acpi_prt.count--;
243 list_del(&entry->node);
244 kfree(entry);
245 }
246}
247
248int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus) 245int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
249{ 246{
250 acpi_status status = AE_OK; 247 acpi_status status;
251 char *pathname = NULL; 248 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
252 struct acpi_buffer buffer = { 0, NULL }; 249 struct acpi_pci_routing_table *entry;
253 struct acpi_pci_routing_table *prt = NULL;
254 struct acpi_pci_routing_table *entry = NULL;
255 static int first_time = 1;
256
257
258 pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL);
259 if (!pathname)
260 return -ENOMEM;
261
262 if (first_time) {
263 acpi_prt.count = 0;
264 INIT_LIST_HEAD(&acpi_prt.entries);
265 first_time = 0;
266 }
267
268 /*
269 * NOTE: We're given a 'handle' to the _PRT object's parent device
270 * (either a PCI root bridge or PCI-PCI bridge).
271 */
272 250
273 buffer.length = ACPI_PATHNAME_MAX; 251 /* 'handle' is the _PRT's parent (root bridge or PCI-PCI bridge) */
274 buffer.pointer = pathname; 252 status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
275 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); 253 if (ACPI_FAILURE(status))
254 return -ENODEV;
276 255
277 printk(KERN_DEBUG "ACPI: PCI Interrupt Routing Table [%s._PRT]\n", 256 printk(KERN_DEBUG "ACPI: PCI Interrupt Routing Table [%s._PRT]\n",
278 pathname); 257 (char *) buffer.pointer);
279 258
280 /* 259 kfree(buffer.pointer);
281 * Evaluate this _PRT and add its entries to our global list (acpi_prt).
282 */
283 260
284 buffer.length = 0; 261 buffer.length = ACPI_ALLOCATE_BUFFER;
285 buffer.pointer = NULL; 262 buffer.pointer = NULL;
286 kfree(pathname);
287 status = acpi_get_irq_routing_table(handle, &buffer);
288 if (status != AE_BUFFER_OVERFLOW) {
289 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRT [%s]",
290 acpi_format_exception(status)));
291 return -ENODEV;
292 }
293
294 prt = kzalloc(buffer.length, GFP_KERNEL);
295 if (!prt) {
296 return -ENOMEM;
297 }
298 buffer.pointer = prt;
299 263
300 status = acpi_get_irq_routing_table(handle, &buffer); 264 status = acpi_get_irq_routing_table(handle, &buffer);
301 if (ACPI_FAILURE(status)) { 265 if (ACPI_FAILURE(status)) {
@@ -305,36 +269,30 @@ int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
305 return -ENODEV; 269 return -ENODEV;
306 } 270 }
307 271
308 entry = prt; 272 entry = buffer.pointer;
309
310 while (entry && (entry->length > 0)) { 273 while (entry && (entry->length > 0)) {
311 acpi_pci_irq_add_entry(handle, segment, bus, entry); 274 acpi_pci_irq_add_entry(handle, segment, bus, entry);
312 entry = (struct acpi_pci_routing_table *) 275 entry = (struct acpi_pci_routing_table *)
313 ((unsigned long)entry + entry->length); 276 ((unsigned long)entry + entry->length);
314 } 277 }
315 278
316 kfree(prt); 279 kfree(buffer.pointer);
317
318 return 0; 280 return 0;
319} 281}
320 282
321void acpi_pci_irq_del_prt(int segment, int bus) 283void acpi_pci_irq_del_prt(int segment, int bus)
322{ 284{
323 struct list_head *node = NULL, *n = NULL; 285 struct acpi_prt_entry *entry, *tmp;
324 struct acpi_prt_entry *entry = NULL;
325
326 if (!acpi_prt.count) {
327 return;
328 }
329 286
330 printk(KERN_DEBUG 287 printk(KERN_DEBUG
331 "ACPI: Delete PCI Interrupt Routing Table for %x:%x\n", segment, 288 "ACPI: Delete PCI Interrupt Routing Table for %04x:%02x\n",
332 bus); 289 segment, bus);
333 spin_lock(&acpi_prt_lock); 290 spin_lock(&acpi_prt_lock);
334 list_for_each_safe(node, n, &acpi_prt.entries) { 291 list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) {
335 entry = list_entry(node, struct acpi_prt_entry, node); 292 if (segment == entry->id.segment && bus == entry->id.bus) {
336 293 list_del(&entry->list);
337 acpi_pci_irq_del_entry(segment, bus, entry); 294 kfree(entry);
295 }
338 } 296 }
339 spin_unlock(&acpi_prt_lock); 297 spin_unlock(&acpi_prt_lock);
340} 298}
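
acpi_pci_irq_del_prt() now deletes entries while walking the list, which is only safe with the _safe iterator: it caches the next pointer before the loop body runs, so freeing the current entry cannot break the traversal. Roughly (names mirror the patch):

	struct acpi_prt_entry *entry, *tmp;

	/* "tmp" holds the lookahead; "entry" may be freed in the body */
	list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
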
@@ -342,162 +300,26 @@ void acpi_pci_irq_del_prt(int segment, int bus)
342/* -------------------------------------------------------------------------- 300/* --------------------------------------------------------------------------
343 PCI Interrupt Routing Support 301 PCI Interrupt Routing Support
344 -------------------------------------------------------------------------- */ 302 -------------------------------------------------------------------------- */
345typedef int (*irq_lookup_func) (struct acpi_prt_entry *, int *, int *, char **); 303static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
346
347static int
348acpi_pci_allocate_irq(struct acpi_prt_entry *entry,
349 int *triggering, int *polarity, char **link)
350{
351 int irq;
352
353
354 if (entry->link.handle) {
355 irq = acpi_pci_link_allocate_irq(entry->link.handle,
356 entry->link.index, triggering,
357 polarity, link);
358 if (irq < 0) {
359 printk(KERN_WARNING PREFIX
360 "Invalid IRQ link routing entry\n");
361 return -1;
362 }
363 } else {
364 irq = entry->link.index;
365 *triggering = ACPI_LEVEL_SENSITIVE;
366 *polarity = ACPI_ACTIVE_LOW;
367 }
368
369 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found IRQ %d\n", irq));
370 return irq;
371}
372
373static int
374acpi_pci_free_irq(struct acpi_prt_entry *entry,
375 int *triggering, int *polarity, char **link)
376{
377 int irq;
378
379 if (entry->link.handle) {
380 irq = acpi_pci_link_free_irq(entry->link.handle);
381 } else {
382 irq = entry->link.index;
383 }
384 return irq;
385}
386
387#ifdef CONFIG_X86_IO_APIC
388extern int noioapicquirk;
389
390static int bridge_has_boot_interrupt_variant(struct pci_bus *bus)
391{ 304{
392 struct pci_bus *bus_it; 305 struct acpi_prt_entry *entry;
393 306 struct pci_dev *bridge;
394 for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) { 307 u8 bridge_pin, orig_pin = pin;
395 if (!bus_it->self) 308
396 return 0; 309 entry = acpi_pci_irq_find_prt_entry(dev, pin);
397 310 if (entry) {
398 printk(KERN_INFO "vendor=%04x device=%04x\n", bus_it->self->vendor, 311 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n",
399 bus_it->self->device); 312 pci_name(dev), pin_name(pin)));
400 313 return entry;
401 if (bus_it->self->irq_reroute_variant)
402 return bus_it->self->irq_reroute_variant;
403 }
404 return 0;
405}
406#endif /* CONFIG_X86_IO_APIC */
407
408/*
409 * acpi_pci_irq_lookup
410 * success: return IRQ >= 0
411 * failure: return -1
412 */
413static int
414acpi_pci_irq_lookup(struct pci_bus *bus,
415 int device,
416 int pin,
417 int *triggering,
418 int *polarity, char **link, irq_lookup_func func)
419{
420 struct acpi_prt_entry *entry = NULL;
421 int segment = pci_domain_nr(bus);
422 int bus_nr = bus->number;
423 int ret;
424
425
426 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
427 "Searching for PRT entry for %02x:%02x:%02x[%c]\n",
428 segment, bus_nr, device, ('A' + pin)));
429
430 entry = acpi_pci_irq_find_prt_entry(segment, bus_nr, device, pin);
431 if (!entry) {
432 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PRT entry not found\n"));
433 return -1;
434 }
435
436 ret = func(entry, triggering, polarity, link);
437
438#ifdef CONFIG_X86_IO_APIC
439 /*
440 * Some chipsets (e.g. intel 6700PXH) generate a legacy INTx when the
441 * IRQ entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel
442 * does during interrupt handling). When this INTx generation cannot be
443 * disabled, we reroute these interrupts to their legacy equivalent to
444 * get rid of spurious interrupts.
445 */
446 if (!noioapicquirk) {
447 switch (bridge_has_boot_interrupt_variant(bus)) {
448 case 0:
449 /* no rerouting necessary */
450 break;
451
452 case INTEL_IRQ_REROUTE_VARIANT:
453 /*
454 * Remap according to INTx routing table in 6700PXH
455 * specs, intel order number 302628-002, section
456 * 2.15.2. Other chipsets (80332, ...) have the same
457 * mapping and are handled here as well.
458 */
459 printk(KERN_INFO "pci irq %d -> rerouted to legacy "
460 "irq %d\n", ret, (ret % 4) + 16);
461 ret = (ret % 4) + 16;
462 break;
463
464 default:
465 printk(KERN_INFO "not rerouting irq %d to legacy irq: "
466 "unknown mapping\n", ret);
467 break;
468 }
469 } 314 }
470#endif /* CONFIG_X86_IO_APIC */
471
472 return ret;
473}
474
475/*
476 * acpi_pci_irq_derive
477 * success: return IRQ >= 0
478 * failure: return < 0
479 */
480static int
481acpi_pci_irq_derive(struct pci_dev *dev,
482 int pin,
483 int *triggering,
484 int *polarity, char **link, irq_lookup_func func)
485{
486 struct pci_dev *bridge = dev;
487 int irq = -1;
488 u8 bridge_pin = 0, orig_pin = pin;
489
490
491 if (!dev)
492 return -EINVAL;
493 315
494 /* 316 /*
495 * Attempt to derive an IRQ for this device from a parent bridge's 317 * Attempt to derive an IRQ for this device from a parent bridge's
 496 * PCI interrupt routing entry (e.g. yenta bridge and add-in card bridge). 318 * PCI interrupt routing entry (e.g. yenta bridge and add-in card bridge).
497 */ 319 */
498 while (irq < 0 && bridge->bus->self) { 320 bridge = dev->bus->self;
499 pin = (pin + PCI_SLOT(bridge->devfn)) % 4; 321 while (bridge) {
500 bridge = bridge->bus->self; 322 pin = (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
501 323
502 if ((bridge->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) { 324 if ((bridge->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) {
503 /* PC card has the same IRQ as its cardbridge */ 325 /* PC card has the same IRQ as its cardbridge */
@@ -506,50 +328,40 @@ acpi_pci_irq_derive(struct pci_dev *dev,
506 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 328 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
507 "No interrupt pin configured for device %s\n", 329 "No interrupt pin configured for device %s\n",
508 pci_name(bridge))); 330 pci_name(bridge)));
509 return -1; 331 return NULL;
510 } 332 }
511 /* Pin is from 0 to 3 */
512 bridge_pin--;
513 pin = bridge_pin; 333 pin = bridge_pin;
514 } 334 }
515 335
516 irq = acpi_pci_irq_lookup(bridge->bus, PCI_SLOT(bridge->devfn), 336 entry = acpi_pci_irq_find_prt_entry(bridge, pin);
517 pin, triggering, polarity, 337 if (entry) {
518 link, func); 338 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
519 } 339 "Derived GSI for %s INT %c from %s\n",
340 pci_name(dev), pin_name(orig_pin),
341 pci_name(bridge)));
342 return entry;
343 }
520 344
521 if (irq < 0) { 345 dev = bridge;
522 dev_warn(&dev->dev, "can't derive routing for PCI INT %c\n", 346 bridge = dev->bus->self;
523 'A' + orig_pin);
524 return -1;
525 } 347 }
526 348
527 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Derive IRQ %d for device %s from %s\n", 349 dev_warn(&dev->dev, "can't derive routing for PCI INT %c\n",
528 irq, pci_name(dev), pci_name(bridge))); 350 pin_name(orig_pin));
529 351 return NULL;
530 return irq;
531} 352}
532 353
533/*
534 * acpi_pci_irq_enable
535 * success: return 0
536 * failure: return < 0
537 */
538
539int acpi_pci_irq_enable(struct pci_dev *dev) 354int acpi_pci_irq_enable(struct pci_dev *dev)
540{ 355{
541 int irq = 0; 356 struct acpi_prt_entry *entry;
542 u8 pin = 0; 357 int gsi;
358 u8 pin;
543 int triggering = ACPI_LEVEL_SENSITIVE; 359 int triggering = ACPI_LEVEL_SENSITIVE;
544 int polarity = ACPI_ACTIVE_LOW; 360 int polarity = ACPI_ACTIVE_LOW;
545 char *link = NULL; 361 char *link = NULL;
546 char link_desc[16]; 362 char link_desc[16];
547 int rc; 363 int rc;
548 364
549
550 if (!dev)
551 return -EINVAL;
552
553 pin = dev->pin; 365 pin = dev->pin;
554 if (!pin) { 366 if (!pin) {
555 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 367 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -557,31 +369,9 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
557 pci_name(dev))); 369 pci_name(dev)));
558 return 0; 370 return 0;
559 } 371 }
560 pin--;
561
562 if (!dev->bus) {
563 dev_err(&dev->dev, "invalid (NULL) 'bus' field\n");
564 return -ENODEV;
565 }
566
567 /*
568 * First we check the PCI IRQ routing table (PRT) for an IRQ. PRT
569 * values override any BIOS-assigned IRQs set during boot.
570 */
571 irq = acpi_pci_irq_lookup(dev->bus, PCI_SLOT(dev->devfn), pin,
572 &triggering, &polarity, &link,
573 acpi_pci_allocate_irq);
574
575 /*
576 * If no PRT entry was found, we'll try to derive an IRQ from the
577 * device's parent bridge.
578 */
579 if (irq < 0)
580 irq = acpi_pci_irq_derive(dev, pin, &triggering,
581 &polarity, &link,
582 acpi_pci_allocate_irq);
583 372
584 if (irq < 0) { 373 entry = acpi_pci_irq_lookup(dev, pin);
374 if (!entry) {
585 /* 375 /*
586 * IDE legacy mode controller IRQs are magic. Why do compat 376 * IDE legacy mode controller IRQs are magic. Why do compat
 587 * extensions always make such a nasty mess? 377 * extensions always make such a nasty mess?
@@ -590,12 +380,24 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
590 (dev->class & 0x05) == 0) 380 (dev->class & 0x05) == 0)
591 return 0; 381 return 0;
592 } 382 }
383
384 if (entry) {
385 if (entry->link)
386 gsi = acpi_pci_link_allocate_irq(entry->link,
387 entry->index,
388 &triggering, &polarity,
389 &link);
390 else
391 gsi = entry->index;
392 } else
393 gsi = -1;
394
593 /* 395 /*
594 * No IRQ known to the ACPI subsystem - maybe the BIOS / 396 * No IRQ known to the ACPI subsystem - maybe the BIOS /
595 * driver reported one, then use it. Exit in any case. 397 * driver reported one, then use it. Exit in any case.
596 */ 398 */
597 if (irq < 0) { 399 if (gsi < 0) {
598 dev_warn(&dev->dev, "PCI INT %c: no GSI", 'A' + pin); 400 dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin));
599 /* Interrupt Line values above 0xF are forbidden */ 401 /* Interrupt Line values above 0xF are forbidden */
600 if (dev->irq > 0 && (dev->irq <= 0xF)) { 402 if (dev->irq > 0 && (dev->irq <= 0xF)) {
601 printk(" - using IRQ %d\n", dev->irq); 403 printk(" - using IRQ %d\n", dev->irq);
@@ -608,10 +410,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
608 } 410 }
609 } 411 }
610 412
611 rc = acpi_register_gsi(irq, triggering, polarity); 413 rc = acpi_register_gsi(gsi, triggering, polarity);
612 if (rc < 0) { 414 if (rc < 0) {
613 dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n", 415 dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n",
614 'A' + pin); 416 pin_name(pin));
615 return rc; 417 return rc;
616 } 418 }
617 dev->irq = rc; 419 dev->irq = rc;
@@ -622,7 +424,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
622 link_desc[0] = '\0'; 424 link_desc[0] = '\0';
623 425
624 dev_info(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n", 426 dev_info(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
625 'A' + pin, link_desc, irq, 427 pin_name(pin), link_desc, gsi,
626 (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge", 428 (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
627 (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq); 429 (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
628 430
@@ -636,42 +438,28 @@ void __attribute__ ((weak)) acpi_unregister_gsi(u32 i)
636 438
637void acpi_pci_irq_disable(struct pci_dev *dev) 439void acpi_pci_irq_disable(struct pci_dev *dev)
638{ 440{
639 int gsi = 0; 441 struct acpi_prt_entry *entry;
640 u8 pin = 0; 442 int gsi;
641 int triggering = ACPI_LEVEL_SENSITIVE; 443 u8 pin;
642 int polarity = ACPI_ACTIVE_LOW;
643
644
645 if (!dev || !dev->bus)
646 return;
647 444
648 pin = dev->pin; 445 pin = dev->pin;
649 if (!pin) 446 if (!pin)
650 return; 447 return;
651 pin--;
652 448
653 /* 449 entry = acpi_pci_irq_lookup(dev, pin);
654 * First we check the PCI IRQ routing table (PRT) for an IRQ. 450 if (!entry)
655 */
656 gsi = acpi_pci_irq_lookup(dev->bus, PCI_SLOT(dev->devfn), pin,
657 &triggering, &polarity, NULL,
658 acpi_pci_free_irq);
659 /*
660 * If no PRT entry was found, we'll try to derive an IRQ from the
661 * device's parent bridge.
662 */
663 if (gsi < 0)
664 gsi = acpi_pci_irq_derive(dev, pin,
665 &triggering, &polarity, NULL,
666 acpi_pci_free_irq);
667 if (gsi < 0)
668 return; 451 return;
669 452
453 if (entry->link)
454 gsi = acpi_pci_link_free_irq(entry->link);
455 else
456 gsi = entry->index;
457
670 /* 458 /*
671 * TBD: It might be worth clearing dev->irq by magic constant 459 * TBD: It might be worth clearing dev->irq by magic constant
672 * (e.g. PCI_UNDEFINED_IRQ). 460 * (e.g. PCI_UNDEFINED_IRQ).
673 */ 461 */
674 462
675 dev_info(&dev->dev, "PCI INT %c disabled\n", 'A' + pin); 463 dev_info(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
676 acpi_unregister_gsi(gsi); 464 acpi_unregister_gsi(gsi);
677} 465}
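
Much of the churn in this file follows from one decision, stated in acpi_pci_irq_add_entry(): store pins in the PCI encoding (1=INTA ... 4=INTD, 0=no pin) rather than the _PRT's 0-based one, converting once at parse time. A standalone sketch of the two encodings and the pin_name() helper (the main() harness is illustrative):

	#include <stdio.h>

	static char pin_name(int pci_pin)	/* PCI encoding: 1=INTA ... 4=INTD */
	{
		return 'A' + pci_pin - 1;
	}

	int main(void)
	{
		int prt_pin = 0;		/* _PRT encoding: 0=INTA */
		int pci_pin = prt_pin + 1;	/* converted once, at parse time */

		printf("INT%c\n", pin_name(pci_pin));	/* prints "INTA" */
		return 0;
	}

With the conversion done once, every consumer (quirk matching, bridge swizzling, log messages) can assume the PCI encoding.
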
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index e52ad91ce2dc..1c6e73c7865e 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -796,10 +796,6 @@ static int irqrouter_resume(struct sys_device *dev)
796 struct list_head *node = NULL; 796 struct list_head *node = NULL;
797 struct acpi_pci_link *link = NULL; 797 struct acpi_pci_link *link = NULL;
798 798
799
800 /* Make sure SCI is enabled again (Apple firmware bug?) */
801 acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1);
802
803 list_for_each(node, &acpi_link.entries) { 799 list_for_each(node, &acpi_link.entries) {
804 link = list_entry(node, struct acpi_pci_link, node); 800 link = list_entry(node, struct acpi_pci_link, node);
805 if (!link) { 801 if (!link) {
@@ -912,7 +908,7 @@ static int __init acpi_irq_nobalance_set(char *str)
912 908
913__setup("acpi_irq_nobalance", acpi_irq_nobalance_set); 909__setup("acpi_irq_nobalance", acpi_irq_nobalance_set);
914 910
915int __init acpi_irq_balance_set(char *str) 911static int __init acpi_irq_balance_set(char *str)
916{ 912{
917 acpi_irq_balance = 1; 913 acpi_irq_balance = 1;
918 return 1; 914 return 1;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index bb7d50dd2818..c926e7d4a0d6 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -139,6 +139,8 @@ static int acpi_power_get_state(acpi_handle handle, int *state)
139{ 139{
140 acpi_status status = AE_OK; 140 acpi_status status = AE_OK;
141 unsigned long long sta = 0; 141 unsigned long long sta = 0;
142 char node_name[5];
143 struct acpi_buffer buffer = { sizeof(node_name), node_name };
142 144
143 145
144 if (!handle || !state) 146 if (!handle || !state)
@@ -151,8 +153,10 @@ static int acpi_power_get_state(acpi_handle handle, int *state)
151 *state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON: 153 *state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON:
152 ACPI_POWER_RESOURCE_STATE_OFF; 154 ACPI_POWER_RESOURCE_STATE_OFF;
153 155
156 acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
157
154 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n", 158 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n",
155 acpi_ut_get_node_name(handle), 159 node_name,
156 *state ? "on" : "off")); 160 *state ? "on" : "off"));
157 161
158 return 0; 162 return 0;
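
Here the replacement avoids acpi_ut_get_node_name(), which is ACPICA-internal, by calling acpi_get_name() with a caller-supplied buffer. Since ACPI_SINGLE_NAME returns one 4-character namespace segment, a fixed 5-byte buffer suffices and nothing needs freeing, in contrast to the ACPI_ALLOCATE_BUFFER conversions elsewhere in this patch:

	char node_name[5];			/* "XXXX" plus NUL */
	struct acpi_buffer buffer = { sizeof(node_name), node_name };

	acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
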
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/proc.c
index 4dbc2271acf5..428c911dba08 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/proc.c
@@ -28,8 +28,6 @@ static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset)
28{ 28{
29 int i; 29 int i;
30 30
31 ACPI_FUNCTION_TRACE("acpi_system_sleep_seq_show");
32
33 for (i = 0; i <= ACPI_STATE_S5; i++) { 31 for (i = 0; i <= ACPI_STATE_S5; i++) {
34 if (sleep_states[i]) { 32 if (sleep_states[i]) {
35 seq_printf(seq, "S%d ", i); 33 seq_printf(seq, "S%d ", i);
@@ -86,49 +84,44 @@ acpi_system_write_sleep(struct file *file,
86 84
87#ifdef HAVE_ACPI_LEGACY_ALARM 85#ifdef HAVE_ACPI_LEGACY_ALARM
88 86
87static u32 cmos_bcd_read(int offset, int rtc_control);
88
89static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset) 89static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
90{ 90{
91 u32 sec, min, hr; 91 u32 sec, min, hr;
92 u32 day, mo, yr, cent = 0; 92 u32 day, mo, yr, cent = 0;
93 u32 today = 0;
93 unsigned char rtc_control = 0; 94 unsigned char rtc_control = 0;
94 unsigned long flags; 95 unsigned long flags;
95 96
96 ACPI_FUNCTION_TRACE("acpi_system_alarm_seq_show");
97
98 spin_lock_irqsave(&rtc_lock, flags); 97 spin_lock_irqsave(&rtc_lock, flags);
99 98
100 sec = CMOS_READ(RTC_SECONDS_ALARM);
101 min = CMOS_READ(RTC_MINUTES_ALARM);
102 hr = CMOS_READ(RTC_HOURS_ALARM);
103 rtc_control = CMOS_READ(RTC_CONTROL); 99 rtc_control = CMOS_READ(RTC_CONTROL);
100 sec = cmos_bcd_read(RTC_SECONDS_ALARM, rtc_control);
101 min = cmos_bcd_read(RTC_MINUTES_ALARM, rtc_control);
102 hr = cmos_bcd_read(RTC_HOURS_ALARM, rtc_control);
104 103
105 /* If we ever get an FACP with proper values... */ 104 /* If we ever get an FACP with proper values... */
106 if (acpi_gbl_FADT.day_alarm) 105 if (acpi_gbl_FADT.day_alarm) {
 107 /* ACPI spec: only the low 6 bits should be used */ 106 /* ACPI spec: only the low 6 bits should be used */
108 day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F; 107 day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
109 else 108 if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
110 day = CMOS_READ(RTC_DAY_OF_MONTH); 109 day = bcd2bin(day);
110 } else
111 day = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
111 if (acpi_gbl_FADT.month_alarm) 112 if (acpi_gbl_FADT.month_alarm)
112 mo = CMOS_READ(acpi_gbl_FADT.month_alarm); 113 mo = cmos_bcd_read(acpi_gbl_FADT.month_alarm, rtc_control);
113 else 114 else {
114 mo = CMOS_READ(RTC_MONTH); 115 mo = cmos_bcd_read(RTC_MONTH, rtc_control);
116 today = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
117 }
115 if (acpi_gbl_FADT.century) 118 if (acpi_gbl_FADT.century)
116 cent = CMOS_READ(acpi_gbl_FADT.century); 119 cent = cmos_bcd_read(acpi_gbl_FADT.century, rtc_control);
117 120
118 yr = CMOS_READ(RTC_YEAR); 121 yr = cmos_bcd_read(RTC_YEAR, rtc_control);
119 122
120 spin_unlock_irqrestore(&rtc_lock, flags); 123 spin_unlock_irqrestore(&rtc_lock, flags);
121 124
122 if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
123 sec = bcd2bin(sec);
124 min = bcd2bin(min);
125 hr = bcd2bin(hr);
126 day = bcd2bin(day);
127 mo = bcd2bin(mo);
128 yr = bcd2bin(yr);
129 cent = bcd2bin(cent);
130 }
131
132 /* we're trusting the FADT (see above) */ 125 /* we're trusting the FADT (see above) */
133 if (!acpi_gbl_FADT.century) 126 if (!acpi_gbl_FADT.century)
134 /* If we're not trusting the FADT, we should at least make it 127 /* If we're not trusting the FADT, we should at least make it
@@ -153,6 +146,20 @@ static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
153 else 146 else
154 yr += cent * 100; 147 yr += cent * 100;
155 148
149 /*
150 * Show correct dates for alarms up to a month into the future.
151 * This solves issues for nearly all situations with the common
152 * 30-day alarm clocks in PC hardware.
153 */
154 if (day < today) {
155 if (mo < 12) {
156 mo += 1;
157 } else {
158 mo = 1;
159 yr += 1;
160 }
161 }
162
156 seq_printf(seq, "%4.4u-", yr); 163 seq_printf(seq, "%4.4u-", yr);
157 (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo); 164 (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
158 (day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day); 165 (day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day);
@@ -227,13 +234,11 @@ acpi_system_write_alarm(struct file *file,
227 int adjust = 0; 234 int adjust = 0;
228 unsigned char rtc_control = 0; 235 unsigned char rtc_control = 0;
229 236
230 ACPI_FUNCTION_TRACE("acpi_system_write_alarm");
231
232 if (count > sizeof(alarm_string) - 1) 237 if (count > sizeof(alarm_string) - 1)
233 return_VALUE(-EINVAL); 238 return -EINVAL;
234 239
235 if (copy_from_user(alarm_string, buffer, count)) 240 if (copy_from_user(alarm_string, buffer, count))
236 return_VALUE(-EFAULT); 241 return -EFAULT;
237 242
238 alarm_string[count] = '\0'; 243 alarm_string[count] = '\0';
239 244
@@ -334,7 +339,7 @@ acpi_system_write_alarm(struct file *file,
334 339
335 result = 0; 340 result = 0;
336 end: 341 end:
337 return_VALUE(result ? result : count); 342 return result ? result : count;
338} 343}
339#endif /* HAVE_ACPI_LEGACY_ALARM */ 344#endif /* HAVE_ACPI_LEGACY_ALARM */
340 345
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index a6b662c00b67..93f91142d7ad 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -42,7 +42,7 @@ void acpi_reboot(void)
42 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 42 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
43 case ACPI_ADR_SPACE_SYSTEM_IO: 43 case ACPI_ADR_SPACE_SYSTEM_IO:
44 printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n"); 44 printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n");
45 acpi_hw_low_level_write(8, reset_value, rr); 45 acpi_reset();
46 break; 46 break;
47 } 47 }
48 /* Wait ten seconds */ 48 /* Wait ten seconds */
diff --git a/drivers/acpi/resources/Makefile b/drivers/acpi/resources/Makefile
deleted file mode 100644
index 8de4f69dfa09..000000000000
--- a/drivers/acpi/resources/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
1#
2# Makefile for all Linux ACPI interpreter subdirectories
3#
4
5obj-y := rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
6 rscalc.o rsirq.o rsmemory.o rsutils.o
7
8obj-$(ACPI_FUTURE_USAGE) += rsdump.o
9
10EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index e53e590252c0..0619734895b2 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -10,7 +10,6 @@
10 10
11#include <acpi/acpi_bus.h> 11#include <acpi/acpi_bus.h>
12#include <acpi/acpi_drivers.h> 12#include <acpi/acpi_drivers.h>
13#include <acpi/actypes.h>
14#include <linux/wait.h> 13#include <linux/wait.h>
15#include <linux/delay.h> 14#include <linux/delay.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 39b7233c3485..c54d7b6c4066 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -10,7 +10,6 @@
10#include <linux/kthread.h> 10#include <linux/kthread.h>
11 11
12#include <acpi/acpi_drivers.h> 12#include <acpi/acpi_drivers.h>
13#include <acpi/acinterp.h> /* for acpi_ex_eisa_id_to_string() */
14 13
15#define _COMPONENT ACPI_BUS_COMPONENT 14#define _COMPONENT ACPI_BUS_COMPONENT
16ACPI_MODULE_NAME("scan"); 15ACPI_MODULE_NAME("scan");
diff --git a/drivers/acpi/sleep/sleep.h b/drivers/acpi/sleep.h
index cfaf8f5b0a14..cfaf8f5b0a14 100644
--- a/drivers/acpi/sleep/sleep.h
+++ b/drivers/acpi/sleep.h
diff --git a/drivers/acpi/sleep/Makefile b/drivers/acpi/sleep/Makefile
deleted file mode 100644
index f1fb888c2d29..000000000000
--- a/drivers/acpi/sleep/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1obj-y := wakeup.o
2obj-y += main.o
3obj-$(CONFIG_ACPI_SLEEP) += proc.o
4
5EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 6e4107f82403..391d0358a592 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -192,65 +192,6 @@ static struct attribute_group interrupt_stats_attr_group = {
192}; 192};
193static struct kobj_attribute *counter_attrs; 193static struct kobj_attribute *counter_attrs;
194 194
195static int count_num_gpes(void)
196{
197 int count = 0;
198 struct acpi_gpe_xrupt_info *gpe_xrupt_info;
199 struct acpi_gpe_block_info *gpe_block;
200 acpi_cpu_flags flags;
201
202 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
203
204 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
205 while (gpe_xrupt_info) {
206 gpe_block = gpe_xrupt_info->gpe_block_list_head;
207 while (gpe_block) {
208 count += gpe_block->register_count *
209 ACPI_GPE_REGISTER_WIDTH;
210 gpe_block = gpe_block->next;
211 }
212 gpe_xrupt_info = gpe_xrupt_info->next;
213 }
214 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
215
216 return count;
217}
218
219static int get_gpe_device(int index, acpi_handle *handle)
220{
221 struct acpi_gpe_xrupt_info *gpe_xrupt_info;
222 struct acpi_gpe_block_info *gpe_block;
223 acpi_cpu_flags flags;
224 struct acpi_namespace_node *node;
225
226 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
227
228 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
229 while (gpe_xrupt_info) {
230 gpe_block = gpe_xrupt_info->gpe_block_list_head;
231 node = gpe_block->node;
232 while (gpe_block) {
233 index -= gpe_block->register_count *
234 ACPI_GPE_REGISTER_WIDTH;
235 if (index < 0) {
236 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
237 /* return NULL if it's FADT GPE */
238 if (node->type != ACPI_TYPE_DEVICE)
239 *handle = NULL;
240 else
241 *handle = node;
242 return 0;
243 }
244 node = gpe_block->node;
245 gpe_block = gpe_block->next;
246 }
247 gpe_xrupt_info = gpe_xrupt_info->next;
248 }
249 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
250
251 return -ENODEV;
252}
253
254static void delete_gpe_attr_array(void) 195static void delete_gpe_attr_array(void)
255{ 196{
256 struct event_counter *tmp = all_counters; 197 struct event_counter *tmp = all_counters;
@@ -309,7 +250,7 @@ static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
309 goto end; 250 goto end;
310 251
311 if (index < num_gpes) { 252 if (index < num_gpes) {
312 result = get_gpe_device(index, handle); 253 result = acpi_get_gpe_device(index, handle);
313 if (result) { 254 if (result) {
314 ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND, 255 ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
315 "Invalid GPE 0x%x\n", index)); 256 "Invalid GPE 0x%x\n", index));
@@ -436,7 +377,7 @@ void acpi_irq_stats_init(void)
436 if (all_counters) 377 if (all_counters)
437 return; 378 return;
438 379
439 num_gpes = count_num_gpes(); 380 num_gpes = acpi_current_gpe_count;
440 num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA; 381 num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
441 382
442 all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1), 383 all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
diff --git a/drivers/acpi/tables/Makefile b/drivers/acpi/tables/Makefile
deleted file mode 100644
index 7385efa61622..000000000000
--- a/drivers/acpi/tables/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
1#
2# Makefile for all Linux ACPI interpreter subdirectories
3#
4
5obj-y := tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
6
7EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/utilities/Makefile b/drivers/acpi/utilities/Makefile
deleted file mode 100644
index 88eff14c4894..000000000000
--- a/drivers/acpi/utilities/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1#
2# Makefile for all Linux ACPI interpreter subdirectories
3#
4
5obj-y := utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
6 utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
7 utstate.o utmutex.o utobject.o utcache.o utresrc.o
8
9EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
deleted file mode 100644
index 245fa80cf600..000000000000
--- a/drivers/acpi/utilities/utcache.c
+++ /dev/null
@@ -1,314 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: utcache - local cache allocation routines
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45
46#define _COMPONENT ACPI_UTILITIES
47ACPI_MODULE_NAME("utcache")
48#ifdef ACPI_USE_LOCAL_CACHE
49/*******************************************************************************
50 *
51 * FUNCTION: acpi_os_create_cache
52 *
53 * PARAMETERS: cache_name - Ascii name for the cache
54 * object_size - Size of each cached object
55 * max_depth - Maximum depth of the cache (in objects)
56 * return_cache - Where the new cache object is returned
57 *
58 * RETURN: Status
59 *
60 * DESCRIPTION: Create a cache object
61 *
62 ******************************************************************************/
63acpi_status
64acpi_os_create_cache(char *cache_name,
65 u16 object_size,
66 u16 max_depth, struct acpi_memory_list ** return_cache)
67{
68 struct acpi_memory_list *cache;
69
70 ACPI_FUNCTION_ENTRY();
71
72 if (!cache_name || !return_cache || (object_size < 16)) {
73 return (AE_BAD_PARAMETER);
74 }
75
76 /* Create the cache object */
77
78 cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
79 if (!cache) {
80 return (AE_NO_MEMORY);
81 }
82
83 /* Populate the cache object and return it */
84
85 ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
86 cache->link_offset = 8;
87 cache->list_name = cache_name;
88 cache->object_size = object_size;
89 cache->max_depth = max_depth;
90
91 *return_cache = cache;
92 return (AE_OK);
93}
94
95/*******************************************************************************
96 *
97 * FUNCTION: acpi_os_purge_cache
98 *
99 * PARAMETERS: Cache - Handle to cache object
100 *
101 * RETURN: Status
102 *
103 * DESCRIPTION: Free all objects within the requested cache.
104 *
105 ******************************************************************************/
106
107acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
108{
109 char *next;
110
111 ACPI_FUNCTION_ENTRY();
112
113 if (!cache) {
114 return (AE_BAD_PARAMETER);
115 }
116
117 /* Walk the list of objects in this cache */
118
119 while (cache->list_head) {
120
121 /* Delete and unlink one cached state object */
122
123 next = *(ACPI_CAST_INDIRECT_PTR(char,
124 &(((char *)cache->
125 list_head)[cache->
126 link_offset])));
127 ACPI_FREE(cache->list_head);
128
129 cache->list_head = next;
130 cache->current_depth--;
131 }
132
133 return (AE_OK);
134}
135
136/*******************************************************************************
137 *
138 * FUNCTION: acpi_os_delete_cache
139 *
140 * PARAMETERS: Cache - Handle to cache object
141 *
142 * RETURN: Status
143 *
144 * DESCRIPTION: Free all objects within the requested cache and delete the
145 * cache object.
146 *
147 ******************************************************************************/
148
149acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache)
150{
151 acpi_status status;
152
153 ACPI_FUNCTION_ENTRY();
154
155 /* Purge all objects in the cache */
156
157 status = acpi_os_purge_cache(cache);
158 if (ACPI_FAILURE(status)) {
159 return (status);
160 }
161
162 /* Now we can delete the cache object */
163
164 ACPI_FREE(cache);
165 return (AE_OK);
166}
167
168/*******************************************************************************
169 *
170 * FUNCTION: acpi_os_release_object
171 *
172 * PARAMETERS: Cache - Handle to cache object
173 * Object - The object to be released
174 *
175 * RETURN: None
176 *
177 * DESCRIPTION: Release an object to the specified cache. If cache is full,
178 * the object is deleted.
179 *
180 ******************************************************************************/
181
182acpi_status
183acpi_os_release_object(struct acpi_memory_list * cache, void *object)
184{
185 acpi_status status;
186
187 ACPI_FUNCTION_ENTRY();
188
189 if (!cache || !object) {
190 return (AE_BAD_PARAMETER);
191 }
192
193 /* If cache is full, just free this object */
194
195 if (cache->current_depth >= cache->max_depth) {
196 ACPI_FREE(object);
197 ACPI_MEM_TRACKING(cache->total_freed++);
198 }
199
200 /* Otherwise put this object back into the cache */
201
202 else {
203 status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
204 if (ACPI_FAILURE(status)) {
205 return (status);
206 }
207
208 /* Mark the object as cached */
209
210 ACPI_MEMSET(object, 0xCA, cache->object_size);
211 ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_CACHED);
212
213 /* Put the object at the head of the cache list */
214
215 *(ACPI_CAST_INDIRECT_PTR(char,
216 &(((char *)object)[cache->
217 link_offset]))) =
218 cache->list_head;
219 cache->list_head = object;
220 cache->current_depth++;
221
222 (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
223 }
224
225 return (AE_OK);
226}
227
228/*******************************************************************************
229 *
230 * FUNCTION: acpi_os_acquire_object
231 *
232 * PARAMETERS: Cache - Handle to cache object
233 *
234 * RETURN: the acquired object. NULL on error
235 *
236 * DESCRIPTION: Get an object from the specified cache. If cache is empty,
237 * the object is allocated.
238 *
239 ******************************************************************************/
240
241void *acpi_os_acquire_object(struct acpi_memory_list *cache)
242{
243 acpi_status status;
244 void *object;
245
246 ACPI_FUNCTION_NAME(os_acquire_object);
247
248 if (!cache) {
249 return (NULL);
250 }
251
252 status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
253 if (ACPI_FAILURE(status)) {
254 return (NULL);
255 }
256
257 ACPI_MEM_TRACKING(cache->requests++);
258
259 /* Check the cache first */
260
261 if (cache->list_head) {
262
263 /* There is an object available, use it */
264
265 object = cache->list_head;
266 cache->list_head = *(ACPI_CAST_INDIRECT_PTR(char,
267 &(((char *)
268 object)[cache->
269 link_offset])));
270
271 cache->current_depth--;
272
273 ACPI_MEM_TRACKING(cache->hits++);
274 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
275 "Object %p from %s cache\n", object,
276 cache->list_name));
277
278 status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
279 if (ACPI_FAILURE(status)) {
280 return (NULL);
281 }
282
283 /* Clear (zero) the previously used Object */
284
285 ACPI_MEMSET(object, 0, cache->object_size);
286 } else {
287 /* The cache is empty, create a new object */
288
289 ACPI_MEM_TRACKING(cache->total_allocated++);
290
291#ifdef ACPI_DBG_TRACK_ALLOCATIONS
292 if ((cache->total_allocated - cache->total_freed) >
293 cache->max_occupied) {
294 cache->max_occupied =
295 cache->total_allocated - cache->total_freed;
296 }
297#endif
298
299 /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */
300
301 status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
302 if (ACPI_FAILURE(status)) {
303 return (NULL);
304 }
305
306 object = ACPI_ALLOCATE_ZEROED(cache->object_size);
307 if (!object) {
308 return (NULL);
309 }
310 }
311
312 return (object);
313}
314#endif /* ACPI_USE_LOCAL_CACHE */
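
Taken together, the two routines above implement an intrusive free list: a released object is poisoned with 0xCA, tagged ACPI_DESC_TYPE_CACHED, and linked through a pointer stored link_offset bytes inside the object itself, while acquire pops the list head and returns zeroed memory, falling back to a fresh allocation when the list is empty. A minimal userspace sketch of the same idea (hypothetical obj_cache/cache_acquire/cache_release names, plain malloc/free in place of ACPI_ALLOCATE_ZEROED/ACPI_FREE, and no locking or tracking counters):

#include <stdlib.h>
#include <string.h>

struct obj_cache {
	void *list_head;        /* head of the intrusive free list */
	size_t object_size;     /* size of each cached object */
	size_t link_offset;     /* where the "next" pointer lives inside an object */
	size_t current_depth, max_depth;
};

/* Address of the "next" slot embedded in a cached object. */
static void **cache_link(struct obj_cache *c, void *obj)
{
	return (void **)((char *)obj + c->link_offset);
}

static void *cache_acquire(struct obj_cache *c)
{
	void *obj = c->list_head;

	if (obj) {                              /* cache hit: pop the head */
		c->list_head = *cache_link(c, obj);
		c->current_depth--;
		memset(obj, 0, c->object_size); /* hand back zeroed memory */
		return obj;
	}
	return calloc(1, c->object_size);       /* cache empty: allocate fresh */
}

static void cache_release(struct obj_cache *c, void *obj)
{
	if (c->current_depth >= c->max_depth) { /* cache full: really free */
		free(obj);
		return;
	}
	*cache_link(c, obj) = c->list_head;     /* push at the head */
	c->list_head = obj;
	c->current_depth++;
}

Storing the next pointer inside the object keeps the cache allocation-free: the free list costs nothing beyond the objects it already holds.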
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index baa441929720..f261737636da 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -36,6 +36,7 @@
36#include <linux/backlight.h> 36#include <linux/backlight.h>
37#include <linux/thermal.h> 37#include <linux/thermal.h>
38#include <linux/video_output.h> 38#include <linux/video_output.h>
39#include <linux/sort.h>
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
40 41
41#include <acpi/acpi_bus.h> 42#include <acpi/acpi_bus.h>
@@ -481,6 +482,7 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
481 int status = AE_OK; 482 int status = AE_OK;
482 union acpi_object arg0 = { ACPI_TYPE_INTEGER }; 483 union acpi_object arg0 = { ACPI_TYPE_INTEGER };
483 struct acpi_object_list args = { 1, &arg0 }; 484 struct acpi_object_list args = { 1, &arg0 };
485 int state;
484 486
485 487
486 arg0.integer.value = level; 488 arg0.integer.value = level;
@@ -489,6 +491,10 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
489 status = acpi_evaluate_object(device->dev->handle, "_BCM", 491 status = acpi_evaluate_object(device->dev->handle, "_BCM",
490 &args, NULL); 492 &args, NULL);
491 device->brightness->curr = level; 493 device->brightness->curr = level;
494 for (state = 2; state < device->brightness->count; state++)
495 if (level == device->brightness->levels[state])
496 device->backlight->props.brightness = state - 2;
497
492 return status; 498 return status;
493} 499}
494 500
@@ -626,6 +632,16 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
626} 632}
627 633
628/* 634/*
635 * Simple comparison function used to sort backlight levels.
636 */
637
638static int
639acpi_video_cmp_level(const void *a, const void *b)
640{
641 return *(int *)a - *(int *)b;
642}
643
644/*
629 * Arg: 645 * Arg:
630 * device : video output device (LCD, CRT, ..) 646 * device : video output device (LCD, CRT, ..)
631 * 647 *
@@ -676,6 +692,10 @@ acpi_video_init_brightness(struct acpi_video_device *device)
676 count++; 692 count++;
677 } 693 }
678 694
695 /* don't sort the first two brightness levels */
696 sort(&br->levels[2], count - 2, sizeof(br->levels[2]),
697 acpi_video_cmp_level, NULL);
698
679 if (count < 2) 699 if (count < 2)
680 goto out_free_levels; 700 goto out_free_levels;
681 701
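
For context, the sort() pulled in via the new <linux/sort.h> include is the kernel's heapsort, called as sort(base, num, size, cmp, swap) with swap usually NULL; the comparator contract is the familiar negative/zero/positive one. Since levels[0] and levels[1] hold the AC/battery defaults returned by _BCL, only the tail of the array is sorted. A standalone sketch of the same call shape, with userspace qsort standing in for the kernel sort():

#include <stdio.h>
#include <stdlib.h>

/* Same comparator shape the patch adds: negative/zero/positive. */
static int cmp_level(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

int main(void)
{
	/* levels[0]/levels[1] are the AC/battery defaults; the rest
	 * are the selectable brightness points, possibly unordered. */
	int levels[] = { 80, 50, 100, 20, 60, 40 };
	int count = sizeof(levels) / sizeof(levels[0]);

	/* Sort everything except the first two entries, as the patch does. */
	qsort(&levels[2], count - 2, sizeof(levels[2]), cmp_level);

	for (int i = 0; i < count; i++)
		printf("%d ", levels[i]);
	printf("\n");   /* prints: 80 50 20 40 60 100 */
	return 0;
}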
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index f022eb6f5637..50e3d2dbf3af 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -234,7 +234,7 @@ EXPORT_SYMBOL(acpi_video_display_switch_support);
234 * To force that backlight or display output switching is processed by vendor 234 * To force that backlight or display output switching is processed by vendor
235 * specific acpi drivers or video.ko driver. 235 * specific acpi drivers or video.ko driver.
236 */ 236 */
237int __init acpi_backlight(char *str) 237static int __init acpi_backlight(char *str)
238{ 238{
239 if (str == NULL || *str == '\0') 239 if (str == NULL || *str == '\0')
240 return 1; 240 return 1;
@@ -250,7 +250,7 @@ int __init acpi_backlight(char *str)
250} 250}
251__setup("acpi_backlight=", acpi_backlight); 251__setup("acpi_backlight=", acpi_backlight);
252 252
253int __init acpi_display_output(char *str) 253static int __init acpi_display_output(char *str)
254{ 254{
255 if (str == NULL || *str == '\0') 255 if (str == NULL || *str == '\0')
256 return 1; 256 return 1;
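
Marking acpi_backlight() and acpi_display_output() static is safe because nothing calls them by name: __setup() registers a { prefix, handler } record that the boot-time command-line parser walks, and a handler return of 1 means the option was consumed. A rough userspace model of that dispatch (hypothetical setup_param struct and params[] table; the real records live in a dedicated linker section rather than an explicit array):

#include <stdio.h>
#include <string.h>

struct setup_param {
	const char *prefix;          /* e.g. "acpi_backlight=" */
	int (*handler)(char *val);   /* gets the text after the '=' */
};

static int acpi_backlight(char *str)
{
	printf("backlight policy: %s\n", str);
	return 1;   /* 1 = parameter consumed, mirroring __setup handlers */
}

/* Stand-in for the __setup() section: an ordinary table. */
static const struct setup_param params[] = {
	{ "acpi_backlight=", acpi_backlight },
};

static void parse_cmdline_token(char *tok)
{
	for (size_t i = 0; i < sizeof(params) / sizeof(params[0]); i++) {
		size_t n = strlen(params[i].prefix);
		if (strncmp(tok, params[i].prefix, n) == 0) {
			params[i].handler(tok + n);
			return;
		}
	}
}

int main(void)
{
	char tok[] = "acpi_backlight=vendor";
	parse_cmdline_token(tok);
	return 0;
}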
diff --git a/drivers/acpi/sleep/wakeup.c b/drivers/acpi/wakeup.c
index dea4c23df764..2d34806d45dd 100644
--- a/drivers/acpi/sleep/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -8,7 +8,6 @@
8#include <acpi/acpi_drivers.h> 8#include <acpi/acpi_drivers.h>
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/types.h> 10#include <linux/types.h>
11#include <acpi/acevents.h>
12#include "sleep.h" 11#include "sleep.h"
13 12
14#define _COMPONENT ACPI_SYSTEM_COMPONENT 13#define _COMPONENT ACPI_SYSTEM_COMPONENT
@@ -28,8 +27,6 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state)
28{ 27{
29 struct list_head *node, *next; 28 struct list_head *node, *next;
30 29
31 ACPI_FUNCTION_TRACE("acpi_enable_wakeup_device_prep");
32
33 spin_lock(&acpi_device_lock); 30 spin_lock(&acpi_device_lock);
34 list_for_each_safe(node, next, &acpi_wakeup_device_list) { 31 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
35 struct acpi_device *dev = container_of(node, 32 struct acpi_device *dev = container_of(node,
@@ -61,7 +58,6 @@ void acpi_enable_wakeup_device(u8 sleep_state)
61 * Caution: this routine must be invoked when interrupt is disabled 58 * Caution: this routine must be invoked when interrupt is disabled
62 * Refer ACPI2.0: P212 59 * Refer ACPI2.0: P212
63 */ 60 */
64 ACPI_FUNCTION_TRACE("acpi_enable_wakeup_device");
65 spin_lock(&acpi_device_lock); 61 spin_lock(&acpi_device_lock);
66 list_for_each_safe(node, next, &acpi_wakeup_device_list) { 62 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
67 struct acpi_device *dev = 63 struct acpi_device *dev =
@@ -103,8 +99,6 @@ void acpi_disable_wakeup_device(u8 sleep_state)
103{ 99{
104 struct list_head *node, *next; 100 struct list_head *node, *next;
105 101
106 ACPI_FUNCTION_TRACE("acpi_disable_wakeup_device");
107
108 spin_lock(&acpi_device_lock); 102 spin_lock(&acpi_device_lock);
109 list_for_each_safe(node, next, &acpi_wakeup_device_list) { 103 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
110 struct acpi_device *dev = 104 struct acpi_device *dev =
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 6b94fb7be5f2..00c46e0b40e4 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -12,9 +12,10 @@
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/io.h>
15#include <linux/amba/bus.h> 16#include <linux/amba/bus.h>
16 17
17#include <asm/io.h> 18#include <asm/irq.h>
18#include <asm/sizes.h> 19#include <asm/sizes.h>
19 20
20#define to_amba_device(d) container_of(d, struct amba_device, dev) 21#define to_amba_device(d) container_of(d, struct amba_device, dev)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 656448c7fef9..7f701cbe14ab 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -105,7 +105,7 @@ enum {
105 board_ahci_ign_iferr = 2, 105 board_ahci_ign_iferr = 2,
106 board_ahci_sb600 = 3, 106 board_ahci_sb600 = 3,
107 board_ahci_mv = 4, 107 board_ahci_mv = 4,
108 board_ahci_sb700 = 5, 108 board_ahci_sb700 = 5, /* for SB700 and SB800 */
109 board_ahci_mcp65 = 6, 109 board_ahci_mcp65 = 6,
110 board_ahci_nopmp = 7, 110 board_ahci_nopmp = 7,
111 111
@@ -439,7 +439,7 @@ static const struct ata_port_info ahci_port_info[] = {
439 .udma_mask = ATA_UDMA6, 439 .udma_mask = ATA_UDMA6,
440 .port_ops = &ahci_ops, 440 .port_ops = &ahci_ops,
441 }, 441 },
442 /* board_ahci_sb700 */ 442 /* board_ahci_sb700, for SB700 and SB800 */
443 { 443 {
444 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL), 444 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
445 .flags = AHCI_FLAG_COMMON, 445 .flags = AHCI_FLAG_COMMON,
@@ -2446,6 +2446,8 @@ static void ahci_print_info(struct ata_host *host)
2446 speed_s = "1.5"; 2446 speed_s = "1.5";
2447 else if (speed == 2) 2447 else if (speed == 2)
2448 speed_s = "3"; 2448 speed_s = "3";
2449 else if (speed == 3)
2450 speed_s = "6";
2449 else 2451 else
2450 speed_s = "?"; 2452 speed_s = "?";
2451 2453
@@ -2610,6 +2612,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2610 (pdev->revision == 0xa1 || pdev->revision == 0xa2)) 2612 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
2611 hpriv->flags |= AHCI_HFLAG_NO_MSI; 2613 hpriv->flags |= AHCI_HFLAG_NO_MSI;
2612 2614
2615 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
2616 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
2617 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
2618
2613 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev)) 2619 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
2614 pci_intx(pdev, 1); 2620 pci_intx(pdev, 1);
2615 2621
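
Rather than adding a board_ahci_sb800 entry, the patch keeps one board id for the SB700 family and differentiates by PCI revision at probe time, clearing the IGN_SERR_INTERNAL workaround on SB800 parts. A compact sketch of that revision-gated quirk pattern (the HFLAG bit value here is made up for the sketch; the 0x40 cutoff matches the hunk above):

#include <stdio.h>

#define HFLAG_IGN_SERR_INTERNAL  (1u << 0)   /* illustrative bit value */

struct host_priv { unsigned int flags; };

/* Start from the table defaults, then refine by silicon revision. */
static void apply_sb700_family_quirks(struct host_priv *hpriv,
				      unsigned char pci_revision)
{
	hpriv->flags |= HFLAG_IGN_SERR_INTERNAL;   /* table default (SB700) */

	/* SB800 (revision >= 0x40 in this family) fixed the SERR issue. */
	if (pci_revision >= 0x40)
		hpriv->flags &= ~HFLAG_IGN_SERR_INTERNAL;
}

int main(void)
{
	struct host_priv sb700 = { 0 }, sb800 = { 0 };

	apply_sb700_family_quirks(&sb700, 0x3a);
	apply_sb700_family_quirks(&sb800, 0x40);
	printf("SB700 flags=%#x  SB800 flags=%#x\n", sb700.flags, sb800.flags);
	return 0;
}

Keeping the quirk decision in the probe path means the shared port-info table stays small while each chip still gets the right behavior.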
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 5fdf1678d0cc..887d8f46a287 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -154,11 +154,13 @@ struct piix_map_db {
154 154
155struct piix_host_priv { 155struct piix_host_priv {
156 const int *map; 156 const int *map;
157 u32 saved_iocfg;
157 void __iomem *sidpr; 158 void __iomem *sidpr;
158}; 159};
159 160
160static int piix_init_one(struct pci_dev *pdev, 161static int piix_init_one(struct pci_dev *pdev,
161 const struct pci_device_id *ent); 162 const struct pci_device_id *ent);
163static void piix_remove_one(struct pci_dev *pdev);
162static int piix_pata_prereset(struct ata_link *link, unsigned long deadline); 164static int piix_pata_prereset(struct ata_link *link, unsigned long deadline);
163static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev); 165static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev);
164static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev); 166static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
@@ -296,7 +298,7 @@ static struct pci_driver piix_pci_driver = {
296 .name = DRV_NAME, 298 .name = DRV_NAME,
297 .id_table = piix_pci_tbl, 299 .id_table = piix_pci_tbl,
298 .probe = piix_init_one, 300 .probe = piix_init_one,
299 .remove = ata_pci_remove_one, 301 .remove = piix_remove_one,
300#ifdef CONFIG_PM 302#ifdef CONFIG_PM
301 .suspend = piix_pci_device_suspend, 303 .suspend = piix_pci_device_suspend,
302 .resume = piix_pci_device_resume, 304 .resume = piix_pci_device_resume,
@@ -308,7 +310,7 @@ static struct scsi_host_template piix_sht = {
308}; 310};
309 311
310static struct ata_port_operations piix_pata_ops = { 312static struct ata_port_operations piix_pata_ops = {
311 .inherits = &ata_bmdma_port_ops, 313 .inherits = &ata_bmdma32_port_ops,
312 .cable_detect = ata_cable_40wire, 314 .cable_detect = ata_cable_40wire,
313 .set_piomode = piix_set_piomode, 315 .set_piomode = piix_set_piomode,
314 .set_dmamode = piix_set_dmamode, 316 .set_dmamode = piix_set_dmamode,
@@ -610,8 +612,9 @@ static const struct ich_laptop ich_laptop[] = {
610static int ich_pata_cable_detect(struct ata_port *ap) 612static int ich_pata_cable_detect(struct ata_port *ap)
611{ 613{
612 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 614 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
615 struct piix_host_priv *hpriv = ap->host->private_data;
613 const struct ich_laptop *lap = &ich_laptop[0]; 616 const struct ich_laptop *lap = &ich_laptop[0];
614 u8 tmp, mask; 617 u8 mask;
615 618
616 /* Check for specials - Acer Aspire 5602WLMi */ 619 /* Check for specials - Acer Aspire 5602WLMi */
617 while (lap->device) { 620 while (lap->device) {
@@ -625,8 +628,7 @@ static int ich_pata_cable_detect(struct ata_port *ap)
625 628
626 /* check BIOS cable detect results */ 629 /* check BIOS cable detect results */
627 mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC; 630 mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
628 pci_read_config_byte(pdev, PIIX_IOCFG, &tmp); 631 if ((hpriv->saved_iocfg & mask) == 0)
629 if ((tmp & mask) == 0)
630 return ATA_CBL_PATA40; 632 return ATA_CBL_PATA40;
631 return ATA_CBL_PATA80; 633 return ATA_CBL_PATA80;
632} 634}
@@ -1350,7 +1352,7 @@ static int __devinit piix_init_sidpr(struct ata_host *host)
1350 return 0; 1352 return 0;
1351} 1353}
1352 1354
1353static void piix_iocfg_bit18_quirk(struct pci_dev *pdev) 1355static void piix_iocfg_bit18_quirk(struct ata_host *host)
1354{ 1356{
1355 static const struct dmi_system_id sysids[] = { 1357 static const struct dmi_system_id sysids[] = {
1356 { 1358 {
@@ -1367,7 +1369,8 @@ static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
1367 1369
1368 { } /* terminate list */ 1370 { } /* terminate list */
1369 }; 1371 };
1370 u32 iocfg; 1372 struct pci_dev *pdev = to_pci_dev(host->dev);
1373 struct piix_host_priv *hpriv = host->private_data;
1371 1374
1372 if (!dmi_check_system(sysids)) 1375 if (!dmi_check_system(sysids))
1373 return; 1376 return;
@@ -1376,12 +1379,11 @@ static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
1376 * seem to use it to disable a channel. Clear the bit on the 1379 * seem to use it to disable a channel. Clear the bit on the
1377 * affected systems. 1380 * affected systems.
1378 */ 1381 */
1379 pci_read_config_dword(pdev, PIIX_IOCFG, &iocfg); 1382 if (hpriv->saved_iocfg & (1 << 18)) {
1380 if (iocfg & (1 << 18)) {
1381 dev_printk(KERN_INFO, &pdev->dev, 1383 dev_printk(KERN_INFO, &pdev->dev,
1382 "applying IOCFG bit18 quirk\n"); 1384 "applying IOCFG bit18 quirk\n");
1383 iocfg &= ~(1 << 18); 1385 pci_write_config_dword(pdev, PIIX_IOCFG,
1384 pci_write_config_dword(pdev, PIIX_IOCFG, iocfg); 1386 hpriv->saved_iocfg & ~(1 << 18));
1385 } 1387 }
1386} 1388}
1387 1389
@@ -1430,6 +1432,17 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1430 if (rc) 1432 if (rc)
1431 return rc; 1433 return rc;
1432 1434
1435 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
1436 if (!hpriv)
1437 return -ENOMEM;
1438
1439 /* Save IOCFG, this will be used for cable detection, quirk
1440 * detection and restoration on detach. This is necessary
1441 * because some ACPI implementations mess up cable related
1442 * bits on _STM. Reported on kernel bz#11879.
1443 */
1444 pci_read_config_dword(pdev, PIIX_IOCFG, &hpriv->saved_iocfg);
1445
1433 /* ICH6R may be driven by either ata_piix or ahci driver 1446 /* ICH6R may be driven by either ata_piix or ahci driver
1434 * regardless of BIOS configuration. Make sure AHCI mode is 1447 * regardless of BIOS configuration. Make sure AHCI mode is
1435 * off. 1448 * off.
@@ -1441,10 +1454,6 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1441 } 1454 }
1442 1455
1443 /* SATA map init can change port_info, do it before prepping host */ 1456 /* SATA map init can change port_info, do it before prepping host */
1444 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
1445 if (!hpriv)
1446 return -ENOMEM;
1447
1448 if (port_flags & ATA_FLAG_SATA) 1457 if (port_flags & ATA_FLAG_SATA)
1449 hpriv->map = piix_init_sata_map(pdev, port_info, 1458 hpriv->map = piix_init_sata_map(pdev, port_info,
1450 piix_map_db_table[ent->driver_data]); 1459 piix_map_db_table[ent->driver_data]);
@@ -1463,7 +1472,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1463 } 1472 }
1464 1473
1465 /* apply IOCFG bit18 quirk */ 1474 /* apply IOCFG bit18 quirk */
1466 piix_iocfg_bit18_quirk(pdev); 1475 piix_iocfg_bit18_quirk(host);
1467 1476
1468 /* On ICH5, some BIOSen disable the interrupt using the 1477 /* On ICH5, some BIOSen disable the interrupt using the
1469 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. 1478 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
@@ -1488,6 +1497,16 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1488 return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht); 1497 return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht);
1489} 1498}
1490 1499
1500static void piix_remove_one(struct pci_dev *pdev)
1501{
1502 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1503 struct piix_host_priv *hpriv = host->private_data;
1504
1505 pci_write_config_dword(pdev, PIIX_IOCFG, hpriv->saved_iocfg);
1506
1507 ata_pci_remove_one(pdev);
1508}
1509
1491static int __init piix_init(void) 1510static int __init piix_init(void)
1492{ 1511{
1493 int rc; 1512 int rc;
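
The ata_piix rework establishes a snapshot-and-restore discipline for IOCFG: read it once at probe, before ACPI _STM can clobber it, answer cable-detect and bit-18 quirk questions from the saved copy, and write the original value back in the new piix_remove_one(). A minimal userspace model of that pattern (cfg_read/cfg_write are stand-ins for pci_read_config_dword/pci_write_config_dword, and fake_iocfg plays the hardware register):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_iocfg = 0x00040001;   /* pretend hardware register */

static void cfg_read(uint32_t *val) { *val = fake_iocfg; }
static void cfg_write(uint32_t val) { fake_iocfg = val; }

struct host_priv { uint32_t saved_iocfg; };

static void probe(struct host_priv *hpriv)
{
	/* Snapshot once, before firmware calls can clobber the register. */
	cfg_read(&hpriv->saved_iocfg);

	/* bit18 quirk: decide from the snapshot, write the fixed value. */
	if (hpriv->saved_iocfg & (1u << 18))
		cfg_write(hpriv->saved_iocfg & ~(1u << 18));
}

static void detach(struct host_priv *hpriv)
{
	cfg_write(hpriv->saved_iocfg);   /* leave the device as we found it */
}

int main(void)
{
	struct host_priv hpriv;

	probe(&hpriv);
	fake_iocfg ^= 0xff;              /* pretend something changed it */
	detach(&hpriv);
	printf("restored IOCFG = %#x\n", (unsigned)fake_iocfg);
	return 0;
}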
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index ef02e488d468..6273d98d00eb 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -19,12 +19,6 @@
19#include "libata.h" 19#include "libata.h"
20 20
21#include <acpi/acpi_bus.h> 21#include <acpi/acpi_bus.h>
22#include <acpi/acnames.h>
23#include <acpi/acnamesp.h>
24#include <acpi/acparser.h>
25#include <acpi/acexcep.h>
26#include <acpi/acmacros.h>
27#include <acpi/actypes.h>
28 22
29enum { 23enum {
30 ATA_ACPI_FILTER_SETXFER = 1 << 0, 24 ATA_ACPI_FILTER_SETXFER = 1 << 0,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f178a450ec08..c507a9ac78f4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1007,6 +1007,7 @@ static const char *sata_spd_string(unsigned int spd)
1007 static const char * const spd_str[] = { 1007 static const char * const spd_str[] = {
1008 "1.5 Gbps", 1008 "1.5 Gbps",
1009 "3.0 Gbps", 1009 "3.0 Gbps",
1010 "6.0 Gbps",
1010 }; 1011 };
1011 1012
1012 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) 1013 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
@@ -2000,6 +2001,10 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
2000 as the caller should know this */ 2001 as the caller should know this */
2001 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) 2002 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
2002 return 0; 2003 return 0;
2004 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
2005 if (ata_id_is_cfa(adev->id)
2006 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
2007 return 0;
2003 /* PIO3 and higher it is mandatory */ 2008 /* PIO3 and higher it is mandatory */
2004 if (adev->pio_mode > XFER_PIO_2) 2009 if (adev->pio_mode > XFER_PIO_2)
2005 return 1; 2010 return 1;
@@ -4551,7 +4556,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
4551 struct scatterlist *sg = qc->sg; 4556 struct scatterlist *sg = qc->sg;
4552 int dir = qc->dma_dir; 4557 int dir = qc->dma_dir;
4553 4558
4554 WARN_ON(sg == NULL); 4559 WARN_ON_ONCE(sg == NULL);
4555 4560
4556 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4561 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4557 4562
@@ -4771,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
4771 struct ata_port *ap = qc->ap; 4776 struct ata_port *ap = qc->ap;
4772 unsigned int tag; 4777 unsigned int tag;
4773 4778
4774 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4779 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4775 4780
4776 qc->flags = 0; 4781 qc->flags = 0;
4777 tag = qc->tag; 4782 tag = qc->tag;
@@ -4786,8 +4791,8 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
4786 struct ata_port *ap = qc->ap; 4791 struct ata_port *ap = qc->ap;
4787 struct ata_link *link = qc->dev->link; 4792 struct ata_link *link = qc->dev->link;
4788 4793
4789 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4794 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4790 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4795 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4791 4796
4792 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4797 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4793 ata_sg_clean(qc); 4798 ata_sg_clean(qc);
@@ -4873,7 +4878,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
4873 struct ata_device *dev = qc->dev; 4878 struct ata_device *dev = qc->dev;
4874 struct ata_eh_info *ehi = &dev->link->eh_info; 4879 struct ata_eh_info *ehi = &dev->link->eh_info;
4875 4880
4876 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN); 4881 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4877 4882
4878 if (unlikely(qc->err_mask)) 4883 if (unlikely(qc->err_mask))
4879 qc->flags |= ATA_QCFLAG_FAILED; 4884 qc->flags |= ATA_QCFLAG_FAILED;
@@ -4995,16 +5000,16 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
4995 * check is skipped for old EH because it reuses active qc to 5000 * check is skipped for old EH because it reuses active qc to
4996 * request ATAPI sense. 5001 * request ATAPI sense.
4997 */ 5002 */
4998 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5003 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4999 5004
5000 if (ata_is_ncq(prot)) { 5005 if (ata_is_ncq(prot)) {
5001 WARN_ON(link->sactive & (1 << qc->tag)); 5006 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5002 5007
5003 if (!link->sactive) 5008 if (!link->sactive)
5004 ap->nr_active_links++; 5009 ap->nr_active_links++;
5005 link->sactive |= 1 << qc->tag; 5010 link->sactive |= 1 << qc->tag;
5006 } else { 5011 } else {
5007 WARN_ON(link->sactive); 5012 WARN_ON_ONCE(link->sactive);
5008 5013
5009 ap->nr_active_links++; 5014 ap->nr_active_links++;
5010 link->active_tag = qc->tag; 5015 link->active_tag = qc->tag;
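
The one-line spd_str[] addition works because sata_spd_string() is already a guarded 1-based table lookup: SPD 0 and anything past the table end fall back to "<unknown>" rather than indexing out of bounds, so new link speeds only need a new string. The same shape as a standalone sketch:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",   /* the entry the patch adds */
	};

	/* spd is 1-based; 0 or out-of-range values get a safe fallback. */
	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

int main(void)
{
	for (unsigned int spd = 0; spd <= 4; spd++)
		printf("spd=%u -> %s\n", spd, sata_spd_string(spd));
	return 0;
}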
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 9033d164c4ec..0eae9b453556 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -66,6 +66,7 @@ const struct ata_port_operations ata_sff_port_ops = {
66 66
67 .port_start = ata_sff_port_start, 67 .port_start = ata_sff_port_start,
68}; 68};
69EXPORT_SYMBOL_GPL(ata_sff_port_ops);
69 70
70const struct ata_port_operations ata_bmdma_port_ops = { 71const struct ata_port_operations ata_bmdma_port_ops = {
71 .inherits = &ata_sff_port_ops, 72 .inherits = &ata_sff_port_ops,
@@ -77,6 +78,14 @@ const struct ata_port_operations ata_bmdma_port_ops = {
77 .bmdma_stop = ata_bmdma_stop, 78 .bmdma_stop = ata_bmdma_stop,
78 .bmdma_status = ata_bmdma_status, 79 .bmdma_status = ata_bmdma_status,
79}; 80};
81EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
82
83const struct ata_port_operations ata_bmdma32_port_ops = {
84 .inherits = &ata_bmdma_port_ops,
85
86 .sff_data_xfer = ata_sff_data_xfer32,
87};
88EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
80 89
81/** 90/**
82 * ata_fill_sg - Fill PCI IDE PRD table 91 * ata_fill_sg - Fill PCI IDE PRD table
@@ -166,8 +175,9 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
166 blen = len & 0xffff; 175 blen = len & 0xffff;
167 ap->prd[pi].addr = cpu_to_le32(addr); 176 ap->prd[pi].addr = cpu_to_le32(addr);
168 if (blen == 0) { 177 if (blen == 0) {
169 /* Some PATA chipsets like the CS5530 can't 178 /* Some PATA chipsets like the CS5530 can't
170 cope with 0x0000 meaning 64K as the spec says */ 179 cope with 0x0000 meaning 64K as the spec
180 says */
171 ap->prd[pi].flags_len = cpu_to_le32(0x8000); 181 ap->prd[pi].flags_len = cpu_to_le32(0x8000);
172 blen = 0x8000; 182 blen = 0x8000;
173 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000); 183 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
@@ -200,6 +210,7 @@ void ata_sff_qc_prep(struct ata_queued_cmd *qc)
200 210
201 ata_fill_sg(qc); 211 ata_fill_sg(qc);
202} 212}
213EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
203 214
204/** 215/**
205 * ata_sff_dumb_qc_prep - Prepare taskfile for submission 216 * ata_sff_dumb_qc_prep - Prepare taskfile for submission
@@ -217,6 +228,7 @@ void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
217 228
218 ata_fill_sg_dumb(qc); 229 ata_fill_sg_dumb(qc);
219} 230}
231EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
220 232
221/** 233/**
222 * ata_sff_check_status - Read device status reg & clear interrupt 234 * ata_sff_check_status - Read device status reg & clear interrupt
@@ -233,6 +245,7 @@ u8 ata_sff_check_status(struct ata_port *ap)
233{ 245{
234 return ioread8(ap->ioaddr.status_addr); 246 return ioread8(ap->ioaddr.status_addr);
235} 247}
248EXPORT_SYMBOL_GPL(ata_sff_check_status);
236 249
237/** 250/**
238 * ata_sff_altstatus - Read device alternate status reg 251 * ata_sff_altstatus - Read device alternate status reg
@@ -275,7 +288,7 @@ static u8 ata_sff_irq_status(struct ata_port *ap)
275 status = ata_sff_altstatus(ap); 288 status = ata_sff_altstatus(ap);
276 /* Not us: We are busy */ 289 /* Not us: We are busy */
277 if (status & ATA_BUSY) 290 if (status & ATA_BUSY)
278 return status; 291 return status;
279 } 292 }
280 /* Clear INTRQ latch */ 293 /* Clear INTRQ latch */
281 status = ap->ops->sff_check_status(ap); 294 status = ap->ops->sff_check_status(ap);
@@ -319,6 +332,7 @@ void ata_sff_pause(struct ata_port *ap)
319 ata_sff_sync(ap); 332 ata_sff_sync(ap);
320 ndelay(400); 333 ndelay(400);
321} 334}
335EXPORT_SYMBOL_GPL(ata_sff_pause);
322 336
323/** 337/**
324 * ata_sff_dma_pause - Pause before commencing DMA 338 * ata_sff_dma_pause - Pause before commencing DMA
@@ -327,7 +341,7 @@ void ata_sff_pause(struct ata_port *ap)
327 * Perform I/O fencing and ensure sufficient cycle delays occur 341 * Perform I/O fencing and ensure sufficient cycle delays occur
328 * for the HDMA1:0 transition 342 * for the HDMA1:0 transition
329 */ 343 */
330 344
331void ata_sff_dma_pause(struct ata_port *ap) 345void ata_sff_dma_pause(struct ata_port *ap)
332{ 346{
333 if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { 347 if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
@@ -341,6 +355,7 @@ void ata_sff_dma_pause(struct ata_port *ap)
341 corruption. */ 355 corruption. */
342 BUG(); 356 BUG();
343} 357}
358EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
344 359
345/** 360/**
346 * ata_sff_busy_sleep - sleep until BSY clears, or timeout 361 * ata_sff_busy_sleep - sleep until BSY clears, or timeout
@@ -396,6 +411,7 @@ int ata_sff_busy_sleep(struct ata_port *ap,
396 411
397 return 0; 412 return 0;
398} 413}
414EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
399 415
400static int ata_sff_check_ready(struct ata_link *link) 416static int ata_sff_check_ready(struct ata_link *link)
401{ 417{
@@ -422,6 +438,7 @@ int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
422{ 438{
423 return ata_wait_ready(link, deadline, ata_sff_check_ready); 439 return ata_wait_ready(link, deadline, ata_sff_check_ready);
424} 440}
441EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
425 442
426/** 443/**
427 * ata_sff_dev_select - Select device 0/1 on ATA bus 444 * ata_sff_dev_select - Select device 0/1 on ATA bus
@@ -449,6 +466,7 @@ void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
449 iowrite8(tmp, ap->ioaddr.device_addr); 466 iowrite8(tmp, ap->ioaddr.device_addr);
450 ata_sff_pause(ap); /* needed; also flushes, for mmio */ 467 ata_sff_pause(ap); /* needed; also flushes, for mmio */
451} 468}
469EXPORT_SYMBOL_GPL(ata_sff_dev_select);
452 470
453/** 471/**
454 * ata_dev_select - Select device 0/1 on ATA bus 472 * ata_dev_select - Select device 0/1 on ATA bus
@@ -513,6 +531,7 @@ u8 ata_sff_irq_on(struct ata_port *ap)
513 531
514 return tmp; 532 return tmp;
515} 533}
534EXPORT_SYMBOL_GPL(ata_sff_irq_on);
516 535
517/** 536/**
518 * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt. 537 * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
@@ -534,6 +553,7 @@ void ata_sff_irq_clear(struct ata_port *ap)
534 553
535 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); 554 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
536} 555}
556EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
537 557
538/** 558/**
539 * ata_sff_tf_load - send taskfile registers to host controller 559 * ata_sff_tf_load - send taskfile registers to host controller
@@ -558,7 +578,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
558 } 578 }
559 579
560 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 580 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
561 WARN_ON(!ioaddr->ctl_addr); 581 WARN_ON_ONCE(!ioaddr->ctl_addr);
562 iowrite8(tf->hob_feature, ioaddr->feature_addr); 582 iowrite8(tf->hob_feature, ioaddr->feature_addr);
563 iowrite8(tf->hob_nsect, ioaddr->nsect_addr); 583 iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
564 iowrite8(tf->hob_lbal, ioaddr->lbal_addr); 584 iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
@@ -593,6 +613,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
593 613
594 ata_wait_idle(ap); 614 ata_wait_idle(ap);
595} 615}
616EXPORT_SYMBOL_GPL(ata_sff_tf_load);
596 617
597/** 618/**
598 * ata_sff_tf_read - input device's ATA taskfile shadow registers 619 * ata_sff_tf_read - input device's ATA taskfile shadow registers
@@ -630,9 +651,10 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
630 iowrite8(tf->ctl, ioaddr->ctl_addr); 651 iowrite8(tf->ctl, ioaddr->ctl_addr);
631 ap->last_ctl = tf->ctl; 652 ap->last_ctl = tf->ctl;
632 } else 653 } else
633 WARN_ON(1); 654 WARN_ON_ONCE(1);
634 } 655 }
635} 656}
657EXPORT_SYMBOL_GPL(ata_sff_tf_read);
636 658
637/** 659/**
638 * ata_sff_exec_command - issue ATA command to host controller 660 * ata_sff_exec_command - issue ATA command to host controller
@@ -652,6 +674,7 @@ void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
652 iowrite8(tf->command, ap->ioaddr.command_addr); 674 iowrite8(tf->command, ap->ioaddr.command_addr);
653 ata_sff_pause(ap); 675 ata_sff_pause(ap);
654} 676}
677EXPORT_SYMBOL_GPL(ata_sff_exec_command);
655 678
656/** 679/**
657 * ata_tf_to_host - issue ATA taskfile to host controller 680 * ata_tf_to_host - issue ATA taskfile to host controller
@@ -717,6 +740,53 @@ unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
717 740
718 return words << 1; 741 return words << 1;
719} 742}
743EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
744
745/**
746 * ata_sff_data_xfer32 - Transfer data by PIO
747 * @dev: device to target
748 * @buf: data buffer
749 * @buflen: buffer length
750 * @rw: read/write
751 *
752 * Transfer data from/to the device data register by PIO using 32bit
753 * I/O operations.
754 *
755 * LOCKING:
756 * Inherited from caller.
757 *
758 * RETURNS:
759 * Bytes consumed.
760 */
761
762unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
763 unsigned int buflen, int rw)
764{
765 struct ata_port *ap = dev->link->ap;
766 void __iomem *data_addr = ap->ioaddr.data_addr;
767 unsigned int words = buflen >> 2;
768 int slop = buflen & 3;
769
770 /* Transfer multiple of 4 bytes */
771 if (rw == READ)
772 ioread32_rep(data_addr, buf, words);
773 else
774 iowrite32_rep(data_addr, buf, words);
775
776 if (unlikely(slop)) {
777 __le32 pad;
778 if (rw == READ) {
779 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
780 memcpy(buf + buflen - slop, &pad, slop);
781 } else {
782 memcpy(&pad, buf + buflen - slop, slop);
783 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
784 }
785 words++;
786 }
787 return words << 2;
788}
789EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
720 790
721/** 791/**
722 * ata_sff_data_xfer_noirq - Transfer data by PIO 792 * ata_sff_data_xfer_noirq - Transfer data by PIO
@@ -746,6 +816,7 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
746 816
747 return consumed; 817 return consumed;
748} 818}
819EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
749 820
750/** 821/**
751 * ata_pio_sector - Transfer a sector of data. 822 * ata_pio_sector - Transfer a sector of data.
@@ -820,7 +891,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
820 /* READ/WRITE MULTIPLE */ 891 /* READ/WRITE MULTIPLE */
821 unsigned int nsect; 892 unsigned int nsect;
822 893
823 WARN_ON(qc->dev->multi_count == 0); 894 WARN_ON_ONCE(qc->dev->multi_count == 0);
824 895
825 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, 896 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
826 qc->dev->multi_count); 897 qc->dev->multi_count);
@@ -847,7 +918,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
847{ 918{
848 /* send SCSI cdb */ 919 /* send SCSI cdb */
849 DPRINTK("send cdb\n"); 920 DPRINTK("send cdb\n");
850 WARN_ON(qc->dev->cdb_len < 12); 921 WARN_ON_ONCE(qc->dev->cdb_len < 12);
851 922
852 ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); 923 ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
853 ata_sff_sync(ap); 924 ata_sff_sync(ap);
@@ -922,13 +993,15 @@ next_sg:
922 buf = kmap_atomic(page, KM_IRQ0); 993 buf = kmap_atomic(page, KM_IRQ0);
923 994
924 /* do the actual data transfer */ 995 /* do the actual data transfer */
925 consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw); 996 consumed = ap->ops->sff_data_xfer(dev, buf + offset,
997 count, rw);
926 998
927 kunmap_atomic(buf, KM_IRQ0); 999 kunmap_atomic(buf, KM_IRQ0);
928 local_irq_restore(flags); 1000 local_irq_restore(flags);
929 } else { 1001 } else {
930 buf = page_address(page); 1002 buf = page_address(page);
931 consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw); 1003 consumed = ap->ops->sff_data_xfer(dev, buf + offset,
1004 count, rw);
932 } 1005 }
933 1006
934 bytes -= min(bytes, consumed); 1007 bytes -= min(bytes, consumed);
@@ -941,7 +1014,7 @@ next_sg:
941 } 1014 }
942 1015
943 /* consumed can be larger than count only for the last transfer */ 1016 /* consumed can be larger than count only for the last transfer */
944 WARN_ON(qc->cursg && count != consumed); 1017 WARN_ON_ONCE(qc->cursg && count != consumed);
945 1018
946 if (bytes) 1019 if (bytes)
947 goto next_sg; 1020 goto next_sg;
@@ -1013,18 +1086,19 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
1013 * RETURNS: 1086 * RETURNS:
1014 * 1 if ok in workqueue, 0 otherwise. 1087 * 1 if ok in workqueue, 0 otherwise.
1015 */ 1088 */
1016static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc) 1089static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
1090 struct ata_queued_cmd *qc)
1017{ 1091{
1018 if (qc->tf.flags & ATA_TFLAG_POLLING) 1092 if (qc->tf.flags & ATA_TFLAG_POLLING)
1019 return 1; 1093 return 1;
1020 1094
1021 if (ap->hsm_task_state == HSM_ST_FIRST) { 1095 if (ap->hsm_task_state == HSM_ST_FIRST) {
1022 if (qc->tf.protocol == ATA_PROT_PIO && 1096 if (qc->tf.protocol == ATA_PROT_PIO &&
1023 (qc->tf.flags & ATA_TFLAG_WRITE)) 1097 (qc->tf.flags & ATA_TFLAG_WRITE))
1024 return 1; 1098 return 1;
1025 1099
1026 if (ata_is_atapi(qc->tf.protocol) && 1100 if (ata_is_atapi(qc->tf.protocol) &&
1027 !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 1101 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1028 return 1; 1102 return 1;
1029 } 1103 }
1030 1104
@@ -1098,13 +1172,13 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1098 unsigned long flags = 0; 1172 unsigned long flags = 0;
1099 int poll_next; 1173 int poll_next;
1100 1174
1101 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); 1175 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1102 1176
1103 /* Make sure ata_sff_qc_issue() does not throw things 1177 /* Make sure ata_sff_qc_issue() does not throw things
1104 * like DMA polling into the workqueue. Notice that 1178 * like DMA polling into the workqueue. Notice that
1105 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). 1179 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
1106 */ 1180 */
1107 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc)); 1181 WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
1108 1182
1109fsm_start: 1183fsm_start:
1110 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", 1184 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
@@ -1313,7 +1387,7 @@ fsm_start:
1313 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", 1387 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
1314 ap->print_id, qc->dev->devno, status); 1388 ap->print_id, qc->dev->devno, status);
1315 1389
1316 WARN_ON(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); 1390 WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
1317 1391
1318 ap->hsm_task_state = HSM_ST_IDLE; 1392 ap->hsm_task_state = HSM_ST_IDLE;
1319 1393
@@ -1338,6 +1412,7 @@ fsm_start:
1338 1412
1339 return poll_next; 1413 return poll_next;
1340} 1414}
1415EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1341 1416
1342void ata_pio_task(struct work_struct *work) 1417void ata_pio_task(struct work_struct *work)
1343{ 1418{
@@ -1348,7 +1423,7 @@ void ata_pio_task(struct work_struct *work)
1348 int poll_next; 1423 int poll_next;
1349 1424
1350fsm_start: 1425fsm_start:
1351 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE); 1426 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1352 1427
1353 /* 1428 /*
1354 * This is purely heuristic. This is a fast path. 1429 * This is purely heuristic. This is a fast path.
@@ -1437,7 +1512,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1437 break; 1512 break;
1438 1513
1439 case ATA_PROT_DMA: 1514 case ATA_PROT_DMA:
1440 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); 1515 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
1441 1516
1442 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ 1517 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1443 ap->ops->bmdma_setup(qc); /* set up bmdma */ 1518 ap->ops->bmdma_setup(qc); /* set up bmdma */
@@ -1489,7 +1564,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1489 break; 1564 break;
1490 1565
1491 case ATAPI_PROT_DMA: 1566 case ATAPI_PROT_DMA:
1492 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); 1567 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
1493 1568
1494 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ 1569 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1495 ap->ops->bmdma_setup(qc); /* set up bmdma */ 1570 ap->ops->bmdma_setup(qc); /* set up bmdma */
@@ -1501,12 +1576,13 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1501 break; 1576 break;
1502 1577
1503 default: 1578 default:
1504 WARN_ON(1); 1579 WARN_ON_ONCE(1);
1505 return AC_ERR_SYSTEM; 1580 return AC_ERR_SYSTEM;
1506 } 1581 }
1507 1582
1508 return 0; 1583 return 0;
1509} 1584}
1585EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
1510 1586
1511/** 1587/**
1512 * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read 1588 * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
@@ -1526,6 +1602,7 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1526 qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf); 1602 qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
1527 return true; 1603 return true;
1528} 1604}
1605EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
1529 1606
1530/** 1607/**
1531 * ata_sff_host_intr - Handle host interrupt for given (port, task) 1608 * ata_sff_host_intr - Handle host interrupt for given (port, task)
@@ -1623,6 +1700,7 @@ idle_irq:
1623#endif 1700#endif
1624 return 0; /* irq not handled */ 1701 return 0; /* irq not handled */
1625} 1702}
1703EXPORT_SYMBOL_GPL(ata_sff_host_intr);
1626 1704
1627/** 1705/**
1628 * ata_sff_interrupt - Default ATA host interrupt handler 1706 * ata_sff_interrupt - Default ATA host interrupt handler
@@ -1667,6 +1745,7 @@ irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1667 1745
1668 return IRQ_RETVAL(handled); 1746 return IRQ_RETVAL(handled);
1669} 1747}
1748EXPORT_SYMBOL_GPL(ata_sff_interrupt);
1670 1749
1671/** 1750/**
1672 * ata_sff_freeze - Freeze SFF controller port 1751 * ata_sff_freeze - Freeze SFF controller port
@@ -1695,6 +1774,7 @@ void ata_sff_freeze(struct ata_port *ap)
1695 1774
1696 ap->ops->sff_irq_clear(ap); 1775 ap->ops->sff_irq_clear(ap);
1697} 1776}
1777EXPORT_SYMBOL_GPL(ata_sff_freeze);
1698 1778
1699/** 1779/**
1700 * ata_sff_thaw - Thaw SFF controller port 1780 * ata_sff_thaw - Thaw SFF controller port
@@ -1712,6 +1792,7 @@ void ata_sff_thaw(struct ata_port *ap)
1712 ap->ops->sff_irq_clear(ap); 1792 ap->ops->sff_irq_clear(ap);
1713 ap->ops->sff_irq_on(ap); 1793 ap->ops->sff_irq_on(ap);
1714} 1794}
1795EXPORT_SYMBOL_GPL(ata_sff_thaw);
1715 1796
1716/** 1797/**
1717 * ata_sff_prereset - prepare SFF link for reset 1798 * ata_sff_prereset - prepare SFF link for reset
@@ -1753,6 +1834,7 @@ int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1753 1834
1754 return 0; 1835 return 0;
1755} 1836}
1837EXPORT_SYMBOL_GPL(ata_sff_prereset);
1756 1838
1757/** 1839/**
1758 * ata_devchk - PATA device presence detection 1840 * ata_devchk - PATA device presence detection
@@ -1865,6 +1947,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1865 1947
1866 return class; 1948 return class;
1867} 1949}
1950EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
1868 1951
1869/** 1952/**
1870 * ata_sff_wait_after_reset - wait for devices to become ready after reset 1953 * ata_sff_wait_after_reset - wait for devices to become ready after reset
@@ -1941,6 +2024,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1941 2024
1942 return ret; 2025 return ret;
1943} 2026}
2027EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
1944 2028
1945static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, 2029static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
1946 unsigned long deadline) 2030 unsigned long deadline)
@@ -2013,6 +2097,7 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
2013 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); 2097 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2014 return 0; 2098 return 0;
2015} 2099}
2100EXPORT_SYMBOL_GPL(ata_sff_softreset);
2016 2101
2017/** 2102/**
2018 * sata_sff_hardreset - reset host port via SATA phy reset 2103 * sata_sff_hardreset - reset host port via SATA phy reset
@@ -2045,6 +2130,7 @@ int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2045 DPRINTK("EXIT, class=%u\n", *class); 2130 DPRINTK("EXIT, class=%u\n", *class);
2046 return rc; 2131 return rc;
2047} 2132}
2133EXPORT_SYMBOL_GPL(sata_sff_hardreset);
2048 2134
2049/** 2135/**
2050 * ata_sff_postreset - SFF postreset callback 2136 * ata_sff_postreset - SFF postreset callback
@@ -2080,6 +2166,7 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2080 if (ap->ioaddr.ctl_addr) 2166 if (ap->ioaddr.ctl_addr)
2081 iowrite8(ap->ctl, ap->ioaddr.ctl_addr); 2167 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2082} 2168}
2169EXPORT_SYMBOL_GPL(ata_sff_postreset);
2083 2170
2084/** 2171/**
2085 * ata_sff_error_handler - Stock error handler for BMDMA controller 2172 * ata_sff_error_handler - Stock error handler for BMDMA controller
@@ -2152,6 +2239,7 @@ void ata_sff_error_handler(struct ata_port *ap)
2152 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, 2239 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2153 ap->ops->postreset); 2240 ap->ops->postreset);
2154} 2241}
2242EXPORT_SYMBOL_GPL(ata_sff_error_handler);
2155 2243
2156/** 2244/**
2157 * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller 2245 * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
@@ -2174,6 +2262,7 @@ void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
2174 2262
2175 spin_unlock_irqrestore(ap->lock, flags); 2263 spin_unlock_irqrestore(ap->lock, flags);
2176} 2264}
2265EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
2177 2266
2178/** 2267/**
2179 * ata_sff_port_start - Set port up for dma. 2268 * ata_sff_port_start - Set port up for dma.
@@ -2194,6 +2283,7 @@ int ata_sff_port_start(struct ata_port *ap)
2194 return ata_port_start(ap); 2283 return ata_port_start(ap);
2195 return 0; 2284 return 0;
2196} 2285}
2286EXPORT_SYMBOL_GPL(ata_sff_port_start);
2197 2287
2198/** 2288/**
2199 * ata_sff_std_ports - initialize ioaddr with standard port offsets. 2289 * ata_sff_std_ports - initialize ioaddr with standard port offsets.
@@ -2219,6 +2309,7 @@ void ata_sff_std_ports(struct ata_ioports *ioaddr)
2219 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; 2309 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2220 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 2310 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2221} 2311}
2312EXPORT_SYMBOL_GPL(ata_sff_std_ports);
2222 2313
2223unsigned long ata_bmdma_mode_filter(struct ata_device *adev, 2314unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
2224 unsigned long xfer_mask) 2315 unsigned long xfer_mask)
@@ -2230,6 +2321,7 @@ unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
2230 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 2321 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2231 return xfer_mask; 2322 return xfer_mask;
2232} 2323}
2324EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
2233 2325
2234/** 2326/**
2235 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction 2327 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
@@ -2258,6 +2350,7 @@ void ata_bmdma_setup(struct ata_queued_cmd *qc)
2258 /* issue r/w command */ 2350 /* issue r/w command */
2259 ap->ops->sff_exec_command(ap, &qc->tf); 2351 ap->ops->sff_exec_command(ap, &qc->tf);
2260} 2352}
2353EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2261 2354
2262/** 2355/**
2263 * ata_bmdma_start - Start a PCI IDE BMDMA transaction 2356 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
@@ -2290,6 +2383,7 @@ void ata_bmdma_start(struct ata_queued_cmd *qc)
2290 * unnecessarily delayed for MMIO 2383 * unnecessarily delayed for MMIO
2291 */ 2384 */
2292} 2385}
2386EXPORT_SYMBOL_GPL(ata_bmdma_start);
2293 2387
2294/** 2388/**
2295 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer 2389 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
@@ -2314,6 +2408,7 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
2314 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ 2408 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
2315 ata_sff_dma_pause(ap); 2409 ata_sff_dma_pause(ap);
2316} 2410}
2411EXPORT_SYMBOL_GPL(ata_bmdma_stop);
2317 2412
2318/** 2413/**
2319 * ata_bmdma_status - Read PCI IDE BMDMA status 2414 * ata_bmdma_status - Read PCI IDE BMDMA status
@@ -2330,6 +2425,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
2330{ 2425{
2331 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 2426 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
2332} 2427}
2428EXPORT_SYMBOL_GPL(ata_bmdma_status);
2333 2429
2334/** 2430/**
2335 * ata_bus_reset - reset host port and associated ATA channel 2431 * ata_bus_reset - reset host port and associated ATA channel
@@ -2422,6 +2518,7 @@ err_out:
2422 2518
2423 DPRINTK("EXIT\n"); 2519 DPRINTK("EXIT\n");
2424} 2520}
2521EXPORT_SYMBOL_GPL(ata_bus_reset);
2425 2522
2426#ifdef CONFIG_PCI 2523#ifdef CONFIG_PCI
2427 2524
@@ -2449,6 +2546,7 @@ int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
2449 return -EOPNOTSUPP; 2546 return -EOPNOTSUPP;
2450 return 0; 2547 return 0;
2451} 2548}
2549EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
2452 2550
2453/** 2551/**
2454 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host 2552 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
@@ -2501,11 +2599,12 @@ int ata_pci_bmdma_init(struct ata_host *host)
2501 host->flags |= ATA_HOST_SIMPLEX; 2599 host->flags |= ATA_HOST_SIMPLEX;
2502 2600
2503 ata_port_desc(ap, "bmdma 0x%llx", 2601 ata_port_desc(ap, "bmdma 0x%llx",
2504 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); 2602 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
2505 } 2603 }
2506 2604
2507 return 0; 2605 return 0;
2508} 2606}
2607EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
2509 2608
2510static int ata_resources_present(struct pci_dev *pdev, int port) 2609static int ata_resources_present(struct pci_dev *pdev, int port)
2511{ 2610{
@@ -2513,7 +2612,7 @@ static int ata_resources_present(struct pci_dev *pdev, int port)
2513 2612
2514 /* Check the PCI resources for this channel are enabled */ 2613 /* Check the PCI resources for this channel are enabled */
2515 port = port * 2; 2614 port = port * 2;
2516 for (i = 0; i < 2; i ++) { 2615 for (i = 0; i < 2; i++) {
2517 if (pci_resource_start(pdev, port + i) == 0 || 2616 if (pci_resource_start(pdev, port + i) == 0 ||
2518 pci_resource_len(pdev, port + i) == 0) 2617 pci_resource_len(pdev, port + i) == 0)
2519 return 0; 2618 return 0;
@@ -2598,6 +2697,7 @@ int ata_pci_sff_init_host(struct ata_host *host)
2598 2697
2599 return 0; 2698 return 0;
2600} 2699}
2700EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2601 2701
2602/** 2702/**
2603 * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host 2703 * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
@@ -2615,7 +2715,7 @@ int ata_pci_sff_init_host(struct ata_host *host)
2615 * 0 on success, -errno otherwise. 2715 * 0 on success, -errno otherwise.
2616 */ 2716 */
2617int ata_pci_sff_prepare_host(struct pci_dev *pdev, 2717int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2618 const struct ata_port_info * const * ppi, 2718 const struct ata_port_info * const *ppi,
2619 struct ata_host **r_host) 2719 struct ata_host **r_host)
2620{ 2720{
2621 struct ata_host *host; 2721 struct ata_host *host;
@@ -2645,17 +2745,18 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2645 *r_host = host; 2745 *r_host = host;
2646 return 0; 2746 return 0;
2647 2747
2648 err_bmdma: 2748err_bmdma:
2649 /* This is necessary because PCI and iomap resources are 2749 /* This is necessary because PCI and iomap resources are
2650 * merged and releasing the top group won't release the 2750 * merged and releasing the top group won't release the
2651 * acquired resources if some of those have been acquired 2751 * acquired resources if some of those have been acquired
2652 * before entering this function. 2752 * before entering this function.
2653 */ 2753 */
2654 pcim_iounmap_regions(pdev, 0xf); 2754 pcim_iounmap_regions(pdev, 0xf);
2655 err_out: 2755err_out:
2656 devres_release_group(&pdev->dev, NULL); 2756 devres_release_group(&pdev->dev, NULL);
2657 return rc; 2757 return rc;
2658} 2758}
2759EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2659 2760
2660/** 2761/**
2661 * ata_pci_sff_activate_host - start SFF host, request IRQ and register it 2762 * ata_pci_sff_activate_host - start SFF host, request IRQ and register it
@@ -2741,7 +2842,7 @@ int ata_pci_sff_activate_host(struct ata_host *host,
2741 } 2842 }
2742 2843
2743 rc = ata_host_register(host, sht); 2844 rc = ata_host_register(host, sht);
2744 out: 2845out:
2745 if (rc == 0) 2846 if (rc == 0)
2746 devres_remove_group(dev, NULL); 2847 devres_remove_group(dev, NULL);
2747 else 2848 else
@@ -2749,6 +2850,7 @@ int ata_pci_sff_activate_host(struct ata_host *host,
2749 2850
2750 return rc; 2851 return rc;
2751} 2852}
2853EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2752 2854
2753/** 2855/**
2754 * ata_pci_sff_init_one - Initialize/register PCI IDE host controller 2856 * ata_pci_sff_init_one - Initialize/register PCI IDE host controller
@@ -2776,7 +2878,7 @@ int ata_pci_sff_activate_host(struct ata_host *host,
2776 * Zero on success, negative on errno-based value on error. 2878 * Zero on success, negative on errno-based value on error.
2777 */ 2879 */
2778int ata_pci_sff_init_one(struct pci_dev *pdev, 2880int ata_pci_sff_init_one(struct pci_dev *pdev,
2779 const struct ata_port_info * const * ppi, 2881 const struct ata_port_info * const *ppi,
2780 struct scsi_host_template *sht, void *host_priv) 2882 struct scsi_host_template *sht, void *host_priv)
2781{ 2883{
2782 struct device *dev = &pdev->dev; 2884 struct device *dev = &pdev->dev;
@@ -2815,7 +2917,7 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
2815 2917
2816 pci_set_master(pdev); 2918 pci_set_master(pdev);
2817 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); 2919 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2818 out: 2920out:
2819 if (rc == 0) 2921 if (rc == 0)
2820 devres_remove_group(&pdev->dev, NULL); 2922 devres_remove_group(&pdev->dev, NULL);
2821 else 2923 else
@@ -2823,54 +2925,7 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
2823 2925
2824 return rc; 2926 return rc;
2825} 2927}
2928EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
2826 2929
2827#endif /* CONFIG_PCI */ 2930#endif /* CONFIG_PCI */
2828 2931
2829EXPORT_SYMBOL_GPL(ata_sff_port_ops);
2830EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2831EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
2832EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
2833EXPORT_SYMBOL_GPL(ata_sff_dev_select);
2834EXPORT_SYMBOL_GPL(ata_sff_check_status);
2835EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
2836EXPORT_SYMBOL_GPL(ata_sff_pause);
2837EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
2838EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
2839EXPORT_SYMBOL_GPL(ata_sff_tf_load);
2840EXPORT_SYMBOL_GPL(ata_sff_tf_read);
2841EXPORT_SYMBOL_GPL(ata_sff_exec_command);
2842EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
2843EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
2844EXPORT_SYMBOL_GPL(ata_sff_irq_on);
2845EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
2846EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
2847EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
2848EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
2849EXPORT_SYMBOL_GPL(ata_sff_host_intr);
2850EXPORT_SYMBOL_GPL(ata_sff_interrupt);
2851EXPORT_SYMBOL_GPL(ata_sff_freeze);
2852EXPORT_SYMBOL_GPL(ata_sff_thaw);
2853EXPORT_SYMBOL_GPL(ata_sff_prereset);
2854EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
2855EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
2856EXPORT_SYMBOL_GPL(ata_sff_softreset);
2857EXPORT_SYMBOL_GPL(sata_sff_hardreset);
2858EXPORT_SYMBOL_GPL(ata_sff_postreset);
2859EXPORT_SYMBOL_GPL(ata_sff_error_handler);
2860EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
2861EXPORT_SYMBOL_GPL(ata_sff_port_start);
2862EXPORT_SYMBOL_GPL(ata_sff_std_ports);
2863EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
2864EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2865EXPORT_SYMBOL_GPL(ata_bmdma_start);
2866EXPORT_SYMBOL_GPL(ata_bmdma_stop);
2867EXPORT_SYMBOL_GPL(ata_bmdma_status);
2868EXPORT_SYMBOL_GPL(ata_bus_reset);
2869#ifdef CONFIG_PCI
2870EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
2871EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
2872EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2873EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2874EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2875EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
2876#endif /* CONFIG_PCI */
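
Besides moving each EXPORT_SYMBOL_GPL() next to its function, the libata-sff changes add ata_sff_data_xfer32(), which moves whole 32-bit words with ioread32_rep/iowrite32_rep and then handles a 1-3 byte tail ("slop") by bouncing one last full word through a pad variable, so a short read never overruns the caller's buffer. A userspace sketch of the read-side tail handling (dev_read32() and dev_fifo stand in for the MMIO data register; the real code additionally byte-swaps the pad with cpu_to_le32/le32_to_cpu for big-endian hosts):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pretend device data register: hands out 32-bit words in sequence. */
static uint8_t dev_fifo[64];
static size_t dev_pos;

static uint32_t dev_read32(void)
{
	uint32_t w;

	memcpy(&w, &dev_fifo[dev_pos], 4);
	dev_pos += 4;
	return w;
}

/* Read buflen bytes: whole words first, then the 1-3 byte tail. */
static unsigned int pio_read32(uint8_t *buf, unsigned int buflen)
{
	unsigned int words = buflen >> 2;
	unsigned int slop = buflen & 3;

	for (unsigned int i = 0; i < words; i++) {
		uint32_t w = dev_read32();
		memcpy(buf + 4 * i, &w, 4);
	}
	if (slop) {
		uint32_t pad = dev_read32();              /* one extra full word */
		memcpy(buf + buflen - slop, &pad, slop);  /* keep only the tail */
		words++;
	}
	return words << 2;   /* bytes actually consumed on the bus */
}

int main(void)
{
	uint8_t buf[7];

	for (int i = 0; i < 64; i++)
		dev_fifo[i] = (uint8_t)i;
	unsigned int n = pio_read32(buf, sizeof(buf));
	printf("consumed %u bus bytes for a %zu-byte buffer\n", n, sizeof(buf));
	return 0;
}

The write side runs the same trick in reverse, copying the tail bytes into the pad word before writing it out, which is why the function reports words << 2 bytes consumed rather than buflen.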
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index e2e332d8ff95..8b77a9802df1 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -13,12 +13,6 @@
13#include <linux/device.h> 13#include <linux/device.h>
14#include <scsi/scsi_host.h> 14#include <scsi/scsi_host.h>
15#include <acpi/acpi_bus.h> 15#include <acpi/acpi_bus.h>
16#include <acpi/acnames.h>
17#include <acpi/acnamesp.h>
18#include <acpi/acparser.h>
19#include <acpi/acexcep.h>
20#include <acpi/acmacros.h>
21#include <acpi/actypes.h>
22 16
23#include <linux/libata.h> 17#include <linux/libata.h>
24#include <linux/ata.h> 18#include <linux/ata.h>
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 73c466e452ca..a7999c19f0c9 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -19,7 +19,9 @@
19 * 19 *
20 * TODO/CHECK 20 * TODO/CHECK
21 * Cannot have ATAPI on both master & slave for rev < c2 (???) but 21 * Cannot have ATAPI on both master & slave for rev < c2 (???) but
22 * otherwise should do atapi DMA. 22 * otherwise should do atapi DMA (for now, older revisions
 23 * use PIO only for ATAPI)
24 * Review Sunblade workaround.
23 */ 25 */
24 26
25#include <linux/kernel.h> 27#include <linux/kernel.h>
@@ -33,12 +35,14 @@
33#include <linux/dmi.h> 35#include <linux/dmi.h>
34 36
35#define DRV_NAME "pata_ali" 37#define DRV_NAME "pata_ali"
36#define DRV_VERSION "0.7.5" 38#define DRV_VERSION "0.7.8"
37 39
38static int ali_atapi_dma = 0; 40static int ali_atapi_dma = 0;
39module_param_named(atapi_dma, ali_atapi_dma, int, 0644); 41module_param_named(atapi_dma, ali_atapi_dma, int, 0644);
40MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)"); 42MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)");
41 43
44static struct pci_dev *isa_bridge;
45
42/* 46/*
43 * Cable special cases 47 * Cable special cases
44 */ 48 */
@@ -147,8 +151,7 @@ static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int o
147 151
148 pci_read_config_byte(pdev, pio_fifo, &fifo); 152 pci_read_config_byte(pdev, pio_fifo, &fifo);
149 fifo &= ~(0x0F << shift); 153 fifo &= ~(0x0F << shift);
150 if (on) 154 fifo |= (on << shift);
151 fifo |= (on << shift);
152 pci_write_config_byte(pdev, pio_fifo, fifo); 155 pci_write_config_byte(pdev, pio_fifo, fifo);
153} 156}
154 157
@@ -337,6 +340,23 @@ static int ali_check_atapi_dma(struct ata_queued_cmd *qc)
337 return 0; 340 return 0;
338} 341}
339 342
343static void ali_c2_c3_postreset(struct ata_link *link, unsigned int *classes)
344{
345 u8 r;
346 int port_bit = 4 << link->ap->port_no;
347
348 /* If our bridge is an ALI 1533 then do the extra work */
349 if (isa_bridge) {
350 /* Tristate and re-enable the bus signals */
351 pci_read_config_byte(isa_bridge, 0x58, &r);
352 r &= ~port_bit;
353 pci_write_config_byte(isa_bridge, 0x58, r);
354 r |= port_bit;
355 pci_write_config_byte(isa_bridge, 0x58, r);
356 }
357 ata_sff_postreset(link, classes);
358}
359
340static struct scsi_host_template ali_sht = { 360static struct scsi_host_template ali_sht = {
341 ATA_BMDMA_SHT(DRV_NAME), 361 ATA_BMDMA_SHT(DRV_NAME),
342}; 362};
@@ -349,10 +369,11 @@ static struct ata_port_operations ali_early_port_ops = {
349 .inherits = &ata_sff_port_ops, 369 .inherits = &ata_sff_port_ops,
350 .cable_detect = ata_cable_40wire, 370 .cable_detect = ata_cable_40wire,
351 .set_piomode = ali_set_piomode, 371 .set_piomode = ali_set_piomode,
372 .sff_data_xfer = ata_sff_data_xfer32,
352}; 373};
353 374
354static const struct ata_port_operations ali_dma_base_ops = { 375static const struct ata_port_operations ali_dma_base_ops = {
355 .inherits = &ata_bmdma_port_ops, 376 .inherits = &ata_bmdma32_port_ops,
356 .set_piomode = ali_set_piomode, 377 .set_piomode = ali_set_piomode,
357 .set_dmamode = ali_set_dmamode, 378 .set_dmamode = ali_set_dmamode,
358}; 379};
@@ -377,6 +398,17 @@ static struct ata_port_operations ali_c2_port_ops = {
377 .check_atapi_dma = ali_check_atapi_dma, 398 .check_atapi_dma = ali_check_atapi_dma,
378 .cable_detect = ali_c2_cable_detect, 399 .cable_detect = ali_c2_cable_detect,
379 .dev_config = ali_lock_sectors, 400 .dev_config = ali_lock_sectors,
401 .postreset = ali_c2_c3_postreset,
402};
403
404/*
405 * Port operations for DMA capable ALi with cable detect
406 */
407static struct ata_port_operations ali_c4_port_ops = {
408 .inherits = &ali_dma_base_ops,
409 .check_atapi_dma = ali_check_atapi_dma,
410 .cable_detect = ali_c2_cable_detect,
411 .dev_config = ali_lock_sectors,
380}; 412};
381 413
382/* 414/*
@@ -401,52 +433,49 @@ static struct ata_port_operations ali_c5_port_ops = {
401static void ali_init_chipset(struct pci_dev *pdev) 433static void ali_init_chipset(struct pci_dev *pdev)
402{ 434{
403 u8 tmp; 435 u8 tmp;
404 struct pci_dev *north, *isa_bridge; 436 struct pci_dev *north;
405 437
406 /* 438 /*
407 * The chipset revision selects the driver operations and 439 * The chipset revision selects the driver operations and
408 * mode data. 440 * mode data.
409 */ 441 */
410 442
411 if (pdev->revision >= 0x20 && pdev->revision < 0xC2) { 443 if (pdev->revision <= 0x20) {
412 /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */ 444 pci_read_config_byte(pdev, 0x53, &tmp);
413 pci_read_config_byte(pdev, 0x4B, &tmp); 445 tmp |= 0x03;
414 /* Clear CD-ROM DMA write bit */ 446 pci_write_config_byte(pdev, 0x53, tmp);
415 tmp &= 0x7F; 447 } else {
416 pci_write_config_byte(pdev, 0x4B, tmp); 448 pci_read_config_byte(pdev, 0x4a, &tmp);
417 } else if (pdev->revision >= 0xC2) { 449 pci_write_config_byte(pdev, 0x4a, tmp | 0x20);
418 /* Enable cable detection logic */
419 pci_read_config_byte(pdev, 0x4B, &tmp); 450 pci_read_config_byte(pdev, 0x4B, &tmp);
420 pci_write_config_byte(pdev, 0x4B, tmp | 0x08); 451 if (pdev->revision < 0xC2)
421 } 452 /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
422 north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); 453 /* Clear CD-ROM DMA write bit */
423 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); 454 tmp &= 0x7F;
424 455 /* Cable and UDMA */
425 if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) { 456 pci_write_config_byte(pdev, 0x4B, tmp | 0x09);
426 /* Configure the ALi bridge logic. For non ALi rely on BIOS.
427 Set the south bridge enable bit */
428 pci_read_config_byte(isa_bridge, 0x79, &tmp);
429 if (pdev->revision == 0xC2)
430 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
431 else if (pdev->revision > 0xC2 && pdev->revision < 0xC5)
432 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
433 }
434 if (pdev->revision >= 0x20) {
435 /* 457 /*
436 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want 458 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
437 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control 459 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
438 * via 0x54/55. 460 * via 0x54/55.
439 */ 461 */
440 pci_read_config_byte(pdev, 0x53, &tmp); 462 pci_read_config_byte(pdev, 0x53, &tmp);
441 if (pdev->revision <= 0x20)
442 tmp &= ~0x02;
443 if (pdev->revision >= 0xc7) 463 if (pdev->revision >= 0xc7)
444 tmp |= 0x03; 464 tmp |= 0x03;
445 else 465 else
446 tmp |= 0x01; /* CD_ROM enable for DMA */ 466 tmp |= 0x01; /* CD_ROM enable for DMA */
447 pci_write_config_byte(pdev, 0x53, tmp); 467 pci_write_config_byte(pdev, 0x53, tmp);
448 } 468 }
449 pci_dev_put(isa_bridge); 469 north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
470 if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) {
471 /* Configure the ALi bridge logic. For non-ALi bridges rely on the BIOS.
472 Set the south bridge enable bit */
473 pci_read_config_byte(isa_bridge, 0x79, &tmp);
474 if (pdev->revision == 0xC2)
475 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
476 else if (pdev->revision > 0xC2 && pdev->revision < 0xC5)
477 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
478 }
450 pci_dev_put(north); 479 pci_dev_put(north);
451 ata_pci_bmdma_clear_simplex(pdev); 480 ata_pci_bmdma_clear_simplex(pdev);
452} 481}
@@ -503,7 +532,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
503 .pio_mask = 0x1f, 532 .pio_mask = 0x1f,
504 .mwdma_mask = 0x07, 533 .mwdma_mask = 0x07,
505 .udma_mask = ATA_UDMA5, 534 .udma_mask = ATA_UDMA5,
506 .port_ops = &ali_c2_port_ops 535 .port_ops = &ali_c4_port_ops
507 }; 536 };
508 /* Revision 0xC5 is UDMA133 with LBA48 DMA */ 537 /* Revision 0xC5 is UDMA133 with LBA48 DMA */
509 static const struct ata_port_info info_c5 = { 538 static const struct ata_port_info info_c5 = {
@@ -516,7 +545,6 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
516 545
517 const struct ata_port_info *ppi[] = { NULL, NULL }; 546 const struct ata_port_info *ppi[] = { NULL, NULL };
518 u8 tmp; 547 u8 tmp;
519 struct pci_dev *isa_bridge;
520 int rc; 548 int rc;
521 549
522 rc = pcim_enable_device(pdev); 550 rc = pcim_enable_device(pdev);
@@ -543,14 +571,12 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
543 571
544 ali_init_chipset(pdev); 572 ali_init_chipset(pdev);
545 573
546 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
547 if (isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) { 574 if (isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) {
548 /* Are we paired with a UDMA capable chip */ 575 /* Are we paired with a UDMA capable chip */
549 pci_read_config_byte(isa_bridge, 0x5E, &tmp); 576 pci_read_config_byte(isa_bridge, 0x5E, &tmp);
550 if ((tmp & 0x1E) == 0x12) 577 if ((tmp & 0x1E) == 0x12)
551 ppi[0] = &info_20_udma; 578 ppi[0] = &info_20_udma;
552 } 579 }
553 pci_dev_put(isa_bridge);
554 580
555 return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL); 581 return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL);
556} 582}
@@ -590,13 +616,20 @@ static struct pci_driver ali_pci_driver = {
590 616
591static int __init ali_init(void) 617static int __init ali_init(void)
592{ 618{
593 return pci_register_driver(&ali_pci_driver); 619 int ret;
620 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
621
622 ret = pci_register_driver(&ali_pci_driver);
623 if (ret < 0)
624 pci_dev_put(isa_bridge);
625 return ret;
594} 626}
595 627
596 628
597static void __exit ali_exit(void) 629static void __exit ali_exit(void)
598{ 630{
599 pci_unregister_driver(&ali_pci_driver); 631 pci_unregister_driver(&ali_pci_driver);
632 pci_dev_put(isa_bridge);
600} 633}
601 634
602 635
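
The pata_ali hunks above cache the M1533 ISA bridge in a module-level pointer so the new postreset hook can reach it without a PCI lookup on every reset: the reference is taken once in ali_init(), dropped on registration failure, and dropped again at module exit. A minimal sketch of that reference-caching pattern, with placeholder driver names (not from the patch):

    #include <linux/module.h>
    #include <linux/pci.h>

    static struct pci_dev *cached_bridge;   /* held for the module lifetime */

    static struct pci_driver demo_pci_driver = {
            .name = "demo",
    };

    static int __init demo_init(void)
    {
            int ret;

            /* pci_get_device() returns a referenced device, or NULL */
            cached_bridge = pci_get_device(PCI_VENDOR_ID_AL,
                                           PCI_DEVICE_ID_AL_M1533, NULL);
            ret = pci_register_driver(&demo_pci_driver);
            if (ret < 0)
                    pci_dev_put(cached_bridge); /* pci_dev_put(NULL) is a no-op */
            return ret;
    }

    static void __exit demo_exit(void)
    {
            pci_unregister_driver(&demo_pci_driver);
            pci_dev_put(cached_bridge);         /* balance the init reference */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
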
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 0ec9c7d9fe9d..63719ab9ea44 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -24,7 +24,7 @@
24#include <linux/libata.h> 24#include <linux/libata.h>
25 25
26#define DRV_NAME "pata_amd" 26#define DRV_NAME "pata_amd"
27#define DRV_VERSION "0.3.10" 27#define DRV_VERSION "0.3.11"
28 28
29/** 29/**
30 * timing_setup - shared timing computation and load 30 * timing_setup - shared timing computation and load
@@ -345,7 +345,7 @@ static struct scsi_host_template amd_sht = {
345}; 345};
346 346
347static const struct ata_port_operations amd_base_port_ops = { 347static const struct ata_port_operations amd_base_port_ops = {
348 .inherits = &ata_bmdma_port_ops, 348 .inherits = &ata_bmdma32_port_ops,
349 .prereset = amd_pre_reset, 349 .prereset = amd_pre_reset,
350}; 350};
351 351
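
Several drivers in this series (pata_ali, pata_amd, pata_mpiix, pata_sil680) switch to 32-bit PIO data transfers. The opt-in is a one-line change: BMDMA drivers inherit ata_bmdma32_port_ops instead of ata_bmdma_port_ops, while PIO-only drivers point .sff_data_xfer at ata_sff_data_xfer32. A sketch with hypothetical port-ops names:

    #include <linux/libata.h>

    /* BMDMA driver: take the whole 32-bit base */
    static struct ata_port_operations demo_bmdma_ops = {
            .inherits       = &ata_bmdma32_port_ops,
            .cable_detect   = ata_cable_40wire,
    };

    /* PIO-only driver: swap in just the 32-bit transfer routine */
    static struct ata_port_operations demo_pio_ops = {
            .inherits       = &ata_sff_port_ops,
            .cable_detect   = ata_cable_40wire,
            .sff_data_xfer  = ata_sff_data_xfer32,
    };
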
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index e0c4f05d7d57..65c28e5a6cd7 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -30,7 +30,7 @@
30#define DRV_VERSION "0.6.2" 30#define DRV_VERSION "0.6.2"
31 31
32struct hpt_clock { 32struct hpt_clock {
33 u8 xfer_speed; 33 u8 xfer_mode;
34 u32 timing; 34 u32 timing;
35}; 35};
36 36
@@ -189,28 +189,6 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
189 return ata_bmdma_mode_filter(adev, mask); 189 return ata_bmdma_mode_filter(adev, mask);
190} 190}
191 191
192/**
193 * hpt36x_find_mode - reset the hpt36x bus
194 * @ap: ATA port
195 * @speed: transfer mode
196 *
197 * Return the 32bit register programming information for this channel
198 * that matches the speed provided.
199 */
200
201static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
202{
203 struct hpt_clock *clocks = ap->host->private_data;
204
205 while(clocks->xfer_speed) {
206 if (clocks->xfer_speed == speed)
207 return clocks->timing;
208 clocks++;
209 }
210 BUG();
211 return 0xffffffffU; /* silence compiler warning */
212}
213
214static int hpt36x_cable_detect(struct ata_port *ap) 192static int hpt36x_cable_detect(struct ata_port *ap)
215{ 193{
216 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 194 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -226,25 +204,16 @@ static int hpt36x_cable_detect(struct ata_port *ap)
226 return ATA_CBL_PATA80; 204 return ATA_CBL_PATA80;
227} 205}
228 206
229/** 207static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev,
230 * hpt366_set_piomode - PIO setup 208 u8 mode)
231 * @ap: ATA interface
232 * @adev: device on the interface
233 *
234 * Perform PIO mode setup.
235 */
236
237static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev)
238{ 209{
210 struct hpt_clock *clocks = ap->host->private_data;
239 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 211 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
240 u32 addr1, addr2; 212 u32 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
241 u32 reg; 213 u32 addr2 = 0x51 + 4 * ap->port_no;
242 u32 mode; 214 u32 mask, reg;
243 u8 fast; 215 u8 fast;
244 216
245 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
246 addr2 = 0x51 + 4 * ap->port_no;
247
248 /* Fast interrupt prediction disable, hold off interrupt disable */ 217 /* Fast interrupt prediction disable, hold off interrupt disable */
249 pci_read_config_byte(pdev, addr2, &fast); 218 pci_read_config_byte(pdev, addr2, &fast);
250 if (fast & 0x80) { 219 if (fast & 0x80) {
@@ -252,12 +221,43 @@ static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev)
252 pci_write_config_byte(pdev, addr2, fast); 221 pci_write_config_byte(pdev, addr2, fast);
253 } 222 }
254 223
224 /* determine timing mask and find matching clock entry */
225 if (mode < XFER_MW_DMA_0)
226 mask = 0xc1f8ffff;
227 else if (mode < XFER_UDMA_0)
228 mask = 0x303800ff;
229 else
230 mask = 0x30070000;
231
232 while (clocks->xfer_mode) {
233 if (clocks->xfer_mode == mode)
234 break;
235 clocks++;
236 }
237 if (!clocks->xfer_mode)
238 BUG();
239
240 /*
241 * Combine new mode bits with old config bits and disable
242 * on-chip PIO FIFO/buffer (and PIO MST mode as well) to avoid
243 * problems handling I/O errors later.
244 */
255 pci_read_config_dword(pdev, addr1, &reg); 245 pci_read_config_dword(pdev, addr1, &reg);
256 mode = hpt36x_find_mode(ap, adev->pio_mode); 246 reg = ((reg & ~mask) | (clocks->timing & mask)) & ~0xc0000000;
257 mode &= ~0x8000000; /* No FIFO in PIO */ 247 pci_write_config_dword(pdev, addr1, reg);
258 mode &= ~0x30070000; /* Leave config bits alone */ 248}
259 reg &= 0x30070000; /* Strip timing bits */ 249
260 pci_write_config_dword(pdev, addr1, reg | mode); 250/**
251 * hpt366_set_piomode - PIO setup
252 * @ap: ATA interface
253 * @adev: device on the interface
254 *
255 * Perform PIO mode setup.
256 */
257
258static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev)
259{
260 hpt366_set_mode(ap, adev, adev->pio_mode);
261} 261}
262 262
263/** 263/**
@@ -271,28 +271,7 @@ static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev)
271 271
272static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev) 272static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev)
273{ 273{
274 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 274 hpt366_set_mode(ap, adev, adev->dma_mode);
275 u32 addr1, addr2;
276 u32 reg;
277 u32 mode;
278 u8 fast;
279
280 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
281 addr2 = 0x51 + 4 * ap->port_no;
282
283 /* Fast interrupt prediction disable, hold off interrupt disable */
284 pci_read_config_byte(pdev, addr2, &fast);
285 if (fast & 0x80) {
286 fast &= ~0x80;
287 pci_write_config_byte(pdev, addr2, fast);
288 }
289
290 pci_read_config_dword(pdev, addr1, &reg);
291 mode = hpt36x_find_mode(ap, adev->dma_mode);
292 mode |= 0x8000000; /* FIFO in MWDMA or UDMA */
293 mode &= ~0xC0000000; /* Leave config bits alone */
294 reg &= 0xC0000000; /* Strip timing bits */
295 pci_write_config_dword(pdev, addr1, reg | mode);
296} 275}
297 276
298static struct scsi_host_template hpt36x_sht = { 277static struct scsi_host_template hpt36x_sht = {
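
The new hpt366_set_mode() above folds the PIO and DMA paths into one table lookup followed by a masked register merge: the mask selects only the timing bits owned by the requested mode class, and bits 31:30 (PIO FIFO/MST) are always cleared. A stripped-down, self-contained illustration of the merge, with made-up register and table values:

    #include <stdint.h>
    #include <stdio.h>

    /* keep bits outside @mask, take mode bits from the clock table,
     * then force bits 31:30 (FIFO/MST) off */
    static uint32_t merge_timing(uint32_t reg, uint32_t timing, uint32_t mask)
    {
            return ((reg & ~mask) | (timing & mask)) & ~0xc0000000u;
    }

    int main(void)
    {
            uint32_t reg    = 0xd041d927u;  /* current register (made up) */
            uint32_t timing = 0x20154925u;  /* clock-table entry (made up) */

            /* 0x303800ff is the MWDMA-class mask from the patch */
            printf("%08x\n", merge_timing(reg, timing, 0x303800ffu));
            return 0;
    }
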
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index f11a320337c0..f19cc645881a 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -23,7 +23,7 @@
23#include <linux/libata.h> 23#include <linux/libata.h>
24 24
25#define DRV_NAME "pata_hpt3x3" 25#define DRV_NAME "pata_hpt3x3"
26#define DRV_VERSION "0.5.3" 26#define DRV_VERSION "0.6.1"
27 27
28/** 28/**
29 * hpt3x3_set_piomode - PIO setup 29 * hpt3x3_set_piomode - PIO setup
@@ -80,14 +80,48 @@ static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
80 r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */ 80 r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */
81 81
82 if (adev->dma_mode >= XFER_UDMA_0) 82 if (adev->dma_mode >= XFER_UDMA_0)
83 r2 |= (0x10 << dn); /* Ultra mode */ 83 r2 |= (0x01 << dn); /* Ultra mode */
84 else 84 else
85 r2 |= (0x01 << dn); /* MWDMA */ 85 r2 |= (0x10 << dn); /* MWDMA */
86 86
87 pci_write_config_dword(pdev, 0x44, r1); 87 pci_write_config_dword(pdev, 0x44, r1);
88 pci_write_config_dword(pdev, 0x48, r2); 88 pci_write_config_dword(pdev, 0x48, r2);
89} 89}
90#endif /* CONFIG_PATA_HPT3X3_DMA */ 90
91/**
92 * hpt3x3_freeze - DMA workaround
93 * @ap: port to freeze
94 *
95 * When freezing an HPT3x3 we must stop any pending DMA before
96 * writing to the control register or the chip will hang
97 */
98
99static void hpt3x3_freeze(struct ata_port *ap)
100{
101 void __iomem *mmio = ap->ioaddr.bmdma_addr;
102
103 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ ATA_DMA_START,
104 mmio + ATA_DMA_CMD);
105 ata_sff_dma_pause(ap);
106 ata_sff_freeze(ap);
107}
108
109/**
110 * hpt3x3_bmdma_setup - DMA workaround
111 * @qc: Queued command
112 *
113 * When issuing BMDMA we must clean up the error/active bits in
114 * software on this device
115 */
116
117static void hpt3x3_bmdma_setup(struct ata_queued_cmd *qc)
118{
119 struct ata_port *ap = qc->ap;
120 u8 r = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
121 r |= ATA_DMA_INTR | ATA_DMA_ERR;
122 iowrite8(r, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
123 return ata_bmdma_setup(qc);
124}
91 125
92/** 126/**
93 * hpt3x3_atapi_dma - ATAPI DMA check 127 * hpt3x3_atapi_dma - ATAPI DMA check
@@ -101,18 +135,23 @@ static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc)
101 return 1; 135 return 1;
102} 136}
103 137
138#endif /* CONFIG_PATA_HPT3X3_DMA */
139
104static struct scsi_host_template hpt3x3_sht = { 140static struct scsi_host_template hpt3x3_sht = {
105 ATA_BMDMA_SHT(DRV_NAME), 141 ATA_BMDMA_SHT(DRV_NAME),
106}; 142};
107 143
108static struct ata_port_operations hpt3x3_port_ops = { 144static struct ata_port_operations hpt3x3_port_ops = {
109 .inherits = &ata_bmdma_port_ops, 145 .inherits = &ata_bmdma_port_ops,
110 .check_atapi_dma= hpt3x3_atapi_dma,
111 .cable_detect = ata_cable_40wire, 146 .cable_detect = ata_cable_40wire,
112 .set_piomode = hpt3x3_set_piomode, 147 .set_piomode = hpt3x3_set_piomode,
113#if defined(CONFIG_PATA_HPT3X3_DMA) 148#if defined(CONFIG_PATA_HPT3X3_DMA)
114 .set_dmamode = hpt3x3_set_dmamode, 149 .set_dmamode = hpt3x3_set_dmamode,
150 .bmdma_setup = hpt3x3_bmdma_setup,
151 .check_atapi_dma= hpt3x3_atapi_dma,
152 .freeze = hpt3x3_freeze,
115#endif 153#endif
154
116}; 155};
117 156
118/** 157/**
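
Both new hpt3x3 hooks above follow standard SFF BMDMA register discipline: the freeze path clears ATA_DMA_START in the command register (and pauses) before touching the control register, and the setup path clears the sticky interrupt/error status bits, which are write-1-to-clear. A minimal sketch of those two accesses; the mmio base argument is a placeholder:

    #include <linux/ata.h>
    #include <linux/io.h>

    static void demo_stop_dma(void __iomem *bmdma)
    {
            /* drop the start bit so no DMA burst is in flight */
            iowrite8(ioread8(bmdma + ATA_DMA_CMD) & ~ATA_DMA_START,
                     bmdma + ATA_DMA_CMD);
    }

    static void demo_clear_dma_status(void __iomem *bmdma)
    {
            /* interrupt and error bits are write-1-to-clear */
            iowrite8(ioread8(bmdma + ATA_DMA_STATUS) | ATA_DMA_INTR | ATA_DMA_ERR,
                     bmdma + ATA_DMA_STATUS);
    }
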
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 7c8faa48b5f3..aa576cac4d17 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -35,7 +35,7 @@
35#include <linux/libata.h> 35#include <linux/libata.h>
36 36
37#define DRV_NAME "pata_mpiix" 37#define DRV_NAME "pata_mpiix"
38#define DRV_VERSION "0.7.6" 38#define DRV_VERSION "0.7.7"
39 39
40enum { 40enum {
41 IDETIM = 0x6C, /* IDE control register */ 41 IDETIM = 0x6C, /* IDE control register */
@@ -146,6 +146,7 @@ static struct ata_port_operations mpiix_port_ops = {
146 .cable_detect = ata_cable_40wire, 146 .cable_detect = ata_cable_40wire,
147 .set_piomode = mpiix_set_piomode, 147 .set_piomode = mpiix_set_piomode,
148 .prereset = mpiix_pre_reset, 148 .prereset = mpiix_pre_reset,
149 .sff_data_xfer = ata_sff_data_xfer32,
149}; 150};
150 151
151static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) 152static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 6afa07a37648..d8d743af3225 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -186,7 +186,7 @@ EXPORT_SYMBOL_GPL(__pata_platform_probe);
186 * A platform bus ATA device has been unplugged. Perform the needed 186 * A platform bus ATA device has been unplugged. Perform the needed
187 * cleanup. Also called on module unload for any active devices. 187 * cleanup. Also called on module unload for any active devices.
188 */ 188 */
189int __devexit __pata_platform_remove(struct device *dev) 189int __pata_platform_remove(struct device *dev)
190{ 190{
191 struct ata_host *host = dev_get_drvdata(dev); 191 struct ata_host *host = dev_get_drvdata(dev);
192 192
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 83580a59db58..9e764e5747e6 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -32,7 +32,7 @@
32#include <linux/libata.h> 32#include <linux/libata.h>
33 33
34#define DRV_NAME "pata_sil680" 34#define DRV_NAME "pata_sil680"
35#define DRV_VERSION "0.4.8" 35#define DRV_VERSION "0.4.9"
36 36
37#define SIL680_MMIO_BAR 5 37#define SIL680_MMIO_BAR 5
38 38
@@ -195,7 +195,7 @@ static struct scsi_host_template sil680_sht = {
195}; 195};
196 196
197static struct ata_port_operations sil680_port_ops = { 197static struct ata_port_operations sil680_port_ops = {
198 .inherits = &ata_bmdma_port_ops, 198 .inherits = &ata_bmdma32_port_ops,
199 .cable_detect = sil680_cable_detect, 199 .cable_detect = sil680_cable_detect,
200 .set_piomode = sil680_set_piomode, 200 .set_piomode = sil680_set_piomode,
201 .set_dmamode = sil680_set_dmamode, 201 .set_dmamode = sil680_set_dmamode,
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index ccee930f1e12..2590c2279fa7 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -51,13 +51,6 @@ struct sil24_sge {
51 __le32 flags; 51 __le32 flags;
52}; 52};
53 53
54/*
55 * Port multiplier
56 */
57struct sil24_port_multiplier {
58 __le32 diag;
59 __le32 sactive;
60};
61 54
62enum { 55enum {
63 SIL24_HOST_BAR = 0, 56 SIL24_HOST_BAR = 0,
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 088885ed51b9..e1c7611e9144 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -64,7 +64,7 @@
64#include <linux/jiffies.h> 64#include <linux/jiffies.h>
65#include "iphase.h" 65#include "iphase.h"
66#include "suni.h" 66#include "suni.h"
67#define swap(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8)) 67#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
68 68
69#define PRIV(dev) ((struct suni_priv *) dev->phy_data) 69#define PRIV(dev) ((struct suni_priv *) dev->phy_data)
70 70
@@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *dev)
1306 // get real pkt length pwang_test 1306 // get real pkt length pwang_test
1307 trailer = (struct cpcs_trailer*)((u_char *)skb->data + 1307 trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1308 skb->len - sizeof(*trailer)); 1308 skb->len - sizeof(*trailer));
1309 length = swap(trailer->length); 1309 length = swap_byte_order(trailer->length);
1310 if ((length > iadev->rx_buf_sz) || (length > 1310 if ((length > iadev->rx_buf_sz) || (length >
1311 (skb->len - sizeof(struct cpcs_trailer)))) 1311 (skb->len - sizeof(struct cpcs_trailer))))
1312 { 1312 {
@@ -2995,7 +2995,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2995 skb->len, PCI_DMA_TODEVICE); 2995 skb->len, PCI_DMA_TODEVICE);
2996 wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 2996 wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
2997 buf_desc_ptr->buf_start_lo; 2997 buf_desc_ptr->buf_start_lo;
2998 /* wr_ptr->bytes = swap(total_len); didn't seem to affect ?? */ 2998 /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
2999 wr_ptr->bytes = skb->len; 2999 wr_ptr->bytes = skb->len;
3000 3000
3001 /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */ 3001 /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
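
The iphase rename from swap() to swap_byte_order() keeps the driver-local helper from shadowing the generic swap() macro in linux/kernel.h (an assumption about the motivation; the patch itself only renames). The macro is a plain 16-bit byte swap, shown here as a self-contained program:

    #include <stdint.h>
    #include <stdio.h>

    #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))

    int main(void)
    {
            uint16_t len = 0x1234;

            /* prints "3412": the two bytes exchanged */
            printf("%04x\n", (unsigned)swap_byte_order(len));
            return 0;
    }
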
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 35914b6e1d2a..f5be8081cd81 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -616,6 +616,7 @@ config HVC_ISERIES
616 default y 616 default y
617 select HVC_DRIVER 617 select HVC_DRIVER
618 select HVC_IRQ 618 select HVC_IRQ
619 select VIOPATH
619 help 620 help
620 iSeries machines support a hypervisor virtual console. 621 iSeries machines support a hypervisor virtual console.
621 622
diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
index 91cdb35a9204..0afc8b82212e 100644
--- a/drivers/char/hvc_beat.c
+++ b/drivers/char/hvc_beat.c
@@ -44,7 +44,7 @@ static int hvc_beat_get_chars(uint32_t vtermno, char *buf, int cnt)
44 static unsigned char q[sizeof(unsigned long) * 2] 44 static unsigned char q[sizeof(unsigned long) * 2]
45 __attribute__((aligned(sizeof(unsigned long)))); 45 __attribute__((aligned(sizeof(unsigned long))));
46 static int qlen = 0; 46 static int qlen = 0;
47 unsigned long got; 47 u64 got;
48 48
49again: 49again:
50 if (qlen) { 50 if (qlen) {
@@ -63,7 +63,7 @@ again:
63 } 63 }
64 } 64 }
65 if (beat_get_term_char(vtermno, &got, 65 if (beat_get_term_char(vtermno, &got,
66 ((unsigned long *)q), ((unsigned long *)q) + 1) == 0) { 66 ((u64 *)q), ((u64 *)q) + 1) == 0) {
67 qlen = got; 67 qlen = got;
68 goto again; 68 goto again;
69 } 69 }
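
The hvc_beat hunk replaces unsigned long with u64 so the local variable and the casts match what is presumably a 64-bit beat_get_term_char() prototype. Fixed-width types keep such firmware-facing buffers layout-stable regardless of the build's word size; a small stand-alone illustration with a made-up firmware call:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for a firmware call that always traffics in 64-bit cells */
    static int fw_get_chars(uint64_t *got, uint64_t *w0, uint64_t *w1)
    {
            *w0 = 0x4142434445464748ull;    /* "ABCDEFGH", big-endian */
            *w1 = 0;
            *got = 8;
            return 0;
    }

    int main(void)
    {
            /* uint64_t keeps q's layout identical on 32- and 64-bit builds;
             * unsigned long would change size and misalign the casts */
            uint64_t q[2], got;

            if (fw_get_chars(&got, &q[0], &q[1]) == 0)
                    printf("%llu bytes\n", (unsigned long long)got);
            return 0;
    }
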
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 5ea7d7713fca..a53496828b76 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -1,26 +1,30 @@
1/* 1/*
2 * hvc_iucv.c - z/VM IUCV back-end for the Hypervisor Console (HVC) 2 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
3 * 3 *
4 * This back-end for HVC provides terminal access via 4 * This HVC device driver provides terminal access using
5 * z/VM IUCV communication paths. 5 * z/VM IUCV communication paths.
6 * 6 *
7 * Copyright IBM Corp. 2008. 7 * Copyright IBM Corp. 2008
8 * 8 *
9 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 9 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
10 */ 10 */
11#define KMSG_COMPONENT "hvc_iucv" 11#define KMSG_COMPONENT "hvc_iucv"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 13
13#include <linux/types.h> 14#include <linux/types.h>
14#include <asm/ebcdic.h> 15#include <asm/ebcdic.h>
16#include <linux/delay.h>
17#include <linux/init.h>
15#include <linux/mempool.h> 18#include <linux/mempool.h>
16#include <linux/module.h> 19#include <linux/module.h>
17#include <linux/tty.h> 20#include <linux/tty.h>
21#include <linux/wait.h>
18#include <net/iucv/iucv.h> 22#include <net/iucv/iucv.h>
19 23
20#include "hvc_console.h" 24#include "hvc_console.h"
21 25
22 26
23/* HVC backend for z/VM IUCV */ 27/* General device driver settings */
24#define HVC_IUCV_MAGIC 0xc9e4c3e5 28#define HVC_IUCV_MAGIC 0xc9e4c3e5
25#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS 29#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
26#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4) 30#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
@@ -33,14 +37,14 @@
33#define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */ 37#define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */
34#define MSG_TYPE_DATA 0x10 /* Terminal data */ 38#define MSG_TYPE_DATA 0x10 /* Terminal data */
35 39
36#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
37struct iucv_tty_msg { 40struct iucv_tty_msg {
38 u8 version; /* Message version */ 41 u8 version; /* Message version */
39 u8 type; /* Message type */ 42 u8 type; /* Message type */
40#define MSG_MAX_DATALEN (~(u16)0) 43#define MSG_MAX_DATALEN ((u16)(~0))
41 u16 datalen; /* Payload length */ 44 u16 datalen; /* Payload length */
42 u8 data[]; /* Payload buffer */ 45 u8 data[]; /* Payload buffer */
43} __attribute__((packed)); 46} __attribute__((packed));
47#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
44 48
45enum iucv_state_t { 49enum iucv_state_t {
46 IUCV_DISCONN = 0, 50 IUCV_DISCONN = 0,
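
Moving MSG_SIZE() below struct iucv_tty_msg puts the offsetof() it relies on next to its operand: a message's wire size is the header up to the flexible data[] member plus the payload length. A self-contained illustration (struct name shortened here):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tty_msg {
            uint8_t  version;
            uint8_t  type;
            uint16_t datalen;
            uint8_t  data[];                /* flexible payload */
    };

    /* header bytes before data[] plus the payload itself */
    #define MSG_SIZE(s) ((s) + offsetof(struct tty_msg, data))

    int main(void)
    {
            /* offsetof(..., data) is 4 here, so this prints 104 */
            printf("%zu\n", (size_t)MSG_SIZE(100));
            return 0;
    }
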
@@ -54,19 +58,26 @@ enum tty_state_t {
54}; 58};
55 59
56struct hvc_iucv_private { 60struct hvc_iucv_private {
57 struct hvc_struct *hvc; /* HVC console struct reference */ 61 struct hvc_struct *hvc; /* HVC struct reference */
58 u8 srv_name[8]; /* IUCV service name (ebcdic) */ 62 u8 srv_name[8]; /* IUCV service name (ebcdic) */
63 unsigned char is_console; /* Linux console usage flag */
59 enum iucv_state_t iucv_state; /* IUCV connection status */ 64 enum iucv_state_t iucv_state; /* IUCV connection status */
60 enum tty_state_t tty_state; /* TTY status */ 65 enum tty_state_t tty_state; /* TTY status */
61 struct iucv_path *path; /* IUCV path pointer */ 66 struct iucv_path *path; /* IUCV path pointer */
62 spinlock_t lock; /* hvc_iucv_private lock */ 67 spinlock_t lock; /* hvc_iucv_private lock */
68#define SNDBUF_SIZE (PAGE_SIZE) /* must be < MSG_MAX_DATALEN */
69 void *sndbuf; /* send buffer */
70 size_t sndbuf_len; /* length of send buffer */
71#define QUEUE_SNDBUF_DELAY (HZ / 25)
72 struct delayed_work sndbuf_work; /* work: send iucv msg(s) */
73 wait_queue_head_t sndbuf_waitq; /* wait for send completion */
63 struct list_head tty_outqueue; /* outgoing IUCV messages */ 74 struct list_head tty_outqueue; /* outgoing IUCV messages */
64 struct list_head tty_inqueue; /* incoming IUCV messages */ 75 struct list_head tty_inqueue; /* incoming IUCV messages */
65}; 76};
66 77
67struct iucv_tty_buffer { 78struct iucv_tty_buffer {
68 struct list_head list; /* list pointer */ 79 struct list_head list; /* list pointer */
69 struct iucv_message msg; /* store an incoming IUCV message */ 80 struct iucv_message msg; /* store an IUCV message */
70 size_t offset; /* data buffer offset */ 81 size_t offset; /* data buffer offset */
71 struct iucv_tty_msg *mbuf; /* buffer to store input/output data */ 82 struct iucv_tty_msg *mbuf; /* buffer to store input/output data */
72}; 83};
@@ -78,11 +89,12 @@ static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
78static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *); 89static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
79 90
80 91
81/* Kernel module parameters */ 92/* Kernel module parameter: use one terminal device as default */
82static unsigned long hvc_iucv_devices; 93static unsigned long hvc_iucv_devices = 1;
83 94
84/* Array of allocated hvc iucv tty lines... */ 95/* Array of allocated hvc iucv tty lines... */
85static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES]; 96static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
97#define IUCV_HVC_CON_IDX (0)
86 98
87/* Kmem cache and mempool for iucv_tty_buffer elements */ 99/* Kmem cache and mempool for iucv_tty_buffer elements */
88static struct kmem_cache *hvc_iucv_buffer_cache; 100static struct kmem_cache *hvc_iucv_buffer_cache;
@@ -112,7 +124,7 @@ struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
112} 124}
113 125
114/** 126/**
115 * alloc_tty_buffer() - Returns a new struct iucv_tty_buffer element. 127 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
116 * @size: Size of the internal buffer used to store data. 128 * @size: Size of the internal buffer used to store data.
117 * @flags: Memory allocation flags passed to mempool. 129 * @flags: Memory allocation flags passed to mempool.
118 * 130 *
@@ -120,7 +132,6 @@ struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
120 * allocates an internal data buffer with the specified size @size. 132 * allocates an internal data buffer with the specified size @size.
121 * Note: The total message size arises from the internal buffer size and the 133 * Note: The total message size arises from the internal buffer size and the
122 * members of the iucv_tty_msg structure. 134 * members of the iucv_tty_msg structure.
123 *
124 * The function returns NULL if memory allocation has failed. 135 * The function returns NULL if memory allocation has failed.
125 */ 136 */
126static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags) 137static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
@@ -130,7 +141,7 @@ static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
130 bufp = mempool_alloc(hvc_iucv_mempool, flags); 141 bufp = mempool_alloc(hvc_iucv_mempool, flags);
131 if (!bufp) 142 if (!bufp)
132 return NULL; 143 return NULL;
133 memset(bufp, 0, sizeof(struct iucv_tty_buffer)); 144 memset(bufp, 0, sizeof(*bufp));
134 145
135 if (size > 0) { 146 if (size > 0) {
136 bufp->msg.length = MSG_SIZE(size); 147 bufp->msg.length = MSG_SIZE(size);
@@ -149,9 +160,6 @@ static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
149/** 160/**
150 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element. 161 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
151 * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL. 162 * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
152 *
153 * The destroy_tty_buffer() function frees the internal data buffer and returns
154 * the struct iucv_tty_buffer element back to the mempool for freeing.
155 */ 163 */
156static void destroy_tty_buffer(struct iucv_tty_buffer *bufp) 164static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
157{ 165{
@@ -161,11 +169,7 @@ static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
161 169
162/** 170/**
163 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element. 171 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
164 * @list: List head pointer to a list containing struct iucv_tty_buffer 172 * @list: List containing struct iucv_tty_buffer elements.
165 * elements.
166 *
167 * Calls destroy_tty_buffer() for each struct iucv_tty_buffer element in the
168 * list @list.
169 */ 173 */
170static void destroy_tty_buffer_list(struct list_head *list) 174static void destroy_tty_buffer_list(struct list_head *list)
171{ 175{
@@ -178,24 +182,24 @@ static void destroy_tty_buffer_list(struct list_head *list)
178} 182}
179 183
180/** 184/**
181 * hvc_iucv_write() - Receive IUCV message write data to HVC console buffer. 185 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
182 * @priv: Pointer to hvc_iucv_private structure. 186 * @priv: Pointer to struct hvc_iucv_private
183 * @buf: HVC console buffer for writing received terminal data. 187 * @buf: HVC buffer for writing received terminal data.
184 * @count: HVC console buffer size. 188 * @count: HVC buffer size.
185 * @has_more_data: Pointer to an int variable. 189 * @has_more_data: Pointer to an int variable.
186 * 190 *
187 * The function picks up pending messages from the input queue and receives 191 * The function picks up pending messages from the input queue and receives
188 * the message data that is then written to the specified buffer @buf. 192 * the message data that is then written to the specified buffer @buf.
189 * If the buffer size @count is less than the data message size, then the 193 * If the buffer size @count is less than the data message size, the
190 * message is kept on the input queue and @has_more_data is set to 1. 194 * message is kept on the input queue and @has_more_data is set to 1.
191 * If the message data has been entirely written, the message is removed from 195 * If all message data has been written, the message is removed from
192 * the input queue. 196 * the input queue.
193 * 197 *
194 * The function returns the number of bytes written to the terminal, zero if 198 * The function returns the number of bytes written to the terminal, zero if
195 * there are no pending data messages available or if there is no established 199 * there are no pending data messages available or if there is no established
196 * IUCV path. 200 * IUCV path.
197 * If the IUCV path has been severed, then -EPIPE is returned to cause a 201 * If the IUCV path has been severed, then -EPIPE is returned to cause a
198 * hang up (that is issued by the HVC console layer). 202 * hang up (that is issued by the HVC layer).
199 */ 203 */
200static int hvc_iucv_write(struct hvc_iucv_private *priv, 204static int hvc_iucv_write(struct hvc_iucv_private *priv,
201 char *buf, int count, int *has_more_data) 205 char *buf, int count, int *has_more_data)
@@ -204,12 +208,12 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv,
204 int written; 208 int written;
205 int rc; 209 int rc;
206 210
207 /* Immediately return if there is no IUCV connection */ 211 /* immediately return if there is no IUCV connection */
208 if (priv->iucv_state == IUCV_DISCONN) 212 if (priv->iucv_state == IUCV_DISCONN)
209 return 0; 213 return 0;
210 214
211 /* If the IUCV path has been severed, return -EPIPE to inform the 215 /* if the IUCV path has been severed, return -EPIPE to inform the
212 * hvc console layer to hang up the tty device. */ 216 * HVC layer to hang up the tty device. */
213 if (priv->iucv_state == IUCV_SEVERED) 217 if (priv->iucv_state == IUCV_SEVERED)
214 return -EPIPE; 218 return -EPIPE;
215 219
@@ -217,7 +221,7 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv,
217 if (list_empty(&priv->tty_inqueue)) 221 if (list_empty(&priv->tty_inqueue))
218 return 0; 222 return 0;
219 223
220 /* receive a iucv message and flip data to the tty (ldisc) */ 224 /* receive an iucv message and flip data to the tty (ldisc) */
221 rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list); 225 rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
222 226
223 written = 0; 227 written = 0;
@@ -260,7 +264,7 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv,
260 case MSG_TYPE_WINSIZE: 264 case MSG_TYPE_WINSIZE:
261 if (rb->mbuf->datalen != sizeof(struct winsize)) 265 if (rb->mbuf->datalen != sizeof(struct winsize))
262 break; 266 break;
263 hvc_resize(priv->hvc, *((struct winsize *)rb->mbuf->data)); 267 hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
264 break; 268 break;
265 269
266 case MSG_TYPE_ERROR: /* ignored ... */ 270 case MSG_TYPE_ERROR: /* ignored ... */
@@ -284,10 +288,9 @@ out_written:
284 * @buf: Pointer to a buffer to store data 288 * @buf: Pointer to a buffer to store data
285 * @count: Size of buffer available for writing 289 * @count: Size of buffer available for writing
286 * 290 *
287 * The hvc_console thread calls this method to read characters from 291 * The HVC thread calls this method to read characters from the back-end.
288 * the terminal backend. If an IUCV communication path has been established, 292 * If an IUCV communication path has been established, pending IUCV messages
289 * pending IUCV messages are received and data is copied into buffer @buf 293 * are received and data is copied into buffer @buf up to @count bytes.
290 * up to @count bytes.
291 * 294 *
292 * Locking: The routine gets called under an irqsave() spinlock; and 295 * Locking: The routine gets called under an irqsave() spinlock; and
293 * the routine locks the struct hvc_iucv_private->lock to call 296 * the routine locks the struct hvc_iucv_private->lock to call
@@ -318,66 +321,122 @@ static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
318} 321}
319 322
320/** 323/**
321 * hvc_iucv_send() - Send an IUCV message containing terminal data. 324 * hvc_iucv_queue() - Buffer terminal data for sending.
322 * @priv: Pointer to struct hvc_iucv_private instance. 325 * @priv: Pointer to struct hvc_iucv_private instance.
323 * @buf: Buffer containing data to send. 326 * @buf: Buffer containing data to send.
324 * @size: Size of buffer and amount of data to send. 327 * @count: Size of buffer and amount of data to send.
328 *
329 * The function queues data for sending. To actually send the buffered data,
330 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
331 * The function returns the number of data bytes that have been buffered.
325 * 332 *
326 * If an IUCV communication path is established, the function copies the buffer 333 * If the device is not connected, data is ignored and the function returns
327 * data to a newly allocated struct iucv_tty_buffer element, sends the data and 334 * @count.
328 * puts the element to the outqueue. 335 * If the buffer is full, the function returns 0.
329 336 * If an existing IUCV communication path has been severed, -EPIPE is returned
337 * (that can be passed to HVC layer to cause a tty hangup).
338 */
339static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
340 int count)
341{
342 size_t len;
343
344 if (priv->iucv_state == IUCV_DISCONN)
345 return count; /* ignore data */
346
347 if (priv->iucv_state == IUCV_SEVERED)
348 return -EPIPE;
349
350 len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
351 if (!len)
352 return 0;
353
354 memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
355 priv->sndbuf_len += len;
356
357 if (priv->iucv_state == IUCV_CONNECTED)
358 schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
359
360 return len;
361}
362
363/**
364 * hvc_iucv_send() - Send an IUCV message containing terminal data.
365 * @priv: Pointer to struct hvc_iucv_private instance.
329 * 366 *
330 * If there is no IUCV communication path established, the function returns 0. 367 * If an IUCV communication path has been established, the buffered output data
331 * If an existing IUCV communicaton path has been severed, the function returns 368 * is sent via an IUCV message and the number of bytes sent is returned.
332 * -EPIPE (can be passed to HVC layer to cause a tty hangup). 369 * Returns 0 if there is no established IUCV communication path or
370 * -EPIPE if an existing IUCV communication path has been severed.
333 */ 371 */
334static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf, 372static int hvc_iucv_send(struct hvc_iucv_private *priv)
335 int count)
336{ 373{
337 struct iucv_tty_buffer *sb; 374 struct iucv_tty_buffer *sb;
338 int rc; 375 int rc, len;
339 u16 len;
340 376
341 if (priv->iucv_state == IUCV_SEVERED) 377 if (priv->iucv_state == IUCV_SEVERED)
342 return -EPIPE; 378 return -EPIPE;
343 379
344 if (priv->iucv_state == IUCV_DISCONN) 380 if (priv->iucv_state == IUCV_DISCONN)
345 return 0; 381 return -EIO;
346 382
347 len = min_t(u16, MSG_MAX_DATALEN, count); 383 if (!priv->sndbuf_len)
384 return 0;
348 385
349 /* allocate internal buffer to store msg data and also compute total 386 /* allocate internal buffer to store msg data and also compute total
350 * message length */ 387 * message length */
351 sb = alloc_tty_buffer(len, GFP_ATOMIC); 388 sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
352 if (!sb) 389 if (!sb)
353 return -ENOMEM; 390 return -ENOMEM;
354 391
355 sb->mbuf->datalen = len; 392 memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
356 memcpy(sb->mbuf->data, buf, len); 393 sb->mbuf->datalen = (u16) priv->sndbuf_len;
394 sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
357 395
358 list_add_tail(&sb->list, &priv->tty_outqueue); 396 list_add_tail(&sb->list, &priv->tty_outqueue);
359 397
360 rc = __iucv_message_send(priv->path, &sb->msg, 0, 0, 398 rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
361 (void *) sb->mbuf, sb->msg.length); 399 (void *) sb->mbuf, sb->msg.length);
362 if (rc) { 400 if (rc) {
401 /* drop the message here; however we might want to handle
402 * 0x03 (msg limit reached) by trying again... */
363 list_del(&sb->list); 403 list_del(&sb->list);
364 destroy_tty_buffer(sb); 404 destroy_tty_buffer(sb);
365 len = 0;
366 } 405 }
406 len = priv->sndbuf_len;
407 priv->sndbuf_len = 0;
367 408
368 return len; 409 return len;
369} 410}
370 411
371/** 412/**
413 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
414 * @work: Work structure.
415 *
416 * This work queue function sends buffered output data over IUCV and,
417 * if not all buffered data could be sent, reschedules itself.
418 */
419static void hvc_iucv_sndbuf_work(struct work_struct *work)
420{
421 struct hvc_iucv_private *priv;
422
423 priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
424 if (!priv)
425 return;
426
427 spin_lock_bh(&priv->lock);
428 hvc_iucv_send(priv);
429 spin_unlock_bh(&priv->lock);
430}
431
432/**
372 * hvc_iucv_put_chars() - HVC put_chars operation. 433 * hvc_iucv_put_chars() - HVC put_chars operation.
373 * @vtermno: HVC virtual terminal number. 434 * @vtermno: HVC virtual terminal number.
374 * @buf: Pointer to a buffer to read data from 435 * @buf: Pointer to a buffer to read data from
375 * @count: Size of buffer available for reading 436 * @count: Size of buffer available for reading
376 * 437 *
377 * The hvc_console thread calls this method to write characters from 438 * The HVC thread calls this method to write characters to the back-end.
378 * to the terminal backend. 439 * The function calls hvc_iucv_queue() to queue terminal data for sending.
379 * The function calls hvc_iucv_send() under the lock of the
380 * struct hvc_iucv_private instance that corresponds to the tty @vtermno.
381 * 440 *
382 * Locking: The method gets called under an irqsave() spinlock; and 441 * Locking: The method gets called under an irqsave() spinlock; and
383 * locks struct hvc_iucv_private->lock. 442 * locks struct hvc_iucv_private->lock.
@@ -385,7 +444,7 @@ static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf,
385static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count) 444static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
386{ 445{
387 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); 446 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
388 int sent; 447 int queued;
389 448
390 if (count <= 0) 449 if (count <= 0)
391 return 0; 450 return 0;
@@ -394,10 +453,10 @@ static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
394 return -ENODEV; 453 return -ENODEV;
395 454
396 spin_lock(&priv->lock); 455 spin_lock(&priv->lock);
397 sent = hvc_iucv_send(priv, buf, count); 456 queued = hvc_iucv_queue(priv, buf, count);
398 spin_unlock(&priv->lock); 457 spin_unlock(&priv->lock);
399 458
400 return sent; 459 return queued;
401} 460}
402 461
403/** 462/**
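
With the hunks above, the put_chars path no longer sends directly: it copies into a per-device send buffer and schedules delayed work (QUEUE_SNDBUF_DELAY), so the actual IUCV send runs later from process context and batches small writes. A condensed sketch of that buffer-and-kick pattern with placeholder names; as in hvc_iucv_put_chars(), the caller is assumed to hold the lock on the fast path:

    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/workqueue.h>

    struct demo_chan {
            spinlock_t lock;
            char buf[256];
            size_t len;
            struct delayed_work flush_work; /* INIT_DELAYED_WORK() at setup */
    };

    /* fast path (caller holds c->lock): stash the data, kick the worker */
    static size_t demo_queue(struct demo_chan *c, const char *data, size_t n)
    {
            n = min(n, sizeof(c->buf) - c->len);
            memcpy(c->buf + c->len, data, n);
            c->len += n;
            schedule_delayed_work(&c->flush_work, HZ / 25);
            return n;
    }

    /* slow path: runs later in process context */
    static void demo_flush(struct work_struct *work)
    {
            struct demo_chan *c = container_of(work, struct demo_chan,
                                               flush_work.work);

            spin_lock_bh(&c->lock);
            /* ... hand c->buf[0..c->len) to the transport here ... */
            c->len = 0;
            spin_unlock_bh(&c->lock);
    }
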
@@ -406,7 +465,7 @@ static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
406 * @id: Additional data (originally passed to hvc_alloc): the index of a struct 465 * @id: Additional data (originally passed to hvc_alloc): the index of a struct
407 * hvc_iucv_private instance. 466 * hvc_iucv_private instance.
408 * 467 *
409 * The function sets the tty state to TTY_OPEN for the struct hvc_iucv_private 468 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
410 * instance that is derived from @id. Always returns 0. 469 * instance that is derived from @id. Always returns 0.
411 * 470 *
412 * Locking: struct hvc_iucv_private->lock, spin_lock_bh 471 * Locking: struct hvc_iucv_private->lock, spin_lock_bh
@@ -427,12 +486,8 @@ static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
427} 486}
428 487
429/** 488/**
430 * hvc_iucv_cleanup() - Clean up function if the tty portion is finally closed. 489 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
431 * @priv: Pointer to the struct hvc_iucv_private instance. 490 * @priv: Pointer to the struct hvc_iucv_private instance.
432 *
433 * The functions severs the established IUCV communication path (if any), and
434 * destroy struct iucv_tty_buffer elements from the in- and outqueue. Finally,
435 * the functions resets the states to TTY_CLOSED and IUCV_DISCONN.
436 */ 491 */
437static void hvc_iucv_cleanup(struct hvc_iucv_private *priv) 492static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
438{ 493{
@@ -441,25 +496,62 @@ static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
441 496
442 priv->tty_state = TTY_CLOSED; 497 priv->tty_state = TTY_CLOSED;
443 priv->iucv_state = IUCV_DISCONN; 498 priv->iucv_state = IUCV_DISCONN;
499
500 priv->sndbuf_len = 0;
444} 501}
445 502
446/** 503/**
447 * hvc_iucv_notifier_hangup() - HVC notifier for tty hangups. 504 * tty_outqueue_empty() - Test if the tty outq is empty
448 * @hp: Pointer to the HVC device (struct hvc_struct) 505 * @priv: Pointer to struct hvc_iucv_private instance.
449 * @id: Additional data (originally passed to hvc_alloc): the index of an struct 506 */
450 * hvc_iucv_private instance. 507static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
508{
509 int rc;
510
511 spin_lock_bh(&priv->lock);
512 rc = list_empty(&priv->tty_outqueue);
513 spin_unlock_bh(&priv->lock);
514
515 return rc;
516}
517
518/**
519 * flush_sndbuf_sync() - Flush send buffer and wait for completion
520 * @priv: Pointer to struct hvc_iucv_private instance.
451 * 521 *
452 * This routine notifies the HVC backend that a tty hangup (carrier loss, 522 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
453 * virtual or otherwise) has occured. 523 * to flush any buffered terminal output data and waits for completion.
524 */
525static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
526{
527 int sync_wait;
528
529 cancel_delayed_work_sync(&priv->sndbuf_work);
530
531 spin_lock_bh(&priv->lock);
532 hvc_iucv_send(priv); /* force sending buffered data */
533 sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
534 spin_unlock_bh(&priv->lock);
535
536 if (sync_wait)
537 wait_event_timeout(priv->sndbuf_waitq,
538 tty_outqueue_empty(priv), HZ);
539}
540
541/**
542 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
543 * @hp: Pointer to the HVC device (struct hvc_struct)
544 * @id: Additional data (originally passed to hvc_alloc):
545 * the index of a struct hvc_iucv_private instance.
454 * 546 *
455 * The HVC backend for z/VM IUCV ignores virtual hangups (vhangup()), to keep 547 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
456 * an existing IUCV communication path established. 548 * virtual or otherwise) has occurred.
549 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
550 * to keep an existing IUCV communication path established.
457 * (Background: vhangup() is called from user space (by getty or login) to 551 * (Background: vhangup() is called from user space (by getty or login) to
458 * disable writing to the tty by other applications). 552 * disable writing to the tty by other applications).
459 * 553 * If the tty has been opened and an established IUCV path has been severed
460 * If the tty has been opened (e.g. getty) and an established IUCV path has been 554 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
461 * severed (we caused the tty hangup in that case), then the functions invokes
462 * hvc_iucv_cleanup() to clean up.
463 * 555 *
464 * Locking: struct hvc_iucv_private->lock 556 * Locking: struct hvc_iucv_private->lock
465 */ 557 */
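
flush_sndbuf_sync() above is the teardown-side counterpart: cancel any pending worker, push out whatever is buffered, then wait a bounded time until the message-complete callback has drained the outqueue. A condensed, self-contained sketch of the wait half; all names are placeholders, demo_work is assumed initialized with INIT_DELAYED_WORK(), and the completion path is assumed to wake demo_waitq:

    #include <linux/jiffies.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>
    #include <linux/workqueue.h>

    static DEFINE_SPINLOCK(demo_lock);
    static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
    static struct delayed_work demo_work;
    static int demo_in_flight;              /* messages not yet acknowledged */

    static int demo_drained(void)
    {
            int empty;

            spin_lock_bh(&demo_lock);
            empty = (demo_in_flight == 0);
            spin_unlock_bh(&demo_lock);
            return empty;
    }

    static void demo_flush_sync(void)
    {
            cancel_delayed_work_sync(&demo_work);   /* stop new sends */
            /* ... force out buffered data under demo_lock here ... */
            if (!demo_drained())
                    /* the completion handler calls wake_up(&demo_waitq) */
                    wait_event_timeout(demo_waitq, demo_drained(), HZ);
    }
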
@@ -471,12 +563,12 @@ static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
471 if (!priv) 563 if (!priv)
472 return; 564 return;
473 565
566 flush_sndbuf_sync(priv);
567
474 spin_lock_bh(&priv->lock); 568 spin_lock_bh(&priv->lock);
475 /* NOTE: If the hangup was scheduled by ourself (from the iucv 569 /* NOTE: If the hangup was scheduled by ourselves (from the iucv
476 * path_servered callback [IUCV_SEVERED]), then we have to 570 * path_severed callback [IUCV_SEVERED]), we have to clean up
477 * finally clean up the tty backend structure and set state to 571 * our structure and to set state to TTY_CLOSED.
478 * TTY_CLOSED.
479 *
480 * If the tty was hung up otherwise (e.g. vhangup()), then we 572 * If the tty was hung up otherwise (e.g. vhangup()), then we
481 * ignore this hangup and keep an established IUCV path open... 573 * ignore this hangup and keep an established IUCV path open...
482 * (...the reason is that we are not able to connect back to the 574 * (...the reason is that we are not able to connect back to the
@@ -494,10 +586,9 @@ static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
494 * @id: Additional data (originally passed to hvc_alloc): 586 * @id: Additional data (originally passed to hvc_alloc):
495 * the index of an struct hvc_iucv_private instance. 587 * the index of an struct hvc_iucv_private instance.
496 * 588 *
497 * This routine notifies the HVC backend that the last tty device file 589 * This routine notifies the HVC back-end that the last tty device fd has been
498 * descriptor has been closed. 590 * closed. The function calls hvc_iucv_cleanup() to clean up the struct
499 * The function calls hvc_iucv_cleanup() to clean up the struct hvc_iucv_private 591 * hvc_iucv_private instance.
500 * instance.
501 * 592 *
502 * Locking: struct hvc_iucv_private->lock 593 * Locking: struct hvc_iucv_private->lock
503 */ 594 */
@@ -510,6 +601,8 @@ static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
510 if (!priv) 601 if (!priv)
511 return; 602 return;
512 603
604 flush_sndbuf_sync(priv);
605
513 spin_lock_bh(&priv->lock); 606 spin_lock_bh(&priv->lock);
514 path = priv->path; /* save reference to IUCV path */ 607 path = priv->path; /* save reference to IUCV path */
515 priv->path = NULL; 608 priv->path = NULL;
@@ -527,20 +620,18 @@ static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
527/** 620/**
528 * hvc_iucv_path_pending() - IUCV handler to process a connection request. 621 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
529 * @path: Pending path (struct iucv_path) 622 * @path: Pending path (struct iucv_path)
530 * @ipvmid: Originator z/VM system identifier 623 * @ipvmid: z/VM system identifier of originator
531 * @ipuser: User specified data for this path 624 * @ipuser: User specified data for this path
532 * (AF_IUCV: port/service name and originator port) 625 * (AF_IUCV: port/service name and originator port)
533 * 626 *
534 * The function uses the @ipuser data to check to determine if the pending 627 * The function uses the @ipuser data to determine if the pending path belongs
535 * path belongs to a terminal managed by this HVC backend. 628 * to a terminal managed by this device driver.
536 * If the check is successful, then an additional check is done to ensure 629 * If the path belongs to this driver, ensure that the terminal is not accessed
537 * that a terminal cannot be accessed multiple times (only one connection 630 * multiple times (only one connection to a terminal is allowed).
538 * to a terminal is allowed). In that particular case, the pending path is 631 * If the terminal is not yet connected, the pending path is accepted and is
539 * severed. If it is the first connection, the pending path is accepted and 632 * associated to the appropriate struct hvc_iucv_private instance.
540 * associated to the struct hvc_iucv_private. The iucv state is updated to
541 * reflect that a communication path has been established.
542 * 633 *
543 * Returns 0 if the path belongs to a terminal managed by the this HVC backend; 634 * Returns 0 if @path belongs to a terminal managed by this device driver;
544 * otherwise returns -ENODEV in order to dispatch this path to other handlers. 635 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
545 * 636 *
546 * Locking: struct hvc_iucv_private->lock 637 * Locking: struct hvc_iucv_private->lock
@@ -559,7 +650,6 @@ static int hvc_iucv_path_pending(struct iucv_path *path,
559 priv = hvc_iucv_table[i]; 650 priv = hvc_iucv_table[i];
560 break; 651 break;
561 } 652 }
562
563 if (!priv) 653 if (!priv)
564 return -ENODEV; 654 return -ENODEV;
565 655
@@ -588,6 +678,9 @@ static int hvc_iucv_path_pending(struct iucv_path *path,
588 priv->path = path; 678 priv->path = path;
589 priv->iucv_state = IUCV_CONNECTED; 679 priv->iucv_state = IUCV_CONNECTED;
590 680
681 /* flush buffered output data... */
682 schedule_delayed_work(&priv->sndbuf_work, 5);
683
591out_path_handled: 684out_path_handled:
592 spin_unlock(&priv->lock); 685 spin_unlock(&priv->lock);
593 return 0; 686 return 0;
@@ -603,8 +696,7 @@ out_path_handled:
603 * sets the iucv state to IUCV_SEVERED for the associated struct 696 * sets the iucv state to IUCV_SEVERED for the associated struct
604 * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty 697 * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty
605 * hangup (hvc_iucv_get_chars() / hvc_iucv_write()). 698 * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
606 * 699 * If tty portion of the HVC is closed, clean up the outqueue.
607 * If tty portion of the HVC is closed then clean up the outqueue in addition.
608 * 700 *
609 * Locking: struct hvc_iucv_private->lock 701 * Locking: struct hvc_iucv_private->lock
610 */ 702 */
@@ -615,15 +707,25 @@ static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
615 spin_lock(&priv->lock); 707 spin_lock(&priv->lock);
616 priv->iucv_state = IUCV_SEVERED; 708 priv->iucv_state = IUCV_SEVERED;
617 709
618 /* NOTE: If the tty has not yet been opened by a getty program 710 /* If the tty has not yet been opened, clean up the hvc_iucv_private
619 * (e.g. to see console messages), then cleanup the 711 * structure to allow re-connects.
620 * hvc_iucv_private structure to allow re-connects. 712 * This is also done for our console device because console hangups
713 * are handled specially and no notifier is called by HVC.
714 * The tty session is active (TTY_OPEN) and ready for re-connects...
621 * 715 *
622 * If the tty has been opened, the get_chars() callback returns 716 * If it has been opened, let get_chars() return -EPIPE to signal the
623 * -EPIPE to signal the hvc console layer to hang up the tty. */ 717 * HVC layer to hang up the tty.
718 * If so, we need to wake up the HVC thread to call get_chars()...
719 */
624 priv->path = NULL; 720 priv->path = NULL;
625 if (priv->tty_state == TTY_CLOSED) 721 if (priv->tty_state == TTY_CLOSED)
626 hvc_iucv_cleanup(priv); 722 hvc_iucv_cleanup(priv);
723 else
724 if (priv->is_console) {
725 hvc_iucv_cleanup(priv);
726 priv->tty_state = TTY_OPENED;
727 } else
728 hvc_kick();
627 spin_unlock(&priv->lock); 729 spin_unlock(&priv->lock);
628 730
629 /* finally sever path (outside of priv->lock due to lock ordering) */ 731 /* finally sever path (outside of priv->lock due to lock ordering) */
@@ -636,9 +738,9 @@ static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
636 * @path: Pending path (struct iucv_path) 738 * @path: Pending path (struct iucv_path)
637 * @msg: Pointer to the IUCV message 739 * @msg: Pointer to the IUCV message
638 * 740 *
639 * The function stores an incoming message on the input queue for later 741 * The function puts an incoming message on the input queue for later
640 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()). 742 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
641 * However, if the tty has not yet been opened, the message is rejected. 743 * If the tty has not yet been opened, the message is rejected.
642 * 744 *
643 * Locking: struct hvc_iucv_private->lock 745 * Locking: struct hvc_iucv_private->lock
644 */ 746 */
@@ -648,6 +750,12 @@ static void hvc_iucv_msg_pending(struct iucv_path *path,
648 struct hvc_iucv_private *priv = path->private; 750 struct hvc_iucv_private *priv = path->private;
649 struct iucv_tty_buffer *rb; 751 struct iucv_tty_buffer *rb;
650 752
753 /* reject messages that exceed max size of iucv_tty_msg->datalen */
754 if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
755 iucv_message_reject(path, msg);
756 return;
757 }
758
651 spin_lock(&priv->lock); 759 spin_lock(&priv->lock);
652 760
653 /* reject messages if tty has not yet been opened */ 761 /* reject messages if tty has not yet been opened */
@@ -656,7 +764,7 @@ static void hvc_iucv_msg_pending(struct iucv_path *path,
656 goto unlock_return; 764 goto unlock_return;
657 } 765 }
658 766
659 /* allocate buffer an empty buffer element */ 767 /* allocate tty buffer to save iucv msg only */
660 rb = alloc_tty_buffer(0, GFP_ATOMIC); 768 rb = alloc_tty_buffer(0, GFP_ATOMIC);
661 if (!rb) { 769 if (!rb) {
662 iucv_message_reject(path, msg); 770 iucv_message_reject(path, msg);
@@ -666,7 +774,7 @@ static void hvc_iucv_msg_pending(struct iucv_path *path,
666 774
667 list_add_tail(&rb->list, &priv->tty_inqueue); 775 list_add_tail(&rb->list, &priv->tty_inqueue);
668 776
669 hvc_kick(); /* wakup hvc console thread */ 777 hvc_kick(); /* wake up hvc thread */
670 778
671unlock_return: 779unlock_return:
672 spin_unlock(&priv->lock); 780 spin_unlock(&priv->lock);
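The new length guard at the top of hvc_iucv_msg_pending() runs before taking priv->lock because rejecting an oversized message needs no driver state. A sketch of what the bound expresses; the struct layout mirrors the iucv_tty_msg framing used elsewhere in this driver, and the exact MSG_MAX_DATALEN value is an assumption here:

/* Sketch: the message framing behind MSG_SIZE(MSG_MAX_DATALEN). */
struct iucv_tty_msg_sketch {
	u8	version;	/* message version */
	u8	type;		/* terminal data, window size, ... */
	u16	datalen;	/* payload length */
	u8	data[];		/* payload */
};

#define MSG_MAX_DATALEN_SKETCH	((u16) ~0U)	/* assumed upper bound */
#define MSG_SIZE_SKETCH(l) \
	((l) + offsetof(struct iucv_tty_msg_sketch, data))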
@@ -677,10 +785,10 @@ unlock_return:
677 * @path: Pending path (struct iucv_path) 785 * @path: Pending path (struct iucv_path)
678 * @msg: Pointer to the IUCV message 786 * @msg: Pointer to the IUCV message
679 * 787 *
680 * The function is called upon completion of message delivery and the 788 * The function is called upon completion of message delivery to remove the
681 * message is removed from the outqueue. Additional delivery information 789 * message from the outqueue. Additional delivery information can be found
682 * can be found in msg->audit: rejected messages (0x040000 (IPADRJCT)) and 790 * in msg->audit: rejected messages (0x040000 (IPADRJCT)), and
683 * purged messages (0x010000 (IPADPGNR)). 791 * purged messages (0x010000 (IPADPGNR)).
684 * 792 *
685 * Locking: struct hvc_iucv_private->lock 793 * Locking: struct hvc_iucv_private->lock
686 */ 794 */
@@ -697,6 +805,7 @@ static void hvc_iucv_msg_complete(struct iucv_path *path,
697 list_move(&ent->list, &list_remove); 805 list_move(&ent->list, &list_remove);
698 break; 806 break;
699 } 807 }
808 wake_up(&priv->sndbuf_waitq);
700 spin_unlock(&priv->lock); 809 spin_unlock(&priv->lock);
701 destroy_tty_buffer_list(&list_remove); 810 destroy_tty_buffer_list(&list_remove);
702} 811}
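The wake_up() added to hvc_iucv_msg_complete() pairs with a writer that sleeps until the previous send buffer has been delivered. The write side is not part of this hunk; a hedged sketch of the waiting counterpart it implies:

/* Sketch: the writer blocks until msg_complete() drains the outqueue
 * and issues the wake_up() above (condition re-checked on wakeup). */
static int hvc_iucv_wait_sndbuf_sketch(struct hvc_iucv_private *priv)
{
	return wait_event_interruptible(priv->sndbuf_waitq,
					list_empty(&priv->tty_outqueue));
}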
@@ -713,13 +822,14 @@ static struct hv_ops hvc_iucv_ops = {
713 822
714/** 823/**
715 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance 824 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
716 * @id: hvc_iucv_table index 825 * @id: hvc_iucv_table index
826 * @is_console: Flag if the instance is used as Linux console
717 * 827 *
718 * This function allocates a new hvc_iucv_private struct and put the 828 * This function allocates a new hvc_iucv_private structure and stores
719 * instance into hvc_iucv_table at index @id. 829 * the instance in hvc_iucv_table at index @id.
720 * Returns 0 on success; otherwise non-zero. 830 * Returns 0 on success; otherwise non-zero.
721 */ 831 */
722static int __init hvc_iucv_alloc(int id) 832static int __init hvc_iucv_alloc(int id, unsigned int is_console)
723{ 833{
724 struct hvc_iucv_private *priv; 834 struct hvc_iucv_private *priv;
725 char name[9]; 835 char name[9];
@@ -732,18 +842,33 @@ static int __init hvc_iucv_alloc(int id)
732 spin_lock_init(&priv->lock); 842 spin_lock_init(&priv->lock);
733 INIT_LIST_HEAD(&priv->tty_outqueue); 843 INIT_LIST_HEAD(&priv->tty_outqueue);
734 INIT_LIST_HEAD(&priv->tty_inqueue); 844 INIT_LIST_HEAD(&priv->tty_inqueue);
845 INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
846 init_waitqueue_head(&priv->sndbuf_waitq);
847
848 priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
849 if (!priv->sndbuf) {
850 kfree(priv);
851 return -ENOMEM;
852 }
853
854 /* set console flag */
855 priv->is_console = is_console;
735 856
736 /* Finally allocate hvc */ 857 /* finally allocate hvc */
737 priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, 858 priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,
738 HVC_IUCV_MAGIC + id, &hvc_iucv_ops, PAGE_SIZE); 859 HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
739 if (IS_ERR(priv->hvc)) { 860 if (IS_ERR(priv->hvc)) {
740 rc = PTR_ERR(priv->hvc); 861 rc = PTR_ERR(priv->hvc);
862 free_page((unsigned long) priv->sndbuf);
741 kfree(priv); 863 kfree(priv);
742 return rc; 864 return rc;
743 } 865 }
744 866
867 /* notify HVC thread instead of using polling */
868 priv->hvc->irq_requested = 1;
869
745 /* setup iucv related information */ 870 /* setup iucv related information */
746 snprintf(name, 9, "ihvc%-4d", id); 871 snprintf(name, 9, "lnxhvc%-2d", id);
747 memcpy(priv->srv_name, name, 8); 872 memcpy(priv->srv_name, name, 8);
748 ASCEBC(priv->srv_name, 8); 873 ASCEBC(priv->srv_name, 8);
749 874
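The renamed service name is built in ASCII and converted in place because IUCV identifiers on z/VM are EBCDIC. A compact sketch of the steps above, assuming the s390 ASCEBC() helper:

/* Sketch: deriving the 8-byte EBCDIC IUCV service name. */
static void hvc_iucv_srv_name_sketch(u8 srv_name[8], int id)
{
	char name[9];

	snprintf(name, sizeof(name), "lnxhvc%-2d", id);	/* e.g. "lnxhvc0 " */
	memcpy(srv_name, name, 8);	/* drop the trailing NUL */
	ASCEBC(srv_name, 8);		/* in-place ASCII -> EBCDIC */
}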
@@ -752,15 +877,16 @@ static int __init hvc_iucv_alloc(int id)
752} 877}
753 878
754/** 879/**
755 * hvc_iucv_init() - Initialization of HVC backend for z/VM IUCV 880 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
756 */ 881 */
757static int __init hvc_iucv_init(void) 882static int __init hvc_iucv_init(void)
758{ 883{
759 int rc, i; 884 int rc;
885 unsigned int i;
760 886
761 if (!MACHINE_IS_VM) { 887 if (!MACHINE_IS_VM) {
762 pr_warning("The z/VM IUCV Hypervisor console cannot be " 888 pr_info("The z/VM IUCV HVC device driver cannot "
763 "used without z/VM.\n"); 889 "be used without z/VM\n");
764 return -ENODEV; 890 return -ENODEV;
765 } 891 }
766 892
@@ -774,26 +900,33 @@ static int __init hvc_iucv_init(void)
774 sizeof(struct iucv_tty_buffer), 900 sizeof(struct iucv_tty_buffer),
775 0, 0, NULL); 901 0, 0, NULL);
776 if (!hvc_iucv_buffer_cache) { 902 if (!hvc_iucv_buffer_cache) {
777 pr_err("Not enough memory for driver initialization " 903 pr_err("Allocating memory failed with reason code=%d\n", 1);
778 "(rs=%d).\n", 1);
779 return -ENOMEM; 904 return -ENOMEM;
780 } 905 }
781 906
782 hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR, 907 hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
783 hvc_iucv_buffer_cache); 908 hvc_iucv_buffer_cache);
784 if (!hvc_iucv_mempool) { 909 if (!hvc_iucv_mempool) {
785 pr_err("Not enough memory for driver initialization " 910 pr_err("Allocating memory failed with reason code=%d\n", 2);
786 "(rs=%d).\n", 2);
787 kmem_cache_destroy(hvc_iucv_buffer_cache); 911 kmem_cache_destroy(hvc_iucv_buffer_cache);
788 return -ENOMEM; 912 return -ENOMEM;
789 } 913 }
790 914
915 /* register the first terminal device as console
916 * (must be done before allocating hvc terminal devices) */
917 rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
918 if (rc) {
919 pr_err("Registering HVC terminal device as "
920 "Linux console failed\n");
921 goto out_error_memory;
922 }
923
791 /* allocate hvc_iucv_private structs */ 924 /* allocate hvc_iucv_private structs */
792 for (i = 0; i < hvc_iucv_devices; i++) { 925 for (i = 0; i < hvc_iucv_devices; i++) {
793 rc = hvc_iucv_alloc(i); 926 rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
794 if (rc) { 927 if (rc) {
795 pr_err("Could not create new z/VM IUCV HVC backend " 928 pr_err("Creating a new HVC terminal device "
796 "rc=%d.\n", rc); 929 "failed with error code=%d\n", rc);
797 goto out_error_hvc; 930 goto out_error_hvc;
798 } 931 }
799 } 932 }
@@ -801,7 +934,8 @@ static int __init hvc_iucv_init(void)
801 /* register IUCV callback handler */ 934 /* register IUCV callback handler */
802 rc = iucv_register(&hvc_iucv_handler, 0); 935 rc = iucv_register(&hvc_iucv_handler, 0);
803 if (rc) { 936 if (rc) {
804 pr_err("Could not register iucv handler (rc=%d).\n", rc); 937 pr_err("Registering IUCV handlers failed with error code=%d\n",
938 rc);
805 goto out_error_iucv; 939 goto out_error_iucv;
806 } 940 }
807 941
@@ -816,22 +950,13 @@ out_error_hvc:
816 hvc_remove(hvc_iucv_table[i]->hvc); 950 hvc_remove(hvc_iucv_table[i]->hvc);
817 kfree(hvc_iucv_table[i]); 951 kfree(hvc_iucv_table[i]);
818 } 952 }
953out_error_memory:
819 mempool_destroy(hvc_iucv_mempool); 954 mempool_destroy(hvc_iucv_mempool);
820 kmem_cache_destroy(hvc_iucv_buffer_cache); 955 kmem_cache_destroy(hvc_iucv_buffer_cache);
821 return rc; 956 return rc;
822} 957}
823 958
824/** 959/**
825 * hvc_iucv_console_init() - Early console initialization
826 */
827static int __init hvc_iucv_console_init(void)
828{
829 if (!MACHINE_IS_VM || !hvc_iucv_devices)
830 return -ENODEV;
831 return hvc_instantiate(HVC_IUCV_MAGIC, 0, &hvc_iucv_ops);
832}
833
834/**
835 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter 960 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
836 * @val: Parameter value (numeric) 961 * @val: Parameter value (numeric)
837 */ 962 */
@@ -841,10 +966,5 @@ static int __init hvc_iucv_config(char *val)
841} 966}
842 967
843 968
844module_init(hvc_iucv_init); 969device_initcall(hvc_iucv_init);
845console_initcall(hvc_iucv_console_init);
846__setup("hvc_iucv=", hvc_iucv_config); 970__setup("hvc_iucv=", hvc_iucv_config);
847
848MODULE_LICENSE("GPL");
849MODULE_DESCRIPTION("HVC back-end for z/VM IUCV.");
850MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
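With the separate console_initcall gone, the console slot must be claimed inside hvc_iucv_init() before any terminal device is allocated, as the comment in the init hunk notes. A sketch of the ordering contract the init path now enforces (IUCV_HVC_CON_IDX is the index of the console terminal):

/* Sketch: register the console mapping first, then create devices. */
static int __init hvc_iucv_init_order_sketch(void)
{
	int rc;

	/* map (HVC_IUCV_MAGIC, console index) before hvc_alloc() runs */
	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
	if (rc)
		return rc;

	/* only now allocate terminal devices, flagging the console one */
	return hvc_iucv_alloc(IUCV_HVC_CON_IDX, 1);
}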
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 112a6ba9a96f..146c97613da0 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -32,7 +32,7 @@
32 32
33/* These are global because they are accessed in tty_io.c */ 33/* These are global because they are accessed in tty_io.c */
34#ifdef CONFIG_UNIX98_PTYS 34#ifdef CONFIG_UNIX98_PTYS
35struct tty_driver *ptm_driver; 35static struct tty_driver *ptm_driver;
36static struct tty_driver *pts_driver; 36static struct tty_driver *pts_driver;
37#endif 37#endif
38 38
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 20d6efb6324e..e0d0f8b2696b 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -48,9 +48,10 @@
48 * CONFIG_HPET_EMULATE_RTC 48 * CONFIG_HPET_EMULATE_RTC
49 * 1.12a Maciej W. Rozycki: Handle memory-mapped chips properly. 49 * 1.12a Maciej W. Rozycki: Handle memory-mapped chips properly.
50 * 1.12ac Alan Cox: Allow read access to the day of week register 50 * 1.12ac Alan Cox: Allow read access to the day of week register
51 * 1.12b David John: Remove calls to the BKL.
51 */ 52 */
52 53
53#define RTC_VERSION "1.12ac" 54#define RTC_VERSION "1.12b"
54 55
55/* 56/*
56 * Note that *all* calls to CMOS_READ and CMOS_WRITE are done with 57 * Note that *all* calls to CMOS_READ and CMOS_WRITE are done with
@@ -73,7 +74,6 @@
73#include <linux/proc_fs.h> 74#include <linux/proc_fs.h>
74#include <linux/seq_file.h> 75#include <linux/seq_file.h>
75#include <linux/spinlock.h> 76#include <linux/spinlock.h>
76#include <linux/smp_lock.h>
77#include <linux/sysctl.h> 77#include <linux/sysctl.h>
78#include <linux/wait.h> 78#include <linux/wait.h>
79#include <linux/bcd.h> 79#include <linux/bcd.h>
@@ -182,8 +182,8 @@ static int rtc_proc_open(struct inode *inode, struct file *file);
182 182
183/* 183/*
184 * rtc_status is never changed by rtc_interrupt, and ioctl/open/close is 184 * rtc_status is never changed by rtc_interrupt, and ioctl/open/close is
185 * protected by the big kernel lock. However, ioctl can still disable the timer 185 * protected by the spin lock rtc_lock. However, ioctl can still disable the
186 * in rtc_status and then with del_timer after the interrupt has read 186 * timer in rtc_status and then with del_timer after the interrupt has read
187 * rtc_status but before mod_timer is called, which would then reenable the 187 * rtc_status but before mod_timer is called, which would then reenable the
188 * timer (but you would need to have an awful timing before you'd trip on it) 188 * timer (but you would need to have an awful timing before you'd trip on it)
189 */ 189 */
@@ -720,9 +720,7 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
720static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 720static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
721{ 721{
722 long ret; 722 long ret;
723 lock_kernel();
724 ret = rtc_do_ioctl(cmd, arg, 0); 723 ret = rtc_do_ioctl(cmd, arg, 0);
725 unlock_kernel();
726 return ret; 724 return ret;
727} 725}
728 726
@@ -731,12 +729,8 @@ static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
731 * Also clear the previous interrupt data on an open, and clean 729 * Also clear the previous interrupt data on an open, and clean
732 * up things on a close. 730 * up things on a close.
733 */ 731 */
734
735/* We use rtc_lock to protect against concurrent opens. So the BKL is not
736 * needed here. Or anywhere else in this driver. */
737static int rtc_open(struct inode *inode, struct file *file) 732static int rtc_open(struct inode *inode, struct file *file)
738{ 733{
739 lock_kernel();
740 spin_lock_irq(&rtc_lock); 734 spin_lock_irq(&rtc_lock);
741 735
742 if (rtc_status & RTC_IS_OPEN) 736 if (rtc_status & RTC_IS_OPEN)
@@ -746,12 +740,10 @@ static int rtc_open(struct inode *inode, struct file *file)
746 740
747 rtc_irq_data = 0; 741 rtc_irq_data = 0;
748 spin_unlock_irq(&rtc_lock); 742 spin_unlock_irq(&rtc_lock);
749 unlock_kernel();
750 return 0; 743 return 0;
751 744
752out_busy: 745out_busy:
753 spin_unlock_irq(&rtc_lock); 746 spin_unlock_irq(&rtc_lock);
754 unlock_kernel();
755 return -EBUSY; 747 return -EBUSY;
756} 748}
757 749
@@ -800,7 +792,6 @@ no_irq:
800} 792}
801 793
802#ifdef RTC_IRQ 794#ifdef RTC_IRQ
803/* Called without the kernel lock - fine */
804static unsigned int rtc_poll(struct file *file, poll_table *wait) 795static unsigned int rtc_poll(struct file *file, poll_table *wait)
805{ 796{
806 unsigned long l; 797 unsigned long l;
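The lock_kernel()/unlock_kernel() pairs can go because rtc_lock already provides the only exclusion the driver needs: a single-open flag checked and set atomically. A minimal sketch of the pattern that remains after this diff, using the driver's rtc_status/RTC_IS_OPEN flags:

/* Sketch: spinlock-based single-open exclusion replacing the BKL. */
static int rtc_open_sketch(void)
{
	spin_lock_irq(&rtc_lock);
	if (rtc_status & RTC_IS_OPEN) {
		spin_unlock_irq(&rtc_lock);
		return -EBUSY;		/* device already held */
	}
	rtc_status |= RTC_IS_OPEN;
	rtc_irq_data = 0;		/* discard stale interrupt data */
	spin_unlock_irq(&rtc_lock);
	return 0;
}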
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index 68f052b42ed7..ed306eb1057f 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -23,8 +23,6 @@
23#include <linux/security.h> 23#include <linux/security.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <acpi/acpi.h> 25#include <acpi/acpi.h>
26#include <acpi/actypes.h>
27#include <acpi/actbl.h>
28#include "tpm.h" 26#include "tpm.h"
29 27
30#define TCG_EVENT_NAME_LEN_MAX 255 28#define TCG_EVENT_NAME_LEN_MAX 255
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index ab18c1e7b115..70efba2ee053 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -273,12 +273,23 @@ static void tpm_nsc_remove(struct device *dev)
273 } 273 }
274} 274}
275 275
276static struct device_driver nsc_drv = { 276static int tpm_nsc_suspend(struct platform_device *dev, pm_message_t msg)
277 .name = "tpm_nsc", 277{
278 .bus = &platform_bus_type, 278 return tpm_pm_suspend(&dev->dev, msg);
279 .owner = THIS_MODULE, 279}
280 .suspend = tpm_pm_suspend, 280
281 .resume = tpm_pm_resume, 281static int tpm_nsc_resume(struct platform_device *dev)
282{
283 return tpm_pm_resume(&dev->dev);
284}
285
286static struct platform_driver nsc_drv = {
287 .suspend = tpm_nsc_suspend,
288 .resume = tpm_nsc_resume,
289 .driver = {
290 .name = "tpm_nsc",
291 .owner = THIS_MODULE,
292 },
282}; 293};
283 294
284static int __init init_nsc(void) 295static int __init init_nsc(void)
@@ -297,7 +308,7 @@ static int __init init_nsc(void)
297 return -ENODEV; 308 return -ENODEV;
298 } 309 }
299 310
300 err = driver_register(&nsc_drv); 311 err = platform_driver_register(&nsc_drv);
301 if (err) 312 if (err)
302 return err; 313 return err;
303 314
@@ -308,17 +319,15 @@ static int __init init_nsc(void)
308 /* enable the DPM module */ 319 /* enable the DPM module */
309 tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01); 320 tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
310 321
311 pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL); 322 pdev = platform_device_alloc("tpm_nscl0", -1);
312 if (!pdev) { 323 if (!pdev) {
313 rc = -ENOMEM; 324 rc = -ENOMEM;
314 goto err_unreg_drv; 325 goto err_unreg_drv;
315 } 326 }
316 327
317 pdev->name = "tpm_nscl0";
318 pdev->id = -1;
319 pdev->num_resources = 0; 328 pdev->num_resources = 0;
329 pdev->dev.driver = &nsc_drv.driver;
320 pdev->dev.release = tpm_nsc_remove; 330 pdev->dev.release = tpm_nsc_remove;
321 pdev->dev.driver = &nsc_drv;
322 331
323 if ((rc = platform_device_register(pdev)) < 0) 332 if ((rc = platform_device_register(pdev)) < 0)
324 goto err_free_dev; 333 goto err_free_dev;
@@ -377,7 +386,7 @@ err_unreg_dev:
377err_free_dev: 386err_free_dev:
378 kfree(pdev); 387 kfree(pdev);
379err_unreg_drv: 388err_unreg_drv:
380 driver_unregister(&nsc_drv); 389 platform_driver_unregister(&nsc_drv);
381 return rc; 390 return rc;
382} 391}
383 392
@@ -390,7 +399,7 @@ static void __exit cleanup_nsc(void)
390 pdev = NULL; 399 pdev = NULL;
391 } 400 }
392 401
393 driver_unregister(&nsc_drv); 402 platform_driver_unregister(&nsc_drv);
394} 403}
395 404
396module_init(init_nsc); 405module_init(init_nsc);
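The conversion replaces a raw struct device_driver on platform_bus_type with a proper struct platform_driver, and the hand-rolled kzalloc'd device with platform_device_alloc(). The diff above keeps platform_device_register() on the alloc'd device; the sketch below uses the platform_device_add()/platform_device_put() pairing that is conventional with platform_device_alloc(), since a device obtained that way must be dropped with a put, not kfree():

/* Sketch: platform driver/device registration as converted above. */
static int __init tpm_nsc_register_sketch(void)
{
	struct platform_device *pdev;
	int rc;

	rc = platform_driver_register(&nsc_drv);
	if (rc)
		return rc;

	pdev = platform_device_alloc("tpm_nscl0", -1);	/* -1: single instance */
	if (!pdev) {
		rc = -ENOMEM;
		goto err_unreg_drv;
	}

	rc = platform_device_add(pdev);
	if (rc)
		goto err_put_dev;
	return 0;

err_put_dev:
	platform_device_put(pdev);	/* not kfree() once _alloc()'d */
err_unreg_drv:
	platform_driver_unregister(&nsc_drv);
	return rc;
}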
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 80014213fb53..7900bd63b36d 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -969,8 +969,7 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows)
969 * Takes the console sem and the called methods then take the tty 969 * Takes the console sem and the called methods then take the tty
970 * termios_mutex and the tty ctrl_lock in that order. 970 * termios_mutex and the tty ctrl_lock in that order.
971 */ 971 */
972 972static int vt_resize(struct tty_struct *tty, struct winsize *ws)
973int vt_resize(struct tty_struct *tty, struct winsize *ws)
974{ 973{
975 struct vc_data *vc = tty->driver_data; 974 struct vc_data *vc = tty->driver_data;
976 int ret; 975 int ret;
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 8d7cf3f31450..f1df59f59a37 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -15,12 +15,14 @@
15#include <linux/tick.h> 15#include <linux/tick.h>
16 16
17#define BREAK_FUZZ 4 /* 4 us */ 17#define BREAK_FUZZ 4 /* 4 us */
18#define PRED_HISTORY_PCT 50
18 19
19struct menu_device { 20struct menu_device {
20 int last_state_idx; 21 int last_state_idx;
21 22
22 unsigned int expected_us; 23 unsigned int expected_us;
23 unsigned int predicted_us; 24 unsigned int predicted_us;
25 unsigned int current_predicted_us;
24 unsigned int last_measured_us; 26 unsigned int last_measured_us;
25 unsigned int elapsed_us; 27 unsigned int elapsed_us;
26}; 28};
@@ -47,6 +49,12 @@ static int menu_select(struct cpuidle_device *dev)
47 data->expected_us = 49 data->expected_us =
48 (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; 50 (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
49 51
52 /* Recalculate predicted_us based on PRED_HISTORY_PCT */
53 data->predicted_us *= PRED_HISTORY_PCT;
54 data->predicted_us += (100 - PRED_HISTORY_PCT) *
55 data->current_predicted_us;
56 data->predicted_us /= 100;
57
50 /* find the deepest idle state that satisfies our constraints */ 58 /* find the deepest idle state that satisfies our constraints */
51 for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) { 59 for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
52 struct cpuidle_state *s = &dev->states[i]; 60 struct cpuidle_state *s = &dev->states[i];
@@ -97,7 +105,7 @@ static void menu_reflect(struct cpuidle_device *dev)
97 measured_us = -1; 105 measured_us = -1;
98 106
99 /* Predict time until next break event */ 107 /* Predict time until next break event */
100 data->predicted_us = max(measured_us, data->last_measured_us); 108 data->current_predicted_us = max(measured_us, data->last_measured_us);
101 109
102 if (last_idle_us + BREAK_FUZZ < 110 if (last_idle_us + BREAK_FUZZ <
103 data->expected_us - target->exit_latency) { 111 data->expected_us - target->exit_latency) {
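With PRED_HISTORY_PCT at 50, the governor keeps a decaying average instead of trusting only the last observation: after sleeps measured as 1000, 100 and 100 us, the prediction moves 1000 to 550 to 325 us rather than jumping straight to 100. The update step from the hunk above, isolated as a sketch:

/* Sketch: the decaying-average update menu_select() now performs. */
static unsigned int menu_update_prediction(unsigned int predicted_us,
					   unsigned int current_predicted_us)
{
	predicted_us *= PRED_HISTORY_PCT;		/* weight: history */
	predicted_us += (100 - PRED_HISTORY_PCT) *
			current_predicted_us;		/* weight: newest */
	return predicted_us / 100;
}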
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index d883e1b8bb8c..55433849bfa6 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -270,6 +270,6 @@ static void __exit dca_exit(void)
270 dca_sysfs_exit(); 270 dca_sysfs_exit();
271} 271}
272 272
273subsys_initcall(dca_init); 273arch_initcall(dca_init);
274module_exit(dca_exit); 274module_exit(dca_exit);
275 275
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 904e57558bb5..e34b06420816 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -33,7 +33,6 @@ config INTEL_IOATDMA
33config INTEL_IOP_ADMA 33config INTEL_IOP_ADMA
34 tristate "Intel IOP ADMA support" 34 tristate "Intel IOP ADMA support"
35 depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX 35 depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
36 select ASYNC_CORE
37 select DMA_ENGINE 36 select DMA_ENGINE
38 help 37 help
39 Enable support for the Intel(R) IOP Series RAID engines. 38 Enable support for the Intel(R) IOP Series RAID engines.
@@ -59,7 +58,6 @@ config FSL_DMA
59config MV_XOR 58config MV_XOR
60 bool "Marvell XOR engine support" 59 bool "Marvell XOR engine support"
61 depends on PLAT_ORION 60 depends on PLAT_ORION
62 select ASYNC_CORE
63 select DMA_ENGINE 61 select DMA_ENGINE
64 ---help--- 62 ---help---
65 Enable support for the Marvell XOR engine. 63 Enable support for the Marvell XOR engine.
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 657996517374..403dbe781122 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -31,32 +31,18 @@
31 * 31 *
32 * LOCKING: 32 * LOCKING:
33 * 33 *
34 * The subsystem keeps two global lists, dma_device_list and dma_client_list. 34 * The subsystem keeps a global list of dma_device structs; it is protected by a
35 * Both of these are protected by a mutex, dma_list_mutex. 35 * mutex, dma_list_mutex.
36 *
37 * A subsystem can get access to a channel by calling dmaengine_get() followed
38 * by dma_find_channel(), or if it has need for an exclusive channel it can call
39 * dma_request_channel(). Once a channel is allocated a reference is taken
40 * against its corresponding driver to disable removal.
36 * 41 *
37 * Each device has a channels list, which runs unlocked but is never modified 42 * Each device has a channels list, which runs unlocked but is never modified
38 * once the device is registered, it's just setup by the driver. 43 * once the device is registered, it's just setup by the driver.
39 * 44 *
40 * Each client is responsible for keeping track of the channels it uses. See 45 * See Documentation/dmaengine.txt for more details
41 * the definition of dma_event_callback in dmaengine.h.
42 *
43 * Each device has a kref, which is initialized to 1 when the device is
44 * registered. A kref_get is done for each device registered. When the
45 * device is released, the corresponding kref_put is done in the release
46 * method. Every time one of the device's channels is allocated to a client,
47 * a kref_get occurs. When the channel is freed, the corresponding kref_put
48 * happens. The device's release function does a completion, so
49 * unregister_device does a remove event, device_unregister, a kref_put
50 * for the first reference, then waits on the completion for all other
51 * references to finish.
52 *
53 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
54 * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
55 * signals that it wants to use a channel, and dma_chan_put is called when
56 * a channel is removed or a client using it is unregistered. A client can
57 * take extra references per outstanding transaction, as is the case with
58 * the NET DMA client. The release function does a kref_put on the device.
59 * -ChrisL, DanW
60 */ 46 */
61 47
62#include <linux/init.h> 48#include <linux/init.h>
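The rewritten LOCKING comment describes an opt-in model: clients call dmaengine_get() once, then look channels up per operation. A hedged sketch of a memcpy-offload client under the new API, with error handling trimmed (both helpers are introduced later in this patch):

/* Sketch: a client of the dmaengine_get()/dma_find_channel() API. */
static int demo_memcpy_offload(void *dst, void *src, size_t len)
{
	struct dma_chan *chan;
	dma_cookie_t cookie;
	enum dma_status status;

	dmaengine_get();			/* reference all public channels */
	chan = dma_find_channel(DMA_MEMCPY);	/* lockless per-cpu lookup */
	if (!chan) {
		dmaengine_put();
		return -ENODEV;			/* no memcpy provider present */
	}

	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	status = dma_sync_wait(chan, cookie);	/* spin until complete */
	dmaengine_put();			/* drop channel references */

	return status == DMA_SUCCESS ? 0 : -EIO;
}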
@@ -70,54 +56,85 @@
70#include <linux/rcupdate.h> 56#include <linux/rcupdate.h>
71#include <linux/mutex.h> 57#include <linux/mutex.h>
72#include <linux/jiffies.h> 58#include <linux/jiffies.h>
59#include <linux/rculist.h>
60#include <linux/idr.h>
73 61
74static DEFINE_MUTEX(dma_list_mutex); 62static DEFINE_MUTEX(dma_list_mutex);
75static LIST_HEAD(dma_device_list); 63static LIST_HEAD(dma_device_list);
76static LIST_HEAD(dma_client_list); 64static long dmaengine_ref_count;
65static struct idr dma_idr;
77 66
78/* --- sysfs implementation --- */ 67/* --- sysfs implementation --- */
79 68
69/**
70 * dev_to_dma_chan - convert a device pointer to its sysfs container object
71 * @dev - device node
72 *
73 * Must be called under dma_list_mutex
74 */
75static struct dma_chan *dev_to_dma_chan(struct device *dev)
76{
77 struct dma_chan_dev *chan_dev;
78
79 chan_dev = container_of(dev, typeof(*chan_dev), device);
80 return chan_dev->chan;
81}
82
80static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf) 83static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
81{ 84{
82 struct dma_chan *chan = to_dma_chan(dev); 85 struct dma_chan *chan;
83 unsigned long count = 0; 86 unsigned long count = 0;
84 int i; 87 int i;
88 int err;
85 89
86 for_each_possible_cpu(i) 90 mutex_lock(&dma_list_mutex);
87 count += per_cpu_ptr(chan->local, i)->memcpy_count; 91 chan = dev_to_dma_chan(dev);
92 if (chan) {
93 for_each_possible_cpu(i)
94 count += per_cpu_ptr(chan->local, i)->memcpy_count;
95 err = sprintf(buf, "%lu\n", count);
96 } else
97 err = -ENODEV;
98 mutex_unlock(&dma_list_mutex);
88 99
89 return sprintf(buf, "%lu\n", count); 100 return err;
90} 101}
91 102
92static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr, 103static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
93 char *buf) 104 char *buf)
94{ 105{
95 struct dma_chan *chan = to_dma_chan(dev); 106 struct dma_chan *chan;
96 unsigned long count = 0; 107 unsigned long count = 0;
97 int i; 108 int i;
109 int err;
98 110
99 for_each_possible_cpu(i) 111 mutex_lock(&dma_list_mutex);
100 count += per_cpu_ptr(chan->local, i)->bytes_transferred; 112 chan = dev_to_dma_chan(dev);
113 if (chan) {
114 for_each_possible_cpu(i)
115 count += per_cpu_ptr(chan->local, i)->bytes_transferred;
116 err = sprintf(buf, "%lu\n", count);
117 } else
118 err = -ENODEV;
119 mutex_unlock(&dma_list_mutex);
101 120
102 return sprintf(buf, "%lu\n", count); 121 return err;
103} 122}
104 123
105static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf) 124static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
106{ 125{
107 struct dma_chan *chan = to_dma_chan(dev); 126 struct dma_chan *chan;
108 int in_use = 0; 127 int err;
109
110 if (unlikely(chan->slow_ref) &&
111 atomic_read(&chan->refcount.refcount) > 1)
112 in_use = 1;
113 else {
114 if (local_read(&(per_cpu_ptr(chan->local,
115 get_cpu())->refcount)) > 0)
116 in_use = 1;
117 put_cpu();
118 }
119 128
120 return sprintf(buf, "%d\n", in_use); 129 mutex_lock(&dma_list_mutex);
130 chan = dev_to_dma_chan(dev);
131 if (chan)
132 err = sprintf(buf, "%d\n", chan->client_count);
133 else
134 err = -ENODEV;
135 mutex_unlock(&dma_list_mutex);
136
137 return err;
121} 138}
122 139
123static struct device_attribute dma_attrs[] = { 140static struct device_attribute dma_attrs[] = {
@@ -127,76 +144,110 @@ static struct device_attribute dma_attrs[] = {
127 __ATTR_NULL 144 __ATTR_NULL
128}; 145};
129 146
130static void dma_async_device_cleanup(struct kref *kref); 147static void chan_dev_release(struct device *dev)
131
132static void dma_dev_release(struct device *dev)
133{ 148{
134 struct dma_chan *chan = to_dma_chan(dev); 149 struct dma_chan_dev *chan_dev;
135 kref_put(&chan->device->refcount, dma_async_device_cleanup); 150
151 chan_dev = container_of(dev, typeof(*chan_dev), device);
152 if (atomic_dec_and_test(chan_dev->idr_ref)) {
153 mutex_lock(&dma_list_mutex);
154 idr_remove(&dma_idr, chan_dev->dev_id);
155 mutex_unlock(&dma_list_mutex);
156 kfree(chan_dev->idr_ref);
157 }
158 kfree(chan_dev);
136} 159}
137 160
138static struct class dma_devclass = { 161static struct class dma_devclass = {
139 .name = "dma", 162 .name = "dma",
140 .dev_attrs = dma_attrs, 163 .dev_attrs = dma_attrs,
141 .dev_release = dma_dev_release, 164 .dev_release = chan_dev_release,
142}; 165};
143 166
144/* --- client and device registration --- */ 167/* --- client and device registration --- */
145 168
146#define dma_chan_satisfies_mask(chan, mask) \ 169#define dma_device_satisfies_mask(device, mask) \
147 __dma_chan_satisfies_mask((chan), &(mask)) 170 __dma_device_satisfies_mask((device), &(mask))
148static int 171static int
149__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want) 172__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
150{ 173{
151 dma_cap_mask_t has; 174 dma_cap_mask_t has;
152 175
153 bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits, 176 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
154 DMA_TX_TYPE_END); 177 DMA_TX_TYPE_END);
155 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); 178 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
156} 179}
157 180
181static struct module *dma_chan_to_owner(struct dma_chan *chan)
182{
183 return chan->device->dev->driver->owner;
184}
185
158/** 186/**
159 * dma_client_chan_alloc - try to allocate channels to a client 187 * balance_ref_count - catch up the channel reference count
160 * @client: &dma_client 188 * @chan - channel to balance ->client_count versus dmaengine_ref_count
161 * 189 *
162 * Called with dma_list_mutex held. 190 * balance_ref_count must be called under dma_list_mutex
163 */ 191 */
164static void dma_client_chan_alloc(struct dma_client *client) 192static void balance_ref_count(struct dma_chan *chan)
165{ 193{
166 struct dma_device *device; 194 struct module *owner = dma_chan_to_owner(chan);
167 struct dma_chan *chan;
168 int desc; /* allocated descriptor count */
169 enum dma_state_client ack;
170 195
171 /* Find a channel */ 196 while (chan->client_count < dmaengine_ref_count) {
172 list_for_each_entry(device, &dma_device_list, global_node) { 197 __module_get(owner);
173 /* Does the client require a specific DMA controller? */ 198 chan->client_count++;
174 if (client->slave && client->slave->dma_dev 199 }
175 && client->slave->dma_dev != device->dev) 200}
176 continue;
177 201
178 list_for_each_entry(chan, &device->channels, device_node) { 202/**
179 if (!dma_chan_satisfies_mask(chan, client->cap_mask)) 203 * dma_chan_get - try to grab a dma channel's parent driver module
180 continue; 204 * @chan - channel to grab
205 *
206 * Must be called under dma_list_mutex
207 */
208static int dma_chan_get(struct dma_chan *chan)
209{
210 int err = -ENODEV;
211 struct module *owner = dma_chan_to_owner(chan);
212
213 if (chan->client_count) {
214 __module_get(owner);
215 err = 0;
216 } else if (try_module_get(owner))
217 err = 0;
218
219 if (err == 0)
220 chan->client_count++;
221
222 /* allocate upon first client reference */
223 if (chan->client_count == 1 && err == 0) {
224 int desc_cnt = chan->device->device_alloc_chan_resources(chan);
225
226 if (desc_cnt < 0) {
227 err = desc_cnt;
228 chan->client_count = 0;
229 module_put(owner);
230 } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
231 balance_ref_count(chan);
232 }
181 233
182 desc = chan->device->device_alloc_chan_resources( 234 return err;
183 chan, client); 235}
184 if (desc >= 0) {
185 ack = client->event_callback(client,
186 chan,
187 DMA_RESOURCE_AVAILABLE);
188 236
189 /* we are done once this client rejects 237/**
190 * an available resource 238 * dma_chan_put - drop a reference to a dma channel's parent driver module
191 */ 239 * @chan - channel to release
192 if (ack == DMA_ACK) { 240 *
193 dma_chan_get(chan); 241 * Must be called under dma_list_mutex
194 chan->client_count++; 242 */
195 } else if (ack == DMA_NAK) 243static void dma_chan_put(struct dma_chan *chan)
196 return; 244{
197 } 245 if (!chan->client_count)
198 } 246 return; /* this channel failed alloc_chan_resources */
199 } 247 chan->client_count--;
248 module_put(dma_chan_to_owner(chan));
249 if (chan->client_count == 0)
250 chan->device->device_free_chan_resources(chan);
200} 251}
201 252
202enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 253enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
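dma_chan_get()/dma_chan_put() replace the kref/bigref machinery with a plain counter guarded by dma_list_mutex, plus a module reference that pins the providing driver. The rules they enforce, restated as a sketch inside dmaengine.c:

/* Sketch: reference rules for the helpers above (dma_list_mutex held).
 * - the first get allocates descriptors via device_alloc_chan_resources()
 * - every get pins the provider module so it cannot be unloaded
 * - the last put frees resources via device_free_chan_resources() */
static void dma_chan_ref_rules_sketch(struct dma_chan *chan)
{
	if (dma_chan_get(chan) == 0) {	/* may allocate resources */
		WARN_ON(chan->client_count < 1);
		dma_chan_put(chan);	/* may free resources */
	}
}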
@@ -218,138 +269,342 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
218EXPORT_SYMBOL(dma_sync_wait); 269EXPORT_SYMBOL(dma_sync_wait);
219 270
220/** 271/**
221 * dma_chan_cleanup - release a DMA channel's resources 272 * dma_cap_mask_all - enable iteration over all operation types
222 * @kref: kernel reference structure that contains the DMA channel device 273 */
274static dma_cap_mask_t dma_cap_mask_all;
275
276/**
277 * dma_chan_tbl_ent - tracks channel allocations per core/operation
278 * @chan - associated channel for this entry
279 */
280struct dma_chan_tbl_ent {
281 struct dma_chan *chan;
282};
283
284/**
285 * channel_table - percpu lookup table for memory-to-memory offload providers
223 */ 286 */
224void dma_chan_cleanup(struct kref *kref) 287static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
288
289static int __init dma_channel_table_init(void)
225{ 290{
226 struct dma_chan *chan = container_of(kref, struct dma_chan, refcount); 291 enum dma_transaction_type cap;
227 chan->device->device_free_chan_resources(chan); 292 int err = 0;
228 kref_put(&chan->device->refcount, dma_async_device_cleanup); 293
294 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
295
296 /* 'interrupt', 'private', and 'slave' are channel capabilities,
297 * but are not associated with an operation so they do not need
298 * an entry in the channel_table
299 */
300 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
301 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
302 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
303
304 for_each_dma_cap_mask(cap, dma_cap_mask_all) {
305 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
306 if (!channel_table[cap]) {
307 err = -ENOMEM;
308 break;
309 }
310 }
311
312 if (err) {
313 pr_err("dmaengine: initialization failure\n");
314 for_each_dma_cap_mask(cap, dma_cap_mask_all)
315 if (channel_table[cap])
316 free_percpu(channel_table[cap]);
317 }
318
319 return err;
229} 320}
230EXPORT_SYMBOL(dma_chan_cleanup); 321arch_initcall(dma_channel_table_init);
231 322
232static void dma_chan_free_rcu(struct rcu_head *rcu) 323/**
324 * dma_find_channel - find a channel to carry out the operation
325 * @tx_type: transaction type
326 */
327struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
233{ 328{
234 struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu); 329 struct dma_chan *chan;
235 int bias = 0x7FFFFFFF; 330 int cpu;
236 int i; 331
237 for_each_possible_cpu(i) 332 WARN_ONCE(dmaengine_ref_count == 0,
238 bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount); 333 "client called %s without a reference", __func__);
239 atomic_sub(bias, &chan->refcount.refcount); 334
240 kref_put(&chan->refcount, dma_chan_cleanup); 335 cpu = get_cpu();
336 chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
337 put_cpu();
338
339 return chan;
241} 340}
341EXPORT_SYMBOL(dma_find_channel);
242 342
243static void dma_chan_release(struct dma_chan *chan) 343/**
344 * dma_issue_pending_all - flush all pending operations across all channels
345 */
346void dma_issue_pending_all(void)
244{ 347{
245 atomic_add(0x7FFFFFFF, &chan->refcount.refcount); 348 struct dma_device *device;
246 chan->slow_ref = 1; 349 struct dma_chan *chan;
247 call_rcu(&chan->rcu, dma_chan_free_rcu); 350
351 WARN_ONCE(dmaengine_ref_count == 0,
352 "client called %s without a reference", __func__);
353
354 rcu_read_lock();
355 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
356 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
357 continue;
358 list_for_each_entry(chan, &device->channels, device_node)
359 if (chan->client_count)
360 device->device_issue_pending(chan);
361 }
362 rcu_read_unlock();
248} 363}
364EXPORT_SYMBOL(dma_issue_pending_all);
249 365
250/** 366/**
251 * dma_chans_notify_available - broadcast available channels to the clients 367 * nth_chan - returns the nth channel of the given capability
368 * @cap: capability to match
369 * @n: nth channel desired
370 *
371 * Defaults to returning the channel with the desired capability and the
372 * lowest reference count when 'n' cannot be satisfied. Must be called
373 * under dma_list_mutex.
252 */ 374 */
253static void dma_clients_notify_available(void) 375static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
254{ 376{
255 struct dma_client *client; 377 struct dma_device *device;
378 struct dma_chan *chan;
379 struct dma_chan *ret = NULL;
380 struct dma_chan *min = NULL;
256 381
257 mutex_lock(&dma_list_mutex); 382 list_for_each_entry(device, &dma_device_list, global_node) {
383 if (!dma_has_cap(cap, device->cap_mask) ||
384 dma_has_cap(DMA_PRIVATE, device->cap_mask))
385 continue;
386 list_for_each_entry(chan, &device->channels, device_node) {
387 if (!chan->client_count)
388 continue;
389 if (!min)
390 min = chan;
391 else if (chan->table_count < min->table_count)
392 min = chan;
393
394 if (n-- == 0) {
395 ret = chan;
396 break; /* done */
397 }
398 }
399 if (ret)
400 break; /* done */
401 }
258 402
259 list_for_each_entry(client, &dma_client_list, global_node) 403 if (!ret)
260 dma_client_chan_alloc(client); 404 ret = min;
261 405
262 mutex_unlock(&dma_list_mutex); 406 if (ret)
407 ret->table_count++;
408
409 return ret;
263} 410}
264 411
265/** 412/**
266 * dma_chans_notify_available - tell the clients that a channel is going away 413 * dma_channel_rebalance - redistribute the available channels
267 * @chan: channel on its way out 414 *
415 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
416 * operation type) in the SMP case, and operation isolation (avoid
417 * multi-tasking channels) in the non-SMP case. Must be called under
418 * dma_list_mutex.
268 */ 419 */
269static void dma_clients_notify_removed(struct dma_chan *chan) 420static void dma_channel_rebalance(void)
270{ 421{
271 struct dma_client *client; 422 struct dma_chan *chan;
272 enum dma_state_client ack; 423 struct dma_device *device;
424 int cpu;
425 int cap;
426 int n;
273 427
274 mutex_lock(&dma_list_mutex); 428 /* undo the last distribution */
429 for_each_dma_cap_mask(cap, dma_cap_mask_all)
430 for_each_possible_cpu(cpu)
431 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
432
433 list_for_each_entry(device, &dma_device_list, global_node) {
434 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
435 continue;
436 list_for_each_entry(chan, &device->channels, device_node)
437 chan->table_count = 0;
438 }
275 439
276 list_for_each_entry(client, &dma_client_list, global_node) { 440 /* don't populate the channel_table if no clients are available */
277 ack = client->event_callback(client, chan, 441 if (!dmaengine_ref_count)
278 DMA_RESOURCE_REMOVED); 442 return;
279 443
280 /* client was holding resources for this channel so 444 /* redistribute available channels */
281 * free it 445 n = 0;
282 */ 446 for_each_dma_cap_mask(cap, dma_cap_mask_all)
283 if (ack == DMA_ACK) { 447 for_each_online_cpu(cpu) {
284 dma_chan_put(chan); 448 if (num_possible_cpus() > 1)
285 chan->client_count--; 449 chan = nth_chan(cap, n++);
450 else
451 chan = nth_chan(cap, -1);
452
453 per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
454 }
455}
456
457static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
458 dma_filter_fn fn, void *fn_param)
459{
460 struct dma_chan *chan;
461
462 if (!__dma_device_satisfies_mask(dev, mask)) {
463 pr_debug("%s: wrong capabilities\n", __func__);
464 return NULL;
465 }
466 /* devices with multiple channels need special handling as we need to
467 * ensure that all channels are either private or public.
468 */
469 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
470 list_for_each_entry(chan, &dev->channels, device_node) {
471 /* some channels are already publicly allocated */
472 if (chan->client_count)
473 return NULL;
286 } 474 }
475
476 list_for_each_entry(chan, &dev->channels, device_node) {
477 if (chan->client_count) {
478 pr_debug("%s: %s busy\n",
479 __func__, dma_chan_name(chan));
480 continue;
481 }
482 if (fn && !fn(chan, fn_param)) {
483 pr_debug("%s: %s filter said false\n",
484 __func__, dma_chan_name(chan));
485 continue;
486 }
487 return chan;
287 } 488 }
288 489
289 mutex_unlock(&dma_list_mutex); 490 return NULL;
290} 491}
291 492
292/** 493/**
293 * dma_async_client_register - register a &dma_client 494 * dma_request_channel - try to allocate an exclusive channel
294 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask' 495 * @mask: capabilities that the channel must satisfy
496 * @fn: optional callback to disposition available channels
497 * @fn_param: opaque parameter to pass to dma_filter_fn
295 */ 498 */
296void dma_async_client_register(struct dma_client *client) 499struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
297{ 500{
298 /* validate client data */ 501 struct dma_device *device, *_d;
299 BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) && 502 struct dma_chan *chan = NULL;
300 !client->slave); 503 int err;
301 504
505 /* Find a channel */
506 mutex_lock(&dma_list_mutex);
507 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
508 chan = private_candidate(mask, device, fn, fn_param);
509 if (chan) {
510 /* Found a suitable channel, try to grab, prep, and
511 * return it. We first set DMA_PRIVATE to disable
512 * balance_ref_count as this channel will not be
513 * published in the general-purpose allocator
514 */
515 dma_cap_set(DMA_PRIVATE, device->cap_mask);
516 err = dma_chan_get(chan);
517
518 if (err == -ENODEV) {
519 pr_debug("%s: %s module removed\n", __func__,
520 dma_chan_name(chan));
521 list_del_rcu(&device->global_node);
522 } else if (err)
523 pr_err("dmaengine: failed to get %s: (%d)\n",
524 dma_chan_name(chan), err);
525 else
526 break;
527 chan = NULL;
528 }
529 }
530 mutex_unlock(&dma_list_mutex);
531
532 pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
533 chan ? dma_chan_name(chan) : NULL);
534
535 return chan;
536}
537EXPORT_SYMBOL_GPL(__dma_request_channel);
538
539void dma_release_channel(struct dma_chan *chan)
540{
302 mutex_lock(&dma_list_mutex); 541 mutex_lock(&dma_list_mutex);
303 list_add_tail(&client->global_node, &dma_client_list); 542 WARN_ONCE(chan->client_count != 1,
543 "chan reference count %d != 1\n", chan->client_count);
544 dma_chan_put(chan);
304 mutex_unlock(&dma_list_mutex); 545 mutex_unlock(&dma_list_mutex);
305} 546}
306EXPORT_SYMBOL(dma_async_client_register); 547EXPORT_SYMBOL_GPL(dma_release_channel);
307 548
308/** 549/**
309 * dma_async_client_unregister - unregister a client and free the &dma_client 550 * dmaengine_get - register interest in dma_channels
310 * @client: &dma_client to free
311 *
312 * Force frees any allocated DMA channels, frees the &dma_client memory
313 */ 551 */
314void dma_async_client_unregister(struct dma_client *client) 552void dmaengine_get(void)
315{ 553{
316 struct dma_device *device; 554 struct dma_device *device, *_d;
317 struct dma_chan *chan; 555 struct dma_chan *chan;
318 enum dma_state_client ack; 556 int err;
319
320 if (!client)
321 return;
322 557
323 mutex_lock(&dma_list_mutex); 558 mutex_lock(&dma_list_mutex);
324 /* free all channels the client is holding */ 559 dmaengine_ref_count++;
325 list_for_each_entry(device, &dma_device_list, global_node)
326 list_for_each_entry(chan, &device->channels, device_node) {
327 ack = client->event_callback(client, chan,
328 DMA_RESOURCE_REMOVED);
329 560
330 if (ack == DMA_ACK) { 561 /* try to grab channels */
331 dma_chan_put(chan); 562 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
332 chan->client_count--; 563 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
333 } 564 continue;
565 list_for_each_entry(chan, &device->channels, device_node) {
566 err = dma_chan_get(chan);
567 if (err == -ENODEV) {
568 /* module removed before we could use it */
569 list_del_rcu(&device->global_node);
570 break;
571 } else if (err)
572 pr_err("dmaengine: failed to get %s: (%d)\n",
573 dma_chan_name(chan), err);
334 } 574 }
575 }
335 576
336 list_del(&client->global_node); 577 /* if this is the first reference and there were channels
578 * waiting we need to rebalance to get those channels
579 * incorporated into the channel table
580 */
581 if (dmaengine_ref_count == 1)
582 dma_channel_rebalance();
337 mutex_unlock(&dma_list_mutex); 583 mutex_unlock(&dma_list_mutex);
338} 584}
339EXPORT_SYMBOL(dma_async_client_unregister); 585EXPORT_SYMBOL(dmaengine_get);
340 586
341/** 587/**
342 * dma_async_client_chan_request - send all available channels to the 588 * dmaengine_put - let dma drivers be removed when ref_count == 0
343 * client that satisfy the capability mask
344 * @client - requester
345 */ 589 */
346void dma_async_client_chan_request(struct dma_client *client) 590void dmaengine_put(void)
347{ 591{
592 struct dma_device *device;
593 struct dma_chan *chan;
594
348 mutex_lock(&dma_list_mutex); 595 mutex_lock(&dma_list_mutex);
349 dma_client_chan_alloc(client); 596 dmaengine_ref_count--;
597 BUG_ON(dmaengine_ref_count < 0);
598 /* drop channel references */
599 list_for_each_entry(device, &dma_device_list, global_node) {
600 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
601 continue;
602 list_for_each_entry(chan, &device->channels, device_node)
603 dma_chan_put(chan);
604 }
350 mutex_unlock(&dma_list_mutex); 605 mutex_unlock(&dma_list_mutex);
351} 606}
352EXPORT_SYMBOL(dma_async_client_chan_request); 607EXPORT_SYMBOL(dmaengine_put);
353 608
354/** 609/**
355 * dma_async_device_register - registers DMA devices found 610 * dma_async_device_register - registers DMA devices found
@@ -357,9 +612,9 @@ EXPORT_SYMBOL(dma_async_client_chan_request);
357 */ 612 */
358int dma_async_device_register(struct dma_device *device) 613int dma_async_device_register(struct dma_device *device)
359{ 614{
360 static int id;
361 int chancnt = 0, rc; 615 int chancnt = 0, rc;
362 struct dma_chan* chan; 616 struct dma_chan* chan;
617 atomic_t *idr_ref;
363 618
364 if (!device) 619 if (!device)
365 return -ENODEV; 620 return -ENODEV;
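__dma_request_channel() introduces the filter idiom that replaces the old client callbacks: build a capability mask, optionally supply a predicate, and receive an exclusive (DMA_PRIVATE) channel. A hedged sketch of a caller, assuming the dma_request_channel() convenience macro from this patch's header changes:

/* Sketch: requesting an exclusive channel with a filter function. */
static bool demo_filter(struct dma_chan *chan, void *param)
{
	/* match only channels provided by a specific device */
	return chan->device->dev == param;
}

static struct dma_chan *demo_grab_channel(struct device *dev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	return dma_request_channel(mask, demo_filter, dev);
	/* pair with dma_release_channel() when done */
}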
@@ -386,57 +641,83 @@ int dma_async_device_register(struct dma_device *device)
386 BUG_ON(!device->device_issue_pending); 641 BUG_ON(!device->device_issue_pending);
387 BUG_ON(!device->dev); 642 BUG_ON(!device->dev);
388 643
389 init_completion(&device->done); 644 idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
390 kref_init(&device->refcount); 645 if (!idr_ref)
391 646 return -ENOMEM;
647 atomic_set(idr_ref, 0);
648 idr_retry:
649 if (!idr_pre_get(&dma_idr, GFP_KERNEL))
650 return -ENOMEM;
392 mutex_lock(&dma_list_mutex); 651 mutex_lock(&dma_list_mutex);
393 device->dev_id = id++; 652 rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
394 mutex_unlock(&dma_list_mutex); 653 mutex_unlock(&dma_list_mutex);
654 if (rc == -EAGAIN)
655 goto idr_retry;
656 else if (rc != 0)
657 return rc;
395 658
396 /* represent channels in sysfs. Probably want devs too */ 659 /* represent channels in sysfs. Probably want devs too */
397 list_for_each_entry(chan, &device->channels, device_node) { 660 list_for_each_entry(chan, &device->channels, device_node) {
398 chan->local = alloc_percpu(typeof(*chan->local)); 661 chan->local = alloc_percpu(typeof(*chan->local));
399 if (chan->local == NULL) 662 if (chan->local == NULL)
400 continue; 663 continue;
664 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
665 if (chan->dev == NULL) {
666 free_percpu(chan->local);
667 continue;
668 }
401 669
402 chan->chan_id = chancnt++; 670 chan->chan_id = chancnt++;
403 chan->dev.class = &dma_devclass; 671 chan->dev->device.class = &dma_devclass;
404 chan->dev.parent = device->dev; 672 chan->dev->device.parent = device->dev;
405 dev_set_name(&chan->dev, "dma%dchan%d", 673 chan->dev->chan = chan;
674 chan->dev->idr_ref = idr_ref;
675 chan->dev->dev_id = device->dev_id;
676 atomic_inc(idr_ref);
677 dev_set_name(&chan->dev->device, "dma%dchan%d",
406 device->dev_id, chan->chan_id); 678 device->dev_id, chan->chan_id);
407 679
408 rc = device_register(&chan->dev); 680 rc = device_register(&chan->dev->device);
409 if (rc) { 681 if (rc) {
410 chancnt--;
411 free_percpu(chan->local); 682 free_percpu(chan->local);
412 chan->local = NULL; 683 chan->local = NULL;
413 goto err_out; 684 goto err_out;
414 } 685 }
415
416 /* One for the channel, one of the class device */
417 kref_get(&device->refcount);
418 kref_get(&device->refcount);
419 kref_init(&chan->refcount);
420 chan->client_count = 0; 686 chan->client_count = 0;
421 chan->slow_ref = 0;
422 INIT_RCU_HEAD(&chan->rcu);
423 } 687 }
688 device->chancnt = chancnt;
424 689
425 mutex_lock(&dma_list_mutex); 690 mutex_lock(&dma_list_mutex);
426 list_add_tail(&device->global_node, &dma_device_list); 691 /* take references on public channels */
692 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
693 list_for_each_entry(chan, &device->channels, device_node) {
694 /* if clients are already waiting for channels we need
695 * to take references on their behalf
696 */
697 if (dma_chan_get(chan) == -ENODEV) {
698 /* note we can only get here for the first
699 * channel as the remaining channels are
700 * guaranteed to get a reference
701 */
702 rc = -ENODEV;
703 mutex_unlock(&dma_list_mutex);
704 goto err_out;
705 }
706 }
707 list_add_tail_rcu(&device->global_node, &dma_device_list);
708 dma_channel_rebalance();
427 mutex_unlock(&dma_list_mutex); 709 mutex_unlock(&dma_list_mutex);
428 710
429 dma_clients_notify_available();
430
431 return 0; 711 return 0;
432 712
433err_out: 713err_out:
434 list_for_each_entry(chan, &device->channels, device_node) { 714 list_for_each_entry(chan, &device->channels, device_node) {
435 if (chan->local == NULL) 715 if (chan->local == NULL)
436 continue; 716 continue;
437 kref_put(&device->refcount, dma_async_device_cleanup); 717 mutex_lock(&dma_list_mutex);
438 device_unregister(&chan->dev); 718 chan->dev->chan = NULL;
439 chancnt--; 719 mutex_unlock(&dma_list_mutex);
720 device_unregister(&chan->dev->device);
440 free_percpu(chan->local); 721 free_percpu(chan->local);
441 } 722 }
442 return rc; 723 return rc;
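Device IDs now come from an idr instead of a bare static counter, so IDs are reclaimed when chan_dev_release() drops the last reference. The idr_pre_get()/idr_get_new() retry loop used above is the standard idiom of this era, sketched in isolation:

/* Sketch: pre-2.6.30-style idr allocation with retry on -EAGAIN. */
static int demo_alloc_dev_id(struct idr *idr, int *dev_id)
{
	int rc;

	do {
		if (!idr_pre_get(idr, GFP_KERNEL))
			return -ENOMEM;	/* preallocate outside the lock */
		mutex_lock(&dma_list_mutex);
		rc = idr_get_new(idr, NULL, dev_id);
		mutex_unlock(&dma_list_mutex);
	} while (rc == -EAGAIN);	/* lost the prealloc race: retry */

	return rc;
}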
@@ -444,37 +725,30 @@ err_out:
444EXPORT_SYMBOL(dma_async_device_register); 725EXPORT_SYMBOL(dma_async_device_register);
445 726
446/** 727/**
447 * dma_async_device_cleanup - function called when all references are released 728 * dma_async_device_unregister - unregister a DMA device
448 * @kref: kernel reference object
449 */
450static void dma_async_device_cleanup(struct kref *kref)
451{
452 struct dma_device *device;
453
454 device = container_of(kref, struct dma_device, refcount);
455 complete(&device->done);
456}
457
458/**
459 * dma_async_device_unregister - unregisters DMA devices
460 * @device: &dma_device 729 * @device: &dma_device
730 *
731 * This routine is called by dma driver exit routines, dmaengine holds module
732 * references to prevent it being called while channels are in use.
461 */ 733 */
462void dma_async_device_unregister(struct dma_device *device) 734void dma_async_device_unregister(struct dma_device *device)
463{ 735{
464 struct dma_chan *chan; 736 struct dma_chan *chan;
465 737
466 mutex_lock(&dma_list_mutex); 738 mutex_lock(&dma_list_mutex);
467 list_del(&device->global_node); 739 list_del_rcu(&device->global_node);
740 dma_channel_rebalance();
468 mutex_unlock(&dma_list_mutex); 741 mutex_unlock(&dma_list_mutex);
469 742
470 list_for_each_entry(chan, &device->channels, device_node) { 743 list_for_each_entry(chan, &device->channels, device_node) {
471 dma_clients_notify_removed(chan); 744 WARN_ONCE(chan->client_count,
472 device_unregister(&chan->dev); 745 "%s called while %d clients hold a reference\n",
473 dma_chan_release(chan); 746 __func__, chan->client_count);
747 mutex_lock(&dma_list_mutex);
748 chan->dev->chan = NULL;
749 mutex_unlock(&dma_list_mutex);
750 device_unregister(&chan->dev->device);
474 } 751 }
475
476 kref_put(&device->refcount, dma_async_device_cleanup);
477 wait_for_completion(&device->done);
478} 752}
479EXPORT_SYMBOL(dma_async_device_unregister); 753EXPORT_SYMBOL(dma_async_device_unregister);
480 754
@@ -626,10 +900,96 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
626} 900}
627EXPORT_SYMBOL(dma_async_tx_descriptor_init); 901EXPORT_SYMBOL(dma_async_tx_descriptor_init);
628 902
903/* dma_wait_for_async_tx - spin wait for a transaction to complete
904 * @tx: in-flight transaction to wait on
905 *
906 * This routine assumes that tx was obtained from a call to async_memcpy,
907 * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
908 * and submitted). Walking the parent chain is only meant to cover for DMA
909 * drivers that do not implement the DMA_INTERRUPT capability and may race with
910 * the driver's descriptor cleanup routine.
911 */
912enum dma_status
913dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
914{
915 enum dma_status status;
916 struct dma_async_tx_descriptor *iter;
917 struct dma_async_tx_descriptor *parent;
918
919 if (!tx)
920 return DMA_SUCCESS;
921
922 WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
923 " %s\n", __func__, dma_chan_name(tx->chan));
924
925 /* poll through the dependency chain, return when tx is complete */
926 do {
927 iter = tx;
928
929 /* find the root of the unsubmitted dependency chain */
930 do {
931 parent = iter->parent;
932 if (!parent)
933 break;
934 else
935 iter = parent;
936 } while (parent);
937
938 /* there is a small window for ->parent == NULL and
939 * ->cookie == -EBUSY
940 */
941 while (iter->cookie == -EBUSY)
942 cpu_relax();
943
944 status = dma_sync_wait(iter->chan, iter->cookie);
945 } while (status == DMA_IN_PROGRESS || (iter != tx));
946
947 return status;
948}
949EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
950
951/* dma_run_dependencies - helper routine for dma drivers to process
952 * (start) dependent operations on their target channel
953 * @tx: transaction with dependencies
954 */
955void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
956{
957 struct dma_async_tx_descriptor *dep = tx->next;
958 struct dma_async_tx_descriptor *dep_next;
959 struct dma_chan *chan;
960
961 if (!dep)
962 return;
963
964 chan = dep->chan;
965
966 /* keep submitting up until a channel switch is detected
967 * in that case we will be called again as a result of
968 * processing the interrupt from async_tx_channel_switch
969 */
970 for (; dep; dep = dep_next) {
971 spin_lock_bh(&dep->lock);
972 dep->parent = NULL;
973 dep_next = dep->next;
974 if (dep_next && dep_next->chan == chan)
975 dep->next = NULL; /* ->next will be submitted */
976 else
977 dep_next = NULL; /* submit current dep and terminate */
978 spin_unlock_bh(&dep->lock);
979
980 dep->tx_submit(dep);
981 }
982
983 chan->device->device_issue_pending(chan);
984}
985EXPORT_SYMBOL_GPL(dma_run_dependencies);
986
629static int __init dma_bus_init(void) 987static int __init dma_bus_init(void)
630{ 988{
989 idr_init(&dma_idr);
631 mutex_init(&dma_list_mutex); 990 mutex_init(&dma_list_mutex);
632 return class_register(&dma_devclass); 991 return class_register(&dma_devclass);
633} 992}
634subsys_initcall(dma_bus_init); 993arch_initcall(dma_bus_init);
994
635 995
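dma_run_dependencies() is meant to be called from a driver's descriptor-cleanup path once a transaction completes, so that queued dependent operations (possibly on another channel) get submitted. A hedged sketch of where it sits in a hypothetical driver:

/* Sketch: a driver's cleanup path invoking the new helper. */
static void demo_cleanup_descriptor(struct dma_async_tx_descriptor *tx)
{
	if (tx->callback)
		tx->callback(tx->callback_param);	/* notify the client */

	/* submit dependent transactions; a channel switch will re-enter
	 * this path via the interrupt descriptor, as noted above */
	dma_run_dependencies(tx);
}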
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ed9636bfb54a..3603f1ea5b28 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -35,7 +35,7 @@ MODULE_PARM_DESC(threads_per_chan,
35 35
36static unsigned int max_channels; 36static unsigned int max_channels;
37module_param(max_channels, uint, S_IRUGO); 37module_param(max_channels, uint, S_IRUGO);
38MODULE_PARM_DESC(nr_channels, 38MODULE_PARM_DESC(max_channels,
39 "Maximum number of channels to use (default: all)"); 39 "Maximum number of channels to use (default: all)");
40 40
41/* 41/*
@@ -71,7 +71,7 @@ struct dmatest_chan {
71 71
72/* 72/*
73 * These are protected by dma_list_mutex since they're only used by 73 * These are protected by dma_list_mutex since they're only used by
74 * the DMA client event callback 74 * the DMA filter function callback
75 */ 75 */
76static LIST_HEAD(dmatest_channels); 76static LIST_HEAD(dmatest_channels);
77static unsigned int nr_channels; 77static unsigned int nr_channels;
@@ -80,7 +80,7 @@ static bool dmatest_match_channel(struct dma_chan *chan)
80{ 80{
81 if (test_channel[0] == '\0') 81 if (test_channel[0] == '\0')
82 return true; 82 return true;
83 return strcmp(dev_name(&chan->dev), test_channel) == 0; 83 return strcmp(dma_chan_name(chan), test_channel) == 0;
84} 84}
85 85
86static bool dmatest_match_device(struct dma_device *device) 86static bool dmatest_match_device(struct dma_device *device)
@@ -215,7 +215,6 @@ static int dmatest_func(void *data)
215 215
216 smp_rmb(); 216 smp_rmb();
217 chan = thread->chan; 217 chan = thread->chan;
218 dma_chan_get(chan);
219 218
220 while (!kthread_should_stop()) { 219 while (!kthread_should_stop()) {
221 total_tests++; 220 total_tests++;
@@ -293,7 +292,6 @@ static int dmatest_func(void *data)
293 } 292 }
294 293
295 ret = 0; 294 ret = 0;
296 dma_chan_put(chan);
297 kfree(thread->dstbuf); 295 kfree(thread->dstbuf);
298err_dstbuf: 296err_dstbuf:
299 kfree(thread->srcbuf); 297 kfree(thread->srcbuf);
@@ -319,21 +317,16 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
319 kfree(dtc); 317 kfree(dtc);
320} 318}
321 319
322static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) 320static int dmatest_add_channel(struct dma_chan *chan)
323{ 321{
324 struct dmatest_chan *dtc; 322 struct dmatest_chan *dtc;
325 struct dmatest_thread *thread; 323 struct dmatest_thread *thread;
326 unsigned int i; 324 unsigned int i;
327 325
328 /* Have we already been told about this channel? */
329 list_for_each_entry(dtc, &dmatest_channels, node)
330 if (dtc->chan == chan)
331 return DMA_DUP;
332
333 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); 326 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
334 if (!dtc) { 327 if (!dtc) {
335 pr_warning("dmatest: No memory for %s\n", dev_name(&chan->dev)); 328 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
336 return DMA_NAK; 329 return -ENOMEM;
337 } 330 }
338 331
339 dtc->chan = chan; 332 dtc->chan = chan;
@@ -343,16 +336,16 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
343 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); 336 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
344 if (!thread) { 337 if (!thread) {
345 pr_warning("dmatest: No memory for %s-test%u\n", 338 pr_warning("dmatest: No memory for %s-test%u\n",
346 dev_name(&chan->dev), i); 339 dma_chan_name(chan), i);
347 break; 340 break;
348 } 341 }
349 thread->chan = dtc->chan; 342 thread->chan = dtc->chan;
350 smp_wmb(); 343 smp_wmb();
351 thread->task = kthread_run(dmatest_func, thread, "%s-test%u", 344 thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
352 dev_name(&chan->dev), i); 345 dma_chan_name(chan), i);
353 if (IS_ERR(thread->task)) { 346 if (IS_ERR(thread->task)) {
354 pr_warning("dmatest: Failed to run thread %s-test%u\n", 347 pr_warning("dmatest: Failed to run thread %s-test%u\n",
355 dev_name(&chan->dev), i); 348 dma_chan_name(chan), i);
356 kfree(thread); 349 kfree(thread);
357 break; 350 break;
358 } 351 }
@@ -362,86 +355,62 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
362 list_add_tail(&thread->node, &dtc->threads); 355 list_add_tail(&thread->node, &dtc->threads);
363 } 356 }
364 357
365 pr_info("dmatest: Started %u threads using %s\n", i, dev_name(&chan->dev)); 358 pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan));
366 359
367 list_add_tail(&dtc->node, &dmatest_channels); 360 list_add_tail(&dtc->node, &dmatest_channels);
368 nr_channels++; 361 nr_channels++;
369 362
370 return DMA_ACK; 363 return 0;
371}
372
373static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
374{
375 struct dmatest_chan *dtc, *_dtc;
376
377 list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
378 if (dtc->chan == chan) {
379 list_del(&dtc->node);
380 dmatest_cleanup_channel(dtc);
381 pr_debug("dmatest: lost channel %s\n",
382 dev_name(&chan->dev));
383 return DMA_ACK;
384 }
385 }
386
387 return DMA_DUP;
388} 364}
389 365
390/* 366static bool filter(struct dma_chan *chan, void *param)
391 * Start testing threads as new channels are assigned to us, and kill
392 * them when the channels go away.
393 *
394 * When we unregister the client, all channels are removed so this
395 * will also take care of cleaning things up when the module is
396 * unloaded.
397 */
398static enum dma_state_client
399dmatest_event(struct dma_client *client, struct dma_chan *chan,
400 enum dma_state state)
401{ 367{
402 enum dma_state_client ack = DMA_NAK; 368 if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
403 369 return false;
404 switch (state) { 370 else
405 case DMA_RESOURCE_AVAILABLE: 371 return true;
406 if (!dmatest_match_channel(chan)
407 || !dmatest_match_device(chan->device))
408 ack = DMA_DUP;
409 else if (max_channels && nr_channels >= max_channels)
410 ack = DMA_NAK;
411 else
412 ack = dmatest_add_channel(chan);
413 break;
414
415 case DMA_RESOURCE_REMOVED:
416 ack = dmatest_remove_channel(chan);
417 break;
418
419 default:
420 pr_info("dmatest: Unhandled event %u (%s)\n",
421 state, dev_name(&chan->dev));
422 break;
423 }
424
425 return ack;
426} 372}
427 373
428static struct dma_client dmatest_client = {
429 .event_callback = dmatest_event,
430};
431
432static int __init dmatest_init(void) 374static int __init dmatest_init(void)
433{ 375{
434 dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask); 376 dma_cap_mask_t mask;
435 dma_async_client_register(&dmatest_client); 377 struct dma_chan *chan;
436 dma_async_client_chan_request(&dmatest_client); 378 int err = 0;
379
380 dma_cap_zero(mask);
381 dma_cap_set(DMA_MEMCPY, mask);
382 for (;;) {
383 chan = dma_request_channel(mask, filter, NULL);
384 if (chan) {
385 err = dmatest_add_channel(chan);
386 if (err == 0)
387 continue;
388 else {
389 dma_release_channel(chan);
390 break; /* add_channel failed, punt */
391 }
392 } else
393 break; /* no more channels available */
394 if (max_channels && nr_channels >= max_channels)
395 break; /* we have all we need */
396 }
437 397
438 return 0; 398 return err;
439} 399}
440module_init(dmatest_init); 400/* when compiled-in wait for drivers to load first */
401late_initcall(dmatest_init);
441 402
442static void __exit dmatest_exit(void) 403static void __exit dmatest_exit(void)
443{ 404{
444 dma_async_client_unregister(&dmatest_client); 405 struct dmatest_chan *dtc, *_dtc;
406
407 list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
408 list_del(&dtc->node);
409 dmatest_cleanup_channel(dtc);
410 pr_debug("dmatest: dropped channel %s\n",
411 dma_chan_name(dtc->chan));
412 dma_release_channel(dtc->chan);
413 }
445} 414}
446module_exit(dmatest_exit); 415module_exit(dmatest_exit);
447 416
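
The rewritten dmatest_init()/dmatest_exit() pair is the canonical shape of the new client API: no dma_client registration and no event callback, just a capability mask, a filter callback, and explicit request/release. A minimal sketch using only the interfaces visible in this patch (my_filter and my_client_init are illustrative names):

    static bool my_filter(struct dma_chan *chan, void *param)
    {
            /* a real filter inspects chan and/or param */
            return true;
    }

    static int __init my_client_init(void)
    {
            dma_cap_mask_t mask;
            struct dma_chan *chan;

            dma_cap_zero(mask);
            dma_cap_set(DMA_MEMCPY, mask);

            chan = dma_request_channel(mask, my_filter, NULL);
            if (!chan)
                    return -ENODEV;

            /* the channel is exclusively ours until released */
            dma_release_channel(chan);
            return 0;
    }
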
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 0778d99aea7c..6b702cc46b3d 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -70,6 +70,15 @@
70 * the controller, though. 70 * the controller, though.
71 */ 71 */
72 72
73static struct device *chan2dev(struct dma_chan *chan)
74{
75 return &chan->dev->device;
76}
77static struct device *chan2parent(struct dma_chan *chan)
78{
79 return chan->dev->device.parent;
80}
81
73static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) 82static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
74{ 83{
75 return list_entry(dwc->active_list.next, struct dw_desc, desc_node); 84 return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
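
These two helpers exist because chan->dev is no longer an embedded struct device but a pointer to the new dma_chan_dev wrapper (the fsldma hunk below makes the same &chan->common.dev->device adjustment). The dma_chan_name() calls introduced in dmatest presumably sit on top of the same layout, roughly:

    static inline const char *dma_chan_name(struct dma_chan *chan)
    {
            return dev_name(&chan->dev->device);
    }
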
@@ -93,12 +102,12 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
93 ret = desc; 102 ret = desc;
94 break; 103 break;
95 } 104 }
96 dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc); 105 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
97 i++; 106 i++;
98 } 107 }
99 spin_unlock_bh(&dwc->lock); 108 spin_unlock_bh(&dwc->lock);
100 109
101 dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i); 110 dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
102 111
103 return ret; 112 return ret;
104} 113}
@@ -108,10 +117,10 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
108 struct dw_desc *child; 117 struct dw_desc *child;
109 118
110 list_for_each_entry(child, &desc->txd.tx_list, desc_node) 119 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
111 dma_sync_single_for_cpu(dwc->chan.dev.parent, 120 dma_sync_single_for_cpu(chan2parent(&dwc->chan),
112 child->txd.phys, sizeof(child->lli), 121 child->txd.phys, sizeof(child->lli),
113 DMA_TO_DEVICE); 122 DMA_TO_DEVICE);
114 dma_sync_single_for_cpu(dwc->chan.dev.parent, 123 dma_sync_single_for_cpu(chan2parent(&dwc->chan),
115 desc->txd.phys, sizeof(desc->lli), 124 desc->txd.phys, sizeof(desc->lli),
116 DMA_TO_DEVICE); 125 DMA_TO_DEVICE);
117} 126}
@@ -129,11 +138,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
129 138
130 spin_lock_bh(&dwc->lock); 139 spin_lock_bh(&dwc->lock);
131 list_for_each_entry(child, &desc->txd.tx_list, desc_node) 140 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
132 dev_vdbg(&dwc->chan.dev, 141 dev_vdbg(chan2dev(&dwc->chan),
133 "moving child desc %p to freelist\n", 142 "moving child desc %p to freelist\n",
134 child); 143 child);
135 list_splice_init(&desc->txd.tx_list, &dwc->free_list); 144 list_splice_init(&desc->txd.tx_list, &dwc->free_list);
136 dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc); 145 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
137 list_add(&desc->desc_node, &dwc->free_list); 146 list_add(&desc->desc_node, &dwc->free_list);
138 spin_unlock_bh(&dwc->lock); 147 spin_unlock_bh(&dwc->lock);
139 } 148 }
@@ -163,9 +172,9 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
163 172
164 /* ASSERT: channel is idle */ 173 /* ASSERT: channel is idle */
165 if (dma_readl(dw, CH_EN) & dwc->mask) { 174 if (dma_readl(dw, CH_EN) & dwc->mask) {
166 dev_err(&dwc->chan.dev, 175 dev_err(chan2dev(&dwc->chan),
167 "BUG: Attempted to start non-idle channel\n"); 176 "BUG: Attempted to start non-idle channel\n");
168 dev_err(&dwc->chan.dev, 177 dev_err(chan2dev(&dwc->chan),
169 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 178 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
170 channel_readl(dwc, SAR), 179 channel_readl(dwc, SAR),
171 channel_readl(dwc, DAR), 180 channel_readl(dwc, DAR),
@@ -193,7 +202,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
193 void *param; 202 void *param;
194 struct dma_async_tx_descriptor *txd = &desc->txd; 203 struct dma_async_tx_descriptor *txd = &desc->txd;
195 204
196 dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie); 205 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
197 206
198 dwc->completed = txd->cookie; 207 dwc->completed = txd->cookie;
199 callback = txd->callback; 208 callback = txd->callback;
@@ -208,11 +217,11 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
208 * mapped before they were submitted... 217 * mapped before they were submitted...
209 */ 218 */
210 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) 219 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
211 dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len, 220 dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar,
212 DMA_FROM_DEVICE); 221 desc->len, DMA_FROM_DEVICE);
213 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) 222 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
214 dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len, 223 dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar,
215 DMA_TO_DEVICE); 224 desc->len, DMA_TO_DEVICE);
216 225
217 /* 226 /*
218 * The API requires that no submissions are done from a 227 * The API requires that no submissions are done from a
@@ -228,7 +237,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
228 LIST_HEAD(list); 237 LIST_HEAD(list);
229 238
230 if (dma_readl(dw, CH_EN) & dwc->mask) { 239 if (dma_readl(dw, CH_EN) & dwc->mask) {
231 dev_err(&dwc->chan.dev, 240 dev_err(chan2dev(&dwc->chan),
232 "BUG: XFER bit set, but channel not idle!\n"); 241 "BUG: XFER bit set, but channel not idle!\n");
233 242
234 /* Try to continue after resetting the channel... */ 243 /* Try to continue after resetting the channel... */
@@ -273,7 +282,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
273 return; 282 return;
274 } 283 }
275 284
276 dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp); 285 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
277 286
278 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 287 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
279 if (desc->lli.llp == llp) 288 if (desc->lli.llp == llp)
@@ -292,7 +301,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
292 dwc_descriptor_complete(dwc, desc); 301 dwc_descriptor_complete(dwc, desc);
293 } 302 }
294 303
295 dev_err(&dwc->chan.dev, 304 dev_err(chan2dev(&dwc->chan),
296 "BUG: All descriptors done, but channel not idle!\n"); 305 "BUG: All descriptors done, but channel not idle!\n");
297 306
298 /* Try to continue after resetting the channel... */ 307 /* Try to continue after resetting the channel... */
@@ -308,7 +317,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
308 317
309static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) 318static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
310{ 319{
311 dev_printk(KERN_CRIT, &dwc->chan.dev, 320 dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
312 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", 321 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
313 lli->sar, lli->dar, lli->llp, 322 lli->sar, lli->dar, lli->llp,
314 lli->ctlhi, lli->ctllo); 323 lli->ctlhi, lli->ctllo);
@@ -342,9 +351,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
342 * controller flagged an error instead of scribbling over 351 * controller flagged an error instead of scribbling over
343 * random memory locations. 352 * random memory locations.
344 */ 353 */
345 dev_printk(KERN_CRIT, &dwc->chan.dev, 354 dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
346 "Bad descriptor submitted for DMA!\n"); 355 "Bad descriptor submitted for DMA!\n");
347 dev_printk(KERN_CRIT, &dwc->chan.dev, 356 dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
348 " cookie: %d\n", bad_desc->txd.cookie); 357 " cookie: %d\n", bad_desc->txd.cookie);
349 dwc_dump_lli(dwc, &bad_desc->lli); 358 dwc_dump_lli(dwc, &bad_desc->lli);
350 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) 359 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
@@ -442,12 +451,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
442 * for DMA. But this is hard to do in a race-free manner. 451 * for DMA. But this is hard to do in a race-free manner.
443 */ 452 */
444 if (list_empty(&dwc->active_list)) { 453 if (list_empty(&dwc->active_list)) {
445 dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n", 454 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
446 desc->txd.cookie); 455 desc->txd.cookie);
447 dwc_dostart(dwc, desc); 456 dwc_dostart(dwc, desc);
448 list_add_tail(&desc->desc_node, &dwc->active_list); 457 list_add_tail(&desc->desc_node, &dwc->active_list);
449 } else { 458 } else {
450 dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n", 459 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
451 desc->txd.cookie); 460 desc->txd.cookie);
452 461
453 list_add_tail(&desc->desc_node, &dwc->queue); 462 list_add_tail(&desc->desc_node, &dwc->queue);
@@ -472,11 +481,11 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
472 unsigned int dst_width; 481 unsigned int dst_width;
473 u32 ctllo; 482 u32 ctllo;
474 483
475 dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", 484 dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
476 dest, src, len, flags); 485 dest, src, len, flags);
477 486
478 if (unlikely(!len)) { 487 if (unlikely(!len)) {
479 dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n"); 488 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
480 return NULL; 489 return NULL;
481 } 490 }
482 491
@@ -516,7 +525,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
516 first = desc; 525 first = desc;
517 } else { 526 } else {
518 prev->lli.llp = desc->txd.phys; 527 prev->lli.llp = desc->txd.phys;
519 dma_sync_single_for_device(chan->dev.parent, 528 dma_sync_single_for_device(chan2parent(chan),
520 prev->txd.phys, sizeof(prev->lli), 529 prev->txd.phys, sizeof(prev->lli),
521 DMA_TO_DEVICE); 530 DMA_TO_DEVICE);
522 list_add_tail(&desc->desc_node, 531 list_add_tail(&desc->desc_node,
@@ -531,7 +540,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
531 prev->lli.ctllo |= DWC_CTLL_INT_EN; 540 prev->lli.ctllo |= DWC_CTLL_INT_EN;
532 541
533 prev->lli.llp = 0; 542 prev->lli.llp = 0;
534 dma_sync_single_for_device(chan->dev.parent, 543 dma_sync_single_for_device(chan2parent(chan),
535 prev->txd.phys, sizeof(prev->lli), 544 prev->txd.phys, sizeof(prev->lli),
536 DMA_TO_DEVICE); 545 DMA_TO_DEVICE);
537 546
@@ -562,15 +571,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
562 struct scatterlist *sg; 571 struct scatterlist *sg;
563 size_t total_len = 0; 572 size_t total_len = 0;
564 573
565 dev_vdbg(&chan->dev, "prep_dma_slave\n"); 574 dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
566 575
567 if (unlikely(!dws || !sg_len)) 576 if (unlikely(!dws || !sg_len))
568 return NULL; 577 return NULL;
569 578
570 reg_width = dws->slave.reg_width; 579 reg_width = dws->reg_width;
571 prev = first = NULL; 580 prev = first = NULL;
572 581
573 sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction); 582 sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
574 583
575 switch (direction) { 584 switch (direction) {
576 case DMA_TO_DEVICE: 585 case DMA_TO_DEVICE:
@@ -579,7 +588,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
579 | DWC_CTLL_DST_FIX 588 | DWC_CTLL_DST_FIX
580 | DWC_CTLL_SRC_INC 589 | DWC_CTLL_SRC_INC
581 | DWC_CTLL_FC_M2P); 590 | DWC_CTLL_FC_M2P);
582 reg = dws->slave.tx_reg; 591 reg = dws->tx_reg;
583 for_each_sg(sgl, sg, sg_len, i) { 592 for_each_sg(sgl, sg, sg_len, i) {
584 struct dw_desc *desc; 593 struct dw_desc *desc;
585 u32 len; 594 u32 len;
@@ -587,7 +596,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
587 596
588 desc = dwc_desc_get(dwc); 597 desc = dwc_desc_get(dwc);
589 if (!desc) { 598 if (!desc) {
590 dev_err(&chan->dev, 599 dev_err(chan2dev(chan),
591 "not enough descriptors available\n"); 600 "not enough descriptors available\n");
592 goto err_desc_get; 601 goto err_desc_get;
593 } 602 }
@@ -607,7 +616,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
607 first = desc; 616 first = desc;
608 } else { 617 } else {
609 prev->lli.llp = desc->txd.phys; 618 prev->lli.llp = desc->txd.phys;
610 dma_sync_single_for_device(chan->dev.parent, 619 dma_sync_single_for_device(chan2parent(chan),
611 prev->txd.phys, 620 prev->txd.phys,
612 sizeof(prev->lli), 621 sizeof(prev->lli),
613 DMA_TO_DEVICE); 622 DMA_TO_DEVICE);
@@ -625,7 +634,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
625 | DWC_CTLL_SRC_FIX 634 | DWC_CTLL_SRC_FIX
626 | DWC_CTLL_FC_P2M); 635 | DWC_CTLL_FC_P2M);
627 636
628 reg = dws->slave.rx_reg; 637 reg = dws->rx_reg;
629 for_each_sg(sgl, sg, sg_len, i) { 638 for_each_sg(sgl, sg, sg_len, i) {
630 struct dw_desc *desc; 639 struct dw_desc *desc;
631 u32 len; 640 u32 len;
@@ -633,7 +642,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
633 642
634 desc = dwc_desc_get(dwc); 643 desc = dwc_desc_get(dwc);
635 if (!desc) { 644 if (!desc) {
636 dev_err(&chan->dev, 645 dev_err(chan2dev(chan),
637 "not enough descriptors available\n"); 646 "not enough descriptors available\n");
638 goto err_desc_get; 647 goto err_desc_get;
639 } 648 }
@@ -653,7 +662,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
653 first = desc; 662 first = desc;
654 } else { 663 } else {
655 prev->lli.llp = desc->txd.phys; 664 prev->lli.llp = desc->txd.phys;
656 dma_sync_single_for_device(chan->dev.parent, 665 dma_sync_single_for_device(chan2parent(chan),
657 prev->txd.phys, 666 prev->txd.phys,
658 sizeof(prev->lli), 667 sizeof(prev->lli),
659 DMA_TO_DEVICE); 668 DMA_TO_DEVICE);
@@ -673,7 +682,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
673 prev->lli.ctllo |= DWC_CTLL_INT_EN; 682 prev->lli.ctllo |= DWC_CTLL_INT_EN;
674 683
675 prev->lli.llp = 0; 684 prev->lli.llp = 0;
676 dma_sync_single_for_device(chan->dev.parent, 685 dma_sync_single_for_device(chan2parent(chan),
677 prev->txd.phys, sizeof(prev->lli), 686 prev->txd.phys, sizeof(prev->lli),
678 DMA_TO_DEVICE); 687 DMA_TO_DEVICE);
679 688
@@ -758,29 +767,21 @@ static void dwc_issue_pending(struct dma_chan *chan)
758 spin_unlock_bh(&dwc->lock); 767 spin_unlock_bh(&dwc->lock);
759} 768}
760 769
761static int dwc_alloc_chan_resources(struct dma_chan *chan, 770static int dwc_alloc_chan_resources(struct dma_chan *chan)
762 struct dma_client *client)
763{ 771{
764 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 772 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
765 struct dw_dma *dw = to_dw_dma(chan->device); 773 struct dw_dma *dw = to_dw_dma(chan->device);
766 struct dw_desc *desc; 774 struct dw_desc *desc;
767 struct dma_slave *slave;
768 struct dw_dma_slave *dws; 775 struct dw_dma_slave *dws;
769 int i; 776 int i;
770 u32 cfghi; 777 u32 cfghi;
771 u32 cfglo; 778 u32 cfglo;
772 779
773 dev_vdbg(&chan->dev, "alloc_chan_resources\n"); 780 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
774
775 /* Channels doing slave DMA can only handle one client. */
776 if (dwc->dws || client->slave) {
777 if (chan->client_count)
778 return -EBUSY;
779 }
780 781
781 /* ASSERT: channel is idle */ 782 /* ASSERT: channel is idle */
782 if (dma_readl(dw, CH_EN) & dwc->mask) { 783 if (dma_readl(dw, CH_EN) & dwc->mask) {
783 dev_dbg(&chan->dev, "DMA channel not idle?\n"); 784 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
784 return -EIO; 785 return -EIO;
785 } 786 }
786 787
@@ -789,23 +790,17 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
789 cfghi = DWC_CFGH_FIFO_MODE; 790 cfghi = DWC_CFGH_FIFO_MODE;
790 cfglo = 0; 791 cfglo = 0;
791 792
792 slave = client->slave; 793 dws = dwc->dws;
793 if (slave) { 794 if (dws) {
794 /* 795 /*
795 * We need controller-specific data to set up slave 796 * We need controller-specific data to set up slave
796 * transfers. 797 * transfers.
797 */ 798 */
798 BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev); 799 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
799
800 dws = container_of(slave, struct dw_dma_slave, slave);
801 800
802 dwc->dws = dws;
803 cfghi = dws->cfg_hi; 801 cfghi = dws->cfg_hi;
804 cfglo = dws->cfg_lo; 802 cfglo = dws->cfg_lo;
805 } else {
806 dwc->dws = NULL;
807 } 803 }
808
809 channel_writel(dwc, CFG_LO, cfglo); 804 channel_writel(dwc, CFG_LO, cfglo);
810 channel_writel(dwc, CFG_HI, cfghi); 805 channel_writel(dwc, CFG_HI, cfghi);
811 806
@@ -822,7 +817,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
822 817
823 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); 818 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
824 if (!desc) { 819 if (!desc) {
825 dev_info(&chan->dev, 820 dev_info(chan2dev(chan),
826 "only allocated %d descriptors\n", i); 821 "only allocated %d descriptors\n", i);
827 spin_lock_bh(&dwc->lock); 822 spin_lock_bh(&dwc->lock);
828 break; 823 break;
@@ -832,7 +827,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
832 desc->txd.tx_submit = dwc_tx_submit; 827 desc->txd.tx_submit = dwc_tx_submit;
833 desc->txd.flags = DMA_CTRL_ACK; 828 desc->txd.flags = DMA_CTRL_ACK;
834 INIT_LIST_HEAD(&desc->txd.tx_list); 829 INIT_LIST_HEAD(&desc->txd.tx_list);
835 desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli, 830 desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
836 sizeof(desc->lli), DMA_TO_DEVICE); 831 sizeof(desc->lli), DMA_TO_DEVICE);
837 dwc_desc_put(dwc, desc); 832 dwc_desc_put(dwc, desc);
838 833
@@ -847,7 +842,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
847 842
848 spin_unlock_bh(&dwc->lock); 843 spin_unlock_bh(&dwc->lock);
849 844
850 dev_dbg(&chan->dev, 845 dev_dbg(chan2dev(chan),
851 "alloc_chan_resources allocated %d descriptors\n", i); 846 "alloc_chan_resources allocated %d descriptors\n", i);
852 847
853 return i; 848 return i;
@@ -860,7 +855,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
860 struct dw_desc *desc, *_desc; 855 struct dw_desc *desc, *_desc;
861 LIST_HEAD(list); 856 LIST_HEAD(list);
862 857
863 dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n", 858 dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
864 dwc->descs_allocated); 859 dwc->descs_allocated);
865 860
866 /* ASSERT: channel is idle */ 861 /* ASSERT: channel is idle */
@@ -881,13 +876,13 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
881 spin_unlock_bh(&dwc->lock); 876 spin_unlock_bh(&dwc->lock);
882 877
883 list_for_each_entry_safe(desc, _desc, &list, desc_node) { 878 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
884 dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc); 879 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
885 dma_unmap_single(chan->dev.parent, desc->txd.phys, 880 dma_unmap_single(chan2parent(chan), desc->txd.phys,
886 sizeof(desc->lli), DMA_TO_DEVICE); 881 sizeof(desc->lli), DMA_TO_DEVICE);
887 kfree(desc); 882 kfree(desc);
888 } 883 }
889 884
890 dev_vdbg(&chan->dev, "free_chan_resources done\n"); 885 dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
891} 886}
892 887
893/*----------------------------------------------------------------------*/ 888/*----------------------------------------------------------------------*/
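
With the generic dma_slave intermediary gone, dw_dmac now reads tx_reg/rx_reg/reg_width and the CFG values directly from struct dw_dma_slave, which platform code is expected to hang off the channel. A hypothetical board-file fragment matching the fields referenced above (the device name, register values, and width constant are placeholders, not taken from this patch):

    static struct dw_dma_slave my_periph_dma = {
            .dma_dev   = &my_dw_dmac_device.dev, /* must be the controller's dev */
            .tx_reg    = MY_PERIPH_BASE + MY_TX_FIFO_OFFSET,
            .rx_reg    = MY_PERIPH_BASE + MY_RX_FIFO_OFFSET,
            .reg_width = DW_DMA_SLAVE_WIDTH_16BIT,
            .cfg_hi    = 0, /* controller-specific handshake setup */
            .cfg_lo    = 0,
    };

The BUG_ON() in dwc_alloc_chan_resources() then enforces the dma_dev match at allocation time.
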
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0b95dcce447e..ca70a21afc68 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -366,8 +366,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
366 * 366 *
367 * Return - The number of descriptors allocated. 367 * Return - The number of descriptors allocated.
368 */ 368 */
369static int fsl_dma_alloc_chan_resources(struct dma_chan *chan, 369static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
370 struct dma_client *client)
371{ 370{
372 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); 371 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
373 372
@@ -823,7 +822,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
823 */ 822 */
824 WARN_ON(fdev->feature != new_fsl_chan->feature); 823 WARN_ON(fdev->feature != new_fsl_chan->feature);
825 824
826 new_fsl_chan->dev = &new_fsl_chan->common.dev; 825 new_fsl_chan->dev = &new_fsl_chan->common.dev->device;
827 new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, 826 new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
828 new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); 827 new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
829 828
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 9b16a3af9a0a..4105d6575b64 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -75,60 +75,10 @@ static int ioat_dca_enabled = 1;
75module_param(ioat_dca_enabled, int, 0644); 75module_param(ioat_dca_enabled, int, 0644);
76MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); 76MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
77 77
78static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
79{
80 struct ioat_device *device = pci_get_drvdata(pdev);
81 u8 version;
82 int err = 0;
83
84 version = readb(iobase + IOAT_VER_OFFSET);
85 switch (version) {
86 case IOAT_VER_1_2:
87 device->dma = ioat_dma_probe(pdev, iobase);
88 if (device->dma && ioat_dca_enabled)
89 device->dca = ioat_dca_init(pdev, iobase);
90 break;
91 case IOAT_VER_2_0:
92 device->dma = ioat_dma_probe(pdev, iobase);
93 if (device->dma && ioat_dca_enabled)
94 device->dca = ioat2_dca_init(pdev, iobase);
95 break;
96 case IOAT_VER_3_0:
97 device->dma = ioat_dma_probe(pdev, iobase);
98 if (device->dma && ioat_dca_enabled)
99 device->dca = ioat3_dca_init(pdev, iobase);
100 break;
101 default:
102 err = -ENODEV;
103 break;
104 }
105 if (!device->dma)
106 err = -ENODEV;
107 return err;
108}
109
110static void ioat_shutdown_functionality(struct pci_dev *pdev)
111{
112 struct ioat_device *device = pci_get_drvdata(pdev);
113
114 dev_err(&pdev->dev, "Removing dma and dca services\n");
115 if (device->dca) {
116 unregister_dca_provider(device->dca);
117 free_dca_provider(device->dca);
118 device->dca = NULL;
119 }
120
121 if (device->dma) {
122 ioat_dma_remove(device->dma);
123 device->dma = NULL;
124 }
125}
126
127static struct pci_driver ioat_pci_driver = { 78static struct pci_driver ioat_pci_driver = {
128 .name = "ioatdma", 79 .name = "ioatdma",
129 .id_table = ioat_pci_tbl, 80 .id_table = ioat_pci_tbl,
130 .probe = ioat_probe, 81 .probe = ioat_probe,
131 .shutdown = ioat_shutdown_functionality,
132 .remove = __devexit_p(ioat_remove), 82 .remove = __devexit_p(ioat_remove),
133}; 83};
134 84
@@ -179,7 +129,29 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
179 129
180 pci_set_master(pdev); 130 pci_set_master(pdev);
181 131
182 err = ioat_setup_functionality(pdev, iobase); 132 switch (readb(iobase + IOAT_VER_OFFSET)) {
133 case IOAT_VER_1_2:
134 device->dma = ioat_dma_probe(pdev, iobase);
135 if (device->dma && ioat_dca_enabled)
136 device->dca = ioat_dca_init(pdev, iobase);
137 break;
138 case IOAT_VER_2_0:
139 device->dma = ioat_dma_probe(pdev, iobase);
140 if (device->dma && ioat_dca_enabled)
141 device->dca = ioat2_dca_init(pdev, iobase);
142 break;
143 case IOAT_VER_3_0:
144 device->dma = ioat_dma_probe(pdev, iobase);
145 if (device->dma && ioat_dca_enabled)
146 device->dca = ioat3_dca_init(pdev, iobase);
147 break;
148 default:
149 err = -ENODEV;
150 break;
151 }
152 if (!device->dma)
153 err = -ENODEV;
154
183 if (err) 155 if (err)
184 goto err_version; 156 goto err_version;
185 157
@@ -198,17 +170,21 @@ err_enable_device:
198 return err; 170 return err;
199} 171}
200 172
201/*
202 * It is unsafe to remove this module: if removed while a requested
203 * dma is outstanding, esp. from tcp, it is possible to hang while
204 * waiting for something that will never finish. However, if you're
205 * feeling lucky, this usually works just fine.
206 */
207static void __devexit ioat_remove(struct pci_dev *pdev) 173static void __devexit ioat_remove(struct pci_dev *pdev)
208{ 174{
209 struct ioat_device *device = pci_get_drvdata(pdev); 175 struct ioat_device *device = pci_get_drvdata(pdev);
210 176
211 ioat_shutdown_functionality(pdev); 177 dev_err(&pdev->dev, "Removing dma and dca services\n");
178 if (device->dca) {
179 unregister_dca_provider(device->dca);
180 free_dca_provider(device->dca);
181 device->dca = NULL;
182 }
183
184 if (device->dma) {
185 ioat_dma_remove(device->dma);
186 device->dma = NULL;
187 }
212 188
213 kfree(device); 189 kfree(device);
214} 190}
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 6607fdd00b1c..b3759c4b6536 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -734,8 +734,7 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
734 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors 734 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
735 * @chan: the channel to be filled out 735 * @chan: the channel to be filled out
736 */ 736 */
737static int ioat_dma_alloc_chan_resources(struct dma_chan *chan, 737static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
738 struct dma_client *client)
739{ 738{
740 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 739 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
741 struct ioat_desc_sw *desc; 740 struct ioat_desc_sw *desc;
@@ -1341,12 +1340,11 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
1341 */ 1340 */
1342#define IOAT_TEST_SIZE 2000 1341#define IOAT_TEST_SIZE 2000
1343 1342
1344DECLARE_COMPLETION(test_completion);
1345static void ioat_dma_test_callback(void *dma_async_param) 1343static void ioat_dma_test_callback(void *dma_async_param)
1346{ 1344{
1347 printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n", 1345 struct completion *cmp = dma_async_param;
1348 dma_async_param); 1346
1349 complete(&test_completion); 1347 complete(cmp);
1350} 1348}
1351 1349
1352/** 1350/**
@@ -1363,6 +1361,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1363 dma_addr_t dma_dest, dma_src; 1361 dma_addr_t dma_dest, dma_src;
1364 dma_cookie_t cookie; 1362 dma_cookie_t cookie;
1365 int err = 0; 1363 int err = 0;
1364 struct completion cmp;
1366 1365
1367 src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); 1366 src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1368 if (!src) 1367 if (!src)
@@ -1381,7 +1380,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1381 dma_chan = container_of(device->common.channels.next, 1380 dma_chan = container_of(device->common.channels.next,
1382 struct dma_chan, 1381 struct dma_chan,
1383 device_node); 1382 device_node);
1384 if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) { 1383 if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
1385 dev_err(&device->pdev->dev, 1384 dev_err(&device->pdev->dev,
1386 "selftest cannot allocate chan resource\n"); 1385 "selftest cannot allocate chan resource\n");
1387 err = -ENODEV; 1386 err = -ENODEV;
@@ -1402,8 +1401,9 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1402 } 1401 }
1403 1402
1404 async_tx_ack(tx); 1403 async_tx_ack(tx);
1404 init_completion(&cmp);
1405 tx->callback = ioat_dma_test_callback; 1405 tx->callback = ioat_dma_test_callback;
1406 tx->callback_param = (void *)0x8086; 1406 tx->callback_param = &cmp;
1407 cookie = tx->tx_submit(tx); 1407 cookie = tx->tx_submit(tx);
1408 if (cookie < 0) { 1408 if (cookie < 0) {
1409 dev_err(&device->pdev->dev, 1409 dev_err(&device->pdev->dev,
@@ -1413,7 +1413,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1413 } 1413 }
1414 device->common.device_issue_pending(dma_chan); 1414 device->common.device_issue_pending(dma_chan);
1415 1415
1416 wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000)); 1416 wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1417 1417
1418 if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) 1418 if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
1419 != DMA_SUCCESS) { 1419 != DMA_SUCCESS) {
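
The self-test change above is the standard on-stack completion idiom: the completion lives in the tester's stack frame, its address travels through callback_param, and the waiter bounds its sleep. Pulled together from the hunks above:

    struct completion cmp;

    init_completion(&cmp);
    tx->callback = ioat_dma_test_callback;  /* calls complete() on its param */
    tx->callback_param = &cmp;
    cookie = tx->tx_submit(tx);
    device->common.device_issue_pending(dma_chan);
    wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

Compared with the old global DECLARE_COMPLETION(), this also keeps self-tests on multiple devices from tripping over one shared completion.
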
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 6be317262200..ea5440dd10dc 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -24,7 +24,6 @@
24 24
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/async_tx.h>
28#include <linux/delay.h> 27#include <linux/delay.h>
29#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
30#include <linux/spinlock.h> 29#include <linux/spinlock.h>
@@ -116,7 +115,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
116 } 115 }
117 116
118 /* run dependent operations */ 117 /* run dependent operations */
119 async_tx_run_dependencies(&desc->async_tx); 118 dma_run_dependencies(&desc->async_tx);
120 119
121 return cookie; 120 return cookie;
122} 121}
@@ -270,8 +269,6 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
270 break; 269 break;
271 } 270 }
272 271
273 BUG_ON(!seen_current);
274
275 if (cookie > 0) { 272 if (cookie > 0) {
276 iop_chan->completed_cookie = cookie; 273 iop_chan->completed_cookie = cookie;
277 pr_debug("\tcompleted cookie %d\n", cookie); 274 pr_debug("\tcompleted cookie %d\n", cookie);
@@ -471,8 +468,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
471 * greater than 2x the number slots needed to satisfy a device->max_xor 468 * greater than 2x the number slots needed to satisfy a device->max_xor
472 * request. 469 * request.
473 * */ 470 * */
474static int iop_adma_alloc_chan_resources(struct dma_chan *chan, 471static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
475 struct dma_client *client)
476{ 472{
477 char *hw_desc; 473 char *hw_desc;
478 int idx; 474 int idx;
@@ -866,7 +862,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
866 dma_chan = container_of(device->common.channels.next, 862 dma_chan = container_of(device->common.channels.next,
867 struct dma_chan, 863 struct dma_chan,
868 device_node); 864 device_node);
869 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { 865 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
870 err = -ENODEV; 866 err = -ENODEV;
871 goto out; 867 goto out;
872 } 868 }
@@ -964,7 +960,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
964 dma_chan = container_of(device->common.channels.next, 960 dma_chan = container_of(device->common.channels.next,
965 struct dma_chan, 961 struct dma_chan,
966 device_node); 962 device_node);
967 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { 963 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
968 err = -ENODEV; 964 err = -ENODEV;
969 goto out; 965 goto out;
970 } 966 }
@@ -1115,26 +1111,13 @@ static int __devexit iop_adma_remove(struct platform_device *dev)
1115 struct iop_adma_device *device = platform_get_drvdata(dev); 1111 struct iop_adma_device *device = platform_get_drvdata(dev);
1116 struct dma_chan *chan, *_chan; 1112 struct dma_chan *chan, *_chan;
1117 struct iop_adma_chan *iop_chan; 1113 struct iop_adma_chan *iop_chan;
1118 int i;
1119 struct iop_adma_platform_data *plat_data = dev->dev.platform_data; 1114 struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
1120 1115
1121 dma_async_device_unregister(&device->common); 1116 dma_async_device_unregister(&device->common);
1122 1117
1123 for (i = 0; i < 3; i++) {
1124 unsigned int irq;
1125 irq = platform_get_irq(dev, i);
1126 free_irq(irq, device);
1127 }
1128
1129 dma_free_coherent(&dev->dev, plat_data->pool_size, 1118 dma_free_coherent(&dev->dev, plat_data->pool_size,
1130 device->dma_desc_pool_virt, device->dma_desc_pool); 1119 device->dma_desc_pool_virt, device->dma_desc_pool);
1131 1120
1132 do {
1133 struct resource *res;
1134 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
1135 release_mem_region(res->start, res->end - res->start);
1136 } while (0);
1137
1138 list_for_each_entry_safe(chan, _chan, &device->common.channels, 1121 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1139 device_node) { 1122 device_node) {
1140 iop_chan = to_iop_adma_chan(chan); 1123 iop_chan = to_iop_adma_chan(chan);
@@ -1255,7 +1238,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
1255 spin_lock_init(&iop_chan->lock); 1238 spin_lock_init(&iop_chan->lock);
1256 INIT_LIST_HEAD(&iop_chan->chain); 1239 INIT_LIST_HEAD(&iop_chan->chain);
1257 INIT_LIST_HEAD(&iop_chan->all_slots); 1240 INIT_LIST_HEAD(&iop_chan->all_slots);
1258 INIT_RCU_HEAD(&iop_chan->common.rcu);
1259 iop_chan->common.device = dma_dev; 1241 iop_chan->common.device = dma_dev;
1260 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); 1242 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1261 1243
@@ -1431,16 +1413,12 @@ static int __init iop_adma_init (void)
1431 return platform_driver_register(&iop_adma_driver); 1413 return platform_driver_register(&iop_adma_driver);
1432} 1414}
1433 1415
1434/* it's currently unsafe to unload this module */
1435#if 0
1436static void __exit iop_adma_exit (void) 1416static void __exit iop_adma_exit (void)
1437{ 1417{
1438 platform_driver_unregister(&iop_adma_driver); 1418 platform_driver_unregister(&iop_adma_driver);
1439 return; 1419 return;
1440} 1420}
1441module_exit(iop_adma_exit); 1421module_exit(iop_adma_exit);
1442#endif
1443
1444module_init(iop_adma_init); 1422module_init(iop_adma_init);
1445 1423
1446MODULE_AUTHOR("Intel Corporation"); 1424MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index bcda17426411..d35cbd1ff0b3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -18,7 +18,6 @@
18 18
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/async_tx.h>
22#include <linux/delay.h> 21#include <linux/delay.h>
23#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
24#include <linux/spinlock.h> 23#include <linux/spinlock.h>
@@ -340,7 +339,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
340 } 339 }
341 340
342 /* run dependent operations */ 341 /* run dependent operations */
343 async_tx_run_dependencies(&desc->async_tx); 342 dma_run_dependencies(&desc->async_tx);
344 343
345 return cookie; 344 return cookie;
346} 345}
@@ -607,8 +606,7 @@ submit_done:
607} 606}
608 607
609/* returns the number of allocated descriptors */ 608/* returns the number of allocated descriptors */
610static int mv_xor_alloc_chan_resources(struct dma_chan *chan, 609static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
611 struct dma_client *client)
612{ 610{
613 char *hw_desc; 611 char *hw_desc;
614 int idx; 612 int idx;
@@ -958,7 +956,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
958 dma_chan = container_of(device->common.channels.next, 956 dma_chan = container_of(device->common.channels.next,
959 struct dma_chan, 957 struct dma_chan,
960 device_node); 958 device_node);
961 if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) { 959 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
962 err = -ENODEV; 960 err = -ENODEV;
963 goto out; 961 goto out;
964 } 962 }
@@ -1053,7 +1051,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
1053 dma_chan = container_of(device->common.channels.next, 1051 dma_chan = container_of(device->common.channels.next,
1054 struct dma_chan, 1052 struct dma_chan,
1055 device_node); 1053 device_node);
1056 if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) { 1054 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
1057 err = -ENODEV; 1055 err = -ENODEV;
1058 goto out; 1056 goto out;
1059 } 1057 }
@@ -1221,7 +1219,6 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
1221 INIT_LIST_HEAD(&mv_chan->chain); 1219 INIT_LIST_HEAD(&mv_chan->chain);
1222 INIT_LIST_HEAD(&mv_chan->completed_slots); 1220 INIT_LIST_HEAD(&mv_chan->completed_slots);
1223 INIT_LIST_HEAD(&mv_chan->all_slots); 1221 INIT_LIST_HEAD(&mv_chan->all_slots);
1224 INIT_RCU_HEAD(&mv_chan->common.rcu);
1225 mv_chan->common.device = dma_dev; 1222 mv_chan->common.device = dma_dev;
1226 1223
1227 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); 1224 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 50a071f1c945..777fba48d2d3 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -238,11 +238,11 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
238} 238}
239 239
240/** 240/**
241 * smi_request: generate SMI request 241 * dcdbas_smi_request: generate SMI request
242 * 242 *
243 * Called with smi_data_lock. 243 * Called with smi_data_lock.
244 */ 244 */
245static int smi_request(struct smi_cmd *smi_cmd) 245int dcdbas_smi_request(struct smi_cmd *smi_cmd)
246{ 246{
247 cpumask_t old_mask; 247 cpumask_t old_mask;
248 int ret = 0; 248 int ret = 0;
@@ -309,14 +309,14 @@ static ssize_t smi_request_store(struct device *dev,
309 switch (val) { 309 switch (val) {
310 case 2: 310 case 2:
311 /* Raw SMI */ 311 /* Raw SMI */
312 ret = smi_request(smi_cmd); 312 ret = dcdbas_smi_request(smi_cmd);
313 if (!ret) 313 if (!ret)
314 ret = count; 314 ret = count;
315 break; 315 break;
316 case 1: 316 case 1:
317 /* Calling Interface SMI */ 317 /* Calling Interface SMI */
318 smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer); 318 smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer);
319 ret = smi_request(smi_cmd); 319 ret = dcdbas_smi_request(smi_cmd);
320 if (!ret) 320 if (!ret)
321 ret = count; 321 ret = count;
322 break; 322 break;
@@ -333,6 +333,7 @@ out:
333 mutex_unlock(&smi_data_lock); 333 mutex_unlock(&smi_data_lock);
334 return ret; 334 return ret;
335} 335}
336EXPORT_SYMBOL(dcdbas_smi_request);
336 337
337/** 338/**
338 * host_control_smi: generate host control SMI 339 * host_control_smi: generate host control SMI
diff --git a/drivers/firmware/dcdbas.h b/drivers/firmware/dcdbas.h
index 87bc3417de27..ca3cb0a54ab6 100644
--- a/drivers/firmware/dcdbas.h
+++ b/drivers/firmware/dcdbas.h
@@ -101,5 +101,7 @@ struct apm_cmd {
101 } __attribute__ ((packed)) parameters; 101 } __attribute__ ((packed)) parameters;
102} __attribute__ ((packed)); 102} __attribute__ ((packed));
103 103
104int dcdbas_smi_request(struct smi_cmd *smi_cmd);
105
104#endif /* _DCDBAS_H_ */ 106#endif /* _DCDBAS_H_ */
105 107
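
Exporting dcdbas_smi_request() (with the matching declaration added to dcdbas.h above) lets other kernel code generate Dell SMIs without going through the sysfs interface. A hedged sketch of a caller, mirroring the "Calling Interface SMI" case in smi_request_store(); populating the rest of struct smi_cmd beforehand is assumed to happen elsewhere:

    #include "dcdbas.h"

    static int my_calling_interface_smi(struct smi_cmd *smi_cmd)
    {
            smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer);
            /* per the comment above, caller must hold smi_data_lock */
            return dcdbas_smi_request(smi_cmd);
    }
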
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 3bf8ee120d42..261b9aa3f248 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -56,9 +56,9 @@ struct memmap_attribute {
56 ssize_t (*show)(struct firmware_map_entry *entry, char *buf); 56 ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
57}; 57};
58 58
59struct memmap_attribute memmap_start_attr = __ATTR_RO(start); 59static struct memmap_attribute memmap_start_attr = __ATTR_RO(start);
60struct memmap_attribute memmap_end_attr = __ATTR_RO(end); 60static struct memmap_attribute memmap_end_attr = __ATTR_RO(end);
61struct memmap_attribute memmap_type_attr = __ATTR_RO(type); 61static struct memmap_attribute memmap_type_attr = __ATTR_RO(type);
62 62
63/* 63/*
64 * These are default attributes that are added for every memmap entry. 64 * These are default attributes that are added for every memmap entry.
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 2f9e941968d6..d8f295bdad76 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -18,12 +18,6 @@
18#include <linux/dmi.h> 18#include <linux/dmi.h>
19 19
20#include <acpi/acpi_bus.h> 20#include <acpi/acpi_bus.h>
21#include <acpi/acnames.h>
22#include <acpi/acnamesp.h>
23#include <acpi/acparser.h>
24#include <acpi/acexcep.h>
25#include <acpi/acmacros.h>
26#include <acpi/actypes.h>
27 21
28#define REGS_PER_GTF 7 22#define REGS_PER_GTF 7
29struct taskfile_array { 23struct taskfile_array {
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index a074bfd5f825..1a919df809f8 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -245,12 +245,6 @@ static int ether1394_stop(struct net_device *dev)
245 return 0; 245 return 0;
246} 246}
247 247
248/* Return statistics to the caller */
249static struct net_device_stats *ether1394_stats(struct net_device *dev)
250{
251 return &(((struct eth1394_priv *)netdev_priv(dev))->stats);
252}
253
254/* FIXME: What to do if we timeout? I think a host reset is probably in order, 248/* FIXME: What to do if we timeout? I think a host reset is probably in order,
255 * so that's what we do. Should we increment the stat counters too? */ 249 * so that's what we do. Should we increment the stat counters too? */
256static void ether1394_tx_timeout(struct net_device *dev) 250static void ether1394_tx_timeout(struct net_device *dev)
@@ -516,16 +510,19 @@ static const struct header_ops ether1394_header_ops = {
516 .parse = ether1394_header_parse, 510 .parse = ether1394_header_parse,
517}; 511};
518 512
513static const struct net_device_ops ether1394_netdev_ops = {
514 .ndo_open = ether1394_open,
515 .ndo_stop = ether1394_stop,
516 .ndo_start_xmit = ether1394_tx,
517 .ndo_tx_timeout = ether1394_tx_timeout,
518 .ndo_change_mtu = ether1394_change_mtu,
519};
520
519static void ether1394_init_dev(struct net_device *dev) 521static void ether1394_init_dev(struct net_device *dev)
520{ 522{
521 dev->open = ether1394_open;
522 dev->stop = ether1394_stop;
523 dev->hard_start_xmit = ether1394_tx;
524 dev->get_stats = ether1394_stats;
525 dev->tx_timeout = ether1394_tx_timeout;
526 dev->change_mtu = ether1394_change_mtu;
527 523
528 dev->header_ops = &ether1394_header_ops; 524 dev->header_ops = &ether1394_header_ops;
525 dev->netdev_ops = &ether1394_netdev_ops;
529 526
530 SET_ETHTOOL_OPS(dev, &ethtool_ops); 527 SET_ETHTOOL_OPS(dev, &ethtool_ops);
531 528
@@ -1075,7 +1072,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1075 HPSB_PRINT(KERN_ERR, "ether1394 rx: sender nodeid " 1072 HPSB_PRINT(KERN_ERR, "ether1394 rx: sender nodeid "
1076 "lookup failure: " NODE_BUS_FMT, 1073 "lookup failure: " NODE_BUS_FMT,
1077 NODE_BUS_ARGS(priv->host, srcid)); 1074 NODE_BUS_ARGS(priv->host, srcid));
1078 priv->stats.rx_dropped++; 1075 dev->stats.rx_dropped++;
1079 return -1; 1076 return -1;
1080 } 1077 }
1081 ud = node->ud; 1078 ud = node->ud;
@@ -1098,7 +1095,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1098 skb = dev_alloc_skb(len + dev->hard_header_len + 15); 1095 skb = dev_alloc_skb(len + dev->hard_header_len + 15);
1099 if (unlikely(!skb)) { 1096 if (unlikely(!skb)) {
1100 ETH1394_PRINT_G(KERN_ERR, "Out of memory\n"); 1097 ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
1101 priv->stats.rx_dropped++; 1098 dev->stats.rx_dropped++;
1102 return -1; 1099 return -1;
1103 } 1100 }
1104 skb_reserve(skb, (dev->hard_header_len + 15) & ~15); 1101 skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
@@ -1217,15 +1214,15 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1217 spin_lock_irqsave(&priv->lock, flags); 1214 spin_lock_irqsave(&priv->lock, flags);
1218 1215
1219 if (!skb->protocol) { 1216 if (!skb->protocol) {
1220 priv->stats.rx_errors++; 1217 dev->stats.rx_errors++;
1221 priv->stats.rx_dropped++; 1218 dev->stats.rx_dropped++;
1222 dev_kfree_skb_any(skb); 1219 dev_kfree_skb_any(skb);
1223 } else if (netif_rx(skb) == NET_RX_DROP) { 1220 } else if (netif_rx(skb) == NET_RX_DROP) {
1224 priv->stats.rx_errors++; 1221 dev->stats.rx_errors++;
1225 priv->stats.rx_dropped++; 1222 dev->stats.rx_dropped++;
1226 } else { 1223 } else {
1227 priv->stats.rx_packets++; 1224 dev->stats.rx_packets++;
1228 priv->stats.rx_bytes += skb->len; 1225 dev->stats.rx_bytes += skb->len;
1229 } 1226 }
1230 1227
1231 spin_unlock_irqrestore(&priv->lock, flags); 1228 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1234,8 +1231,6 @@ bad_proto:
1234 if (netif_queue_stopped(dev)) 1231 if (netif_queue_stopped(dev))
1235 netif_wake_queue(dev); 1232 netif_wake_queue(dev);
1236 1233
1237 dev->last_rx = jiffies;
1238
1239 return 0; 1234 return 0;
1240} 1235}
1241 1236
@@ -1509,17 +1504,18 @@ static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
1509static void ether1394_dg_complete(struct packet_task *ptask, int fail) 1504static void ether1394_dg_complete(struct packet_task *ptask, int fail)
1510{ 1505{
1511 struct sk_buff *skb = ptask->skb; 1506 struct sk_buff *skb = ptask->skb;
1512 struct eth1394_priv *priv = netdev_priv(skb->dev); 1507 struct net_device *dev = skb->dev;
1508 struct eth1394_priv *priv = netdev_priv(dev);
1513 unsigned long flags; 1509 unsigned long flags;
1514 1510
1515 /* Statistics */ 1511 /* Statistics */
1516 spin_lock_irqsave(&priv->lock, flags); 1512 spin_lock_irqsave(&priv->lock, flags);
1517 if (fail) { 1513 if (fail) {
1518 priv->stats.tx_dropped++; 1514 dev->stats.tx_dropped++;
1519 priv->stats.tx_errors++; 1515 dev->stats.tx_errors++;
1520 } else { 1516 } else {
1521 priv->stats.tx_bytes += skb->len; 1517 dev->stats.tx_bytes += skb->len;
1522 priv->stats.tx_packets++; 1518 dev->stats.tx_packets++;
1523 } 1519 }
1524 spin_unlock_irqrestore(&priv->lock, flags); 1520 spin_unlock_irqrestore(&priv->lock, flags);
1525 1521
@@ -1696,8 +1692,8 @@ fail:
1696 dev_kfree_skb(skb); 1692 dev_kfree_skb(skb);
1697 1693
1698 spin_lock_irqsave(&priv->lock, flags); 1694 spin_lock_irqsave(&priv->lock, flags);
1699 priv->stats.tx_dropped++; 1695 dev->stats.tx_dropped++;
1700 priv->stats.tx_errors++; 1696 dev->stats.tx_errors++;
1701 spin_unlock_irqrestore(&priv->lock, flags); 1697 spin_unlock_irqrestore(&priv->lock, flags);
1702 1698
1703 /* 1699 /*
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
index e1b5ea80f623..d53bac47b86f 100644
--- a/drivers/ieee1394/eth1394.h
+++ b/drivers/ieee1394/eth1394.h
@@ -54,7 +54,6 @@ enum eth1394_bc_states { ETHER1394_BC_ERROR,
54 54
55/* Private structure for our ethernet driver */ 55/* Private structure for our ethernet driver */
56struct eth1394_priv { 56struct eth1394_priv {
57 struct net_device_stats stats; /* Device stats */
58 struct hpsb_host *host; /* The card for this dev */ 57 struct hpsb_host *host; /* The card for this dev */
59 u16 bc_maxpayload; /* Max broadcast payload */ 58 u16 bc_maxpayload; /* Max broadcast payload */
60 u8 bc_sspd; /* Max broadcast speed */ 59 u8 bc_sspd; /* Max broadcast speed */
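
The eth1394 conversion above (and the hysdn one below) makes the same two moves most netdev drivers made around this time: the method pointers migrate into a const struct net_device_ops, and the driver-private net_device_stats copy is replaced by the stats struct embedded in struct net_device, so the trivial get_stats callback can be dropped entirely (the core falls back to &dev->stats when no getter is set). In skeleton form, with illustrative names:

    static const struct net_device_ops my_netdev_ops = {
            .ndo_open       = my_open,
            .ndo_stop       = my_stop,
            .ndo_start_xmit = my_start_xmit,
            /* no .ndo_get_stats: the default returns &dev->stats */
    };

    dev->netdev_ops = &my_netdev_ops;

    /* datapath accounting then goes straight to the shared struct: */
    dev->stats.rx_packets++;
    dev->stats.rx_bytes += skb->len;
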
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index a812db243477..6ba57e91d7ab 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2705,7 +2705,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2705 sizeof(struct ietf_mpa_frame)); 2705 sizeof(struct ietf_mpa_frame));
2706 2706
2707 2707
2708 /* notify OF layer that accept event was successfull */ 2708 /* notify OF layer that accept event was successful */
2709 cm_id->add_ref(cm_id); 2709 cm_id->add_ref(cm_id);
2710 2710
2711 cm_event.event = IW_CM_EVENT_ESTABLISHED; 2711 cm_event.event = IW_CM_EVENT_ESTABLISHED;
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index a0f45c4fc198..d297accf9a7f 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -186,7 +186,7 @@ static int __devinit pxa930_trkball_probe(struct platform_device *pdev)
186 error = request_irq(irq, pxa930_trkball_interrupt, IRQF_DISABLED, 186 error = request_irq(irq, pxa930_trkball_interrupt, IRQF_DISABLED,
187 pdev->name, trkball); 187 pdev->name, trkball);
188 if (error) { 188 if (error) {
189 dev_err(&pdev->dev, "failed to request irq: %d\n", ret); 189 dev_err(&pdev->dev, "failed to request irq: %d\n", error);
190 goto failed_free_io; 190 goto failed_free_io;
191 } 191 }
192 192
@@ -227,7 +227,7 @@ failed_free_io:
227 iounmap(trkball->mmio_base); 227 iounmap(trkball->mmio_base);
228failed: 228failed:
229 kfree(trkball); 229 kfree(trkball);
230 return ret; 230 return error;
231} 231}
232 232
233static int __devexit pxa930_trkball_remove(struct platform_device *pdev) 233static int __devexit pxa930_trkball_remove(struct platform_device *pdev)
diff --git a/drivers/isdn/hardware/eicon/debuglib.h b/drivers/isdn/hardware/eicon/debuglib.h
index 016410cf2273..8ea587783e14 100644
--- a/drivers/isdn/hardware/eicon/debuglib.h
+++ b/drivers/isdn/hardware/eicon/debuglib.h
@@ -235,7 +235,7 @@ typedef void ( * DbgOld) (unsigned short, char *, va_list) ;
235typedef void ( * DbgEv) (unsigned short, unsigned long, va_list) ; 235typedef void ( * DbgEv) (unsigned short, unsigned long, va_list) ;
236typedef void ( * DbgIrq) (unsigned short, int, char *, va_list) ; 236typedef void ( * DbgIrq) (unsigned short, int, char *, va_list) ;
237typedef struct _DbgHandle_ 237typedef struct _DbgHandle_
238{ char Registered ; /* driver successfull registered */ 238{ char Registered ; /* driver successfully registered */
239#define DBG_HANDLE_REG_NEW 0x01 /* this (new) structure */ 239#define DBG_HANDLE_REG_NEW 0x01 /* this (new) structure */
240#define DBG_HANDLE_REG_OLD 0x7f /* old structure (see below) */ 240#define DBG_HANDLE_REG_OLD 0x7f /* old structure (see below) */
241 char Version; /* version of this structure */ 241 char Version; /* version of this structure */
diff --git a/drivers/isdn/hardware/eicon/os_4bri.c b/drivers/isdn/hardware/eicon/os_4bri.c
index 7b4ec3f60dbf..c964b8d91ada 100644
--- a/drivers/isdn/hardware/eicon/os_4bri.c
+++ b/drivers/isdn/hardware/eicon/os_4bri.c
@@ -997,7 +997,7 @@ diva_4bri_start_adapter(PISDN_ADAPTER IoAdapter,
997 diva_xdi_display_adapter_features(IoAdapter->ANum); 997 diva_xdi_display_adapter_features(IoAdapter->ANum);
998 998
999 for (i = 0; i < IoAdapter->tasks; i++) { 999 for (i = 0; i < IoAdapter->tasks; i++) {
1000 DBG_LOG(("A(%d) %s adapter successfull started", 1000 DBG_LOG(("A(%d) %s adapter successfully started",
1001 IoAdapter->QuadroList->QuadroAdapter[i]->ANum, 1001 IoAdapter->QuadroList->QuadroAdapter[i]->ANum,
1002 (IoAdapter->tasks == 1) ? "BRI 2.0" : "4BRI")) 1002 (IoAdapter->tasks == 1) ? "BRI 2.0" : "4BRI"))
1003 diva_xdi_didd_register_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum); 1003 diva_xdi_didd_register_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum);
diff --git a/drivers/isdn/hardware/eicon/os_bri.c b/drivers/isdn/hardware/eicon/os_bri.c
index f31bba5b16ff..08f01993f46b 100644
--- a/drivers/isdn/hardware/eicon/os_bri.c
+++ b/drivers/isdn/hardware/eicon/os_bri.c
@@ -736,7 +736,7 @@ diva_bri_start_adapter(PISDN_ADAPTER IoAdapter,
 
 	IoAdapter->Properties.Features = (word) features;
 	diva_xdi_display_adapter_features(IoAdapter->ANum);
-	DBG_LOG(("A(%d) BRI adapter successfull started", IoAdapter->ANum))
+	DBG_LOG(("A(%d) BRI adapter successfully started", IoAdapter->ANum))
 	/*
 	  Register with DIDD
 	*/
diff --git a/drivers/isdn/hardware/eicon/os_pri.c b/drivers/isdn/hardware/eicon/os_pri.c
index 903356547b79..5d65405c75f4 100644
--- a/drivers/isdn/hardware/eicon/os_pri.c
+++ b/drivers/isdn/hardware/eicon/os_pri.c
@@ -513,7 +513,7 @@ diva_pri_start_adapter(PISDN_ADAPTER IoAdapter,
 
 	diva_xdi_display_adapter_features(IoAdapter->ANum);
 
-	DBG_LOG(("A(%d) PRI adapter successfull started", IoAdapter->ANum))
+	DBG_LOG(("A(%d) PRI adapter successfully started", IoAdapter->ANum))
 	/*
 	  Register with DIDD
 	*/
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index 7ee5bd9f2bb4..579974cf4c9a 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -38,16 +38,12 @@ char *hysdn_net_revision = "$Revision: 1.8.6.4 $";
 /* inside the definition. */
 /****************************************************************************/
 struct net_local {
-	struct net_device netdev;	/* the network device */
-	struct net_device_stats stats;
-	/* additional vars may be added here */
-	char dev_name[9];	/* our own device name */
-
 	/* Tx control lock. This protects the transmit buffer ring
 	 * state along with the "tx full" state of the driver. This
 	 * means all netif_queue flow control actions are protected
 	 * by this lock as well.
 	 */
+	struct net_device *dev;
 	spinlock_t lock;
 	struct sk_buff *skbs[MAX_SKB_BUFFERS];	/* pointers to tx-skbs */
 	int in_idx, out_idx;	/* indexes to buffer ring */
@@ -55,15 +51,6 @@ struct net_local {
 };	/* net_local */
 
 
-/*****************************************************/
-/* Get the current statistics for this card.         */
-/* This may be called with the card open or closed ! */
-/*****************************************************/
-static struct net_device_stats *
-net_get_stats(struct net_device *dev)
-{
-	return (&((struct net_local *) dev)->stats);
-}	/* net_device_stats */
 
 /*********************************************************************/
 /* Open/initialize the board. This is called (in the current kernel) */
@@ -182,8 +169,8 @@ hysdn_tx_netack(hysdn_card * card)
 	if (!lp->sk_count)
 		return;		/* error condition */
 
-	lp->stats.tx_packets++;
-	lp->stats.tx_bytes += lp->skbs[lp->out_idx]->len;
+	lp->dev->stats.tx_packets++;
+	lp->dev->stats.tx_bytes += lp->skbs[lp->out_idx]->len;
 
 	dev_kfree_skb(lp->skbs[lp->out_idx++]);	/* free skb */
 	if (lp->out_idx >= MAX_SKB_BUFFERS)
@@ -200,29 +187,30 @@ void
 hysdn_rx_netpkt(hysdn_card * card, unsigned char *buf, unsigned short len)
 {
 	struct net_local *lp = card->netif;
+	struct net_device *dev = lp->dev;
 	struct sk_buff *skb;
 
 	if (!lp)
 		return;		/* non existing device */
 
-	lp->stats.rx_bytes += len;
+	dev->stats.rx_bytes += len;
 
 	skb = dev_alloc_skb(len);
 	if (skb == NULL) {
 		printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
-		       lp->netdev.name);
-		lp->stats.rx_dropped++;
+		       dev->name);
+		dev->stats.rx_dropped++;
 		return;
 	}
 	/* copy the data */
 	memcpy(skb_put(skb, len), buf, len);
 
 	/* determine the used protocol */
-	skb->protocol = eth_type_trans(skb, &lp->netdev);
+	skb->protocol = eth_type_trans(skb, dev);
 
-	netif_rx(skb);
-	lp->stats.rx_packets++;	/* adjust packet count */
+	dev->stats.rx_packets++;	/* adjust packet count */
 
+	netif_rx(skb);
 }	/* hysdn_rx_netpkt */
 
 /*****************************************************/
@@ -242,24 +230,15 @@ hysdn_tx_netget(hysdn_card * card)
 	return (lp->skbs[lp->out_idx]);	/* next packet to send */
 }	/* hysdn_tx_netget */
 
+static const struct net_device_ops hysdn_netdev_ops = {
+	.ndo_open		= net_open,
+	.ndo_stop		= net_close,
+	.ndo_start_xmit		= net_send_packet,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
 
-/*******************************************/
-/* init function called by register device */
-/*******************************************/
-static int
-net_init(struct net_device *dev)
-{
-	/* setup the function table */
-	dev->open = net_open;
-	dev->stop = net_close;
-	dev->hard_start_xmit = net_send_packet;
-	dev->get_stats = net_get_stats;
-
-	/* Fill in the fields of the device structure with ethernet values. */
-	ether_setup(dev);
-
-	return (0);	/* success */
-}	/* net_init */
 
 /*****************************************************************************/
 /* hysdn_net_create creates a new net device for the given card. If a device */
@@ -271,28 +250,34 @@ hysdn_net_create(hysdn_card * card)
 {
 	struct net_device *dev;
 	int i;
+	struct net_local *lp;
+
 	if(!card) {
 		printk(KERN_WARNING "No card-pt in hysdn_net_create!\n");
 		return (-ENOMEM);
 	}
 	hysdn_net_release(card);	/* release an existing net device */
-	if ((dev = kzalloc(sizeof(struct net_local), GFP_KERNEL)) == NULL) {
+
+	dev = alloc_etherdev(sizeof(struct net_local));
+	if (!dev) {
 		printk(KERN_WARNING "HYSDN: unable to allocate mem\n");
 		return (-ENOMEM);
 	}
 
+	lp = netdev_priv(dev);
+	lp->dev = dev;
+
+	dev->netdev_ops = &hysdn_netdev_ops;
 	spin_lock_init(&((struct net_local *) dev)->lock);
 
 	/* initialise necessary or informing fields */
 	dev->base_addr = card->iobase;	/* IO address */
 	dev->irq = card->irq;	/* irq */
-	dev->init = net_init;	/* the init function of the device */
-	if(dev->name) {
-		strcpy(dev->name, ((struct net_local *) dev)->dev_name);
-	}
+
+	dev->netdev_ops = &hysdn_netdev_ops;
 	if ((i = register_netdev(dev))) {
 		printk(KERN_WARNING "HYSDN: unable to create network device\n");
-		kfree(dev);
+		free_netdev(dev);
 		return (i);
 	}
 	dev->ml_priv = card;	/* remember pointer to own data structure */
@@ -316,7 +301,7 @@ hysdn_net_release(hysdn_card * card)
 		return (0);	/* non existing */
 
 	card->netif = NULL;	/* clear out pointer */
-	dev->stop(dev);		/* close the device */
+	net_close(dev);
 
 	flush_tx_buffers((struct net_local *) dev);	/* empty buffers */
 
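The hysdn changes are one instance of the net_device_ops conversion pattern: the open/stop/xmit pointers move off struct net_device into a shared const table, private state is obtained via alloc_etherdev()/netdev_priv() instead of embedding a struct net_device, counters move to the core-provided dev->stats (making the private stats copy and get_stats method unnecessary), and the failure path uses free_netdev(). A minimal sketch of the same shape, with foo_* names as illustrative stand-ins:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>

	struct foo_priv {
		struct net_device *dev;
	};

	static int foo_open(struct net_device *dev)  { return 0; }
	static int foo_close(struct net_device *dev) { return 0; }

	static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		dev->stats.tx_packets++;	/* core-provided counters */
		dev->stats.tx_bytes += skb->len;
		dev_kfree_skb(skb);
		return 0;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open	= foo_open,
		.ndo_stop	= foo_close,
		.ndo_start_xmit	= foo_xmit,
	};

	static int foo_create(void)
	{
		struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
		struct foo_priv *priv;
		int err;

		if (!dev)
			return -ENOMEM;
		priv = netdev_priv(dev);
		priv->dev = dev;
		dev->netdev_ops = &foo_netdev_ops;

		err = register_netdev(dev);
		if (err)
			free_netdev(dev);	/* not kfree(): frees priv too */
		return err;
	}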
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 023ea11d2f9e..7c5f97033b9f 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1485,6 +1485,24 @@ isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	return (rc);
 }
 
+
+static int isdn_net_ioctl(struct net_device *dev,
+			  struct ifreq *ifr, int cmd)
+{
+	isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+
+	switch (lp->p_encap) {
+#ifdef CONFIG_ISDN_PPP
+	case ISDN_NET_ENCAP_SYNCPPP:
+		return isdn_ppp_dev_ioctl(dev, ifr, cmd);
+#endif
+	case ISDN_NET_ENCAP_CISCOHDLCK:
+		return isdn_ciscohdlck_dev_ioctl(dev, ifr, cmd);
+	default:
+		return -EINVAL;
+	}
+}
+
 /* called via cisco_timer.function */
 static void
 isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
@@ -1998,23 +2016,6 @@ isdn_net_init(struct net_device *ndev)
 	ushort max_hlhdr_len = 0;
 	int drvidx;
 
-	ether_setup(ndev);
-	ndev->header_ops = NULL;
-
-	/* Setup the generic properties */
-	ndev->mtu = 1500;
-	ndev->flags = IFF_NOARP|IFF_POINTOPOINT;
-	ndev->type = ARPHRD_ETHER;
-	ndev->addr_len = ETH_ALEN;
-	ndev->validate_addr = NULL;
-
-	/* for clients with MPPP maybe higher values better */
-	ndev->tx_queue_len = 30;
-
-	/* The ISDN-specific entries in the device structure. */
-	ndev->open = &isdn_net_open;
-	ndev->hard_start_xmit = &isdn_net_start_xmit;
-
 	/*
 	 * up till binding we ask the protocol layer to reserve as much
 	 * as we might need for HL layer
@@ -2026,9 +2027,6 @@ isdn_net_init(struct net_device *ndev)
 		max_hlhdr_len = dev->drv[drvidx]->interface->hl_hdrlen;
 
 	ndev->hard_header_len = ETH_HLEN + max_hlhdr_len;
-	ndev->stop = &isdn_net_close;
-	ndev->get_stats = &isdn_net_get_stats;
-	ndev->do_ioctl = NULL;
 	return 0;
 }
 
@@ -2508,6 +2506,19 @@ isdn_net_force_dial(char *name)
 	return (isdn_net_force_dial_lp(p->local));
 }
 
+/* The ISDN-specific entries in the device structure. */
+static const struct net_device_ops isdn_netdev_ops = {
+	.ndo_init	      = isdn_net_init,
+	.ndo_open	      = isdn_net_open,
+	.ndo_stop	      = isdn_net_close,
+	.ndo_do_ioctl	      = isdn_net_ioctl,
+
+	.ndo_validate_addr    = NULL,
+	.ndo_start_xmit	      = isdn_net_start_xmit,
+	.ndo_get_stats	      = isdn_net_get_stats,
+	.ndo_tx_timeout	      = isdn_net_tx_timeout,
+};
+
 /*
  * Helper for alloc_netdev()
  */
@@ -2515,7 +2526,20 @@ static void _isdn_setup(struct net_device *dev)
 {
 	isdn_net_local *lp = netdev_priv(dev);
 
+	ether_setup(dev);
+
 	dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+	/* Setup the generic properties */
+	dev->mtu = 1500;
+	dev->flags = IFF_NOARP|IFF_POINTOPOINT;
+	dev->type = ARPHRD_ETHER;
+	dev->addr_len = ETH_ALEN;
+	dev->header_ops = NULL;
+	dev->netdev_ops = &isdn_netdev_ops;
+
+	/* for clients with MPPP maybe higher values better */
+	dev->tx_queue_len = 30;
+
 	lp->p_encap = ISDN_NET_ENCAP_RAWIP;
 	lp->magic = ISDN_NET_MAGIC;
 	lp->last = lp;
@@ -2570,7 +2594,7 @@ isdn_net_new(char *name, struct net_device *master)
 		return NULL;
 	}
 	netdev->local = netdev_priv(netdev->dev);
-	netdev->dev->init = isdn_net_init;
+
 	if (master) {
 		/* Device shall be a slave */
 		struct net_device *p = MASTER_TO_SLAVE(master);
@@ -2588,7 +2612,6 @@ isdn_net_new(char *name, struct net_device *master)
 	/*
 	 * Watchdog timer (currently) for master only.
 	 */
-	netdev->dev->tx_timeout = isdn_net_tx_timeout;
 	netdev->dev->watchdog_timeo = ISDN_NET_TX_TIMEOUT;
 	if (register_netdev(netdev->dev) != 0) {
 		printk(KERN_WARNING "isdn_net: Could not register net-device\n");
@@ -2704,7 +2727,6 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
 #else
 			p->dev->type = ARPHRD_PPP;	/* change ARP type */
 			p->dev->addr_len = 0;
-			p->dev->do_ioctl = isdn_ppp_dev_ioctl;
 #endif
 			break;
 		case ISDN_NET_ENCAP_X25IFACE:
@@ -2718,7 +2740,6 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
 #endif
 			break;
 		case ISDN_NET_ENCAP_CISCOHDLCK:
-			p->dev->do_ioctl = isdn_ciscohdlck_dev_ioctl;
 			break;
 		default:
 			if( cfg->p_encap >= 0 &&
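Once a driver shares one const net_device_ops table across all of its interfaces, it can no longer retarget dev->do_ioctl per device the way isdn_net_setcfg() used to; the replacement is a single ndo_do_ioctl handler that dispatches on the current encapsulation at call time, which is exactly what isdn_net_ioctl() above does. A reduced sketch of the same shape; all names and the encapsulation constant are illustrative:

	#include <linux/netdevice.h>

	struct foo_priv {
		int p_encap;
	};

	#define FOO_ENCAP_A 1	/* illustrative */

	static int foo_encap_a_ioctl(struct net_device *dev,
				     struct ifreq *ifr, int cmd)
	{
		return 0;	/* encapsulation-specific handling */
	}

	static int foo_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	{
		struct foo_priv *priv = netdev_priv(dev);

		switch (priv->p_encap) {
		case FOO_ENCAP_A:
			return foo_encap_a_ioctl(dev, ifr, cmd);
		default:
			return -EINVAL;
		}
	}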
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index e7fb7d2fcbfc..a4a1ae214630 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -63,6 +63,12 @@ config LEDS_WRAP
 	help
 	  This option enables support for the PCEngines WRAP programmable LEDs.
 
+config LEDS_ALIX2
+	tristate "LED Support for ALIX.2 and ALIX.3 series"
+	depends on LEDS_CLASS && X86 && EXPERIMENTAL
+	help
+	  This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
+
 config LEDS_H1940
 	tristate "LED Support for iPAQ H1940 device"
 	depends on LEDS_CLASS && ARCH_H1940
@@ -77,7 +83,7 @@ config LEDS_COBALT_QUBE
 
 config LEDS_COBALT_RAQ
 	bool "LED Support for the Cobalt Raq series"
-	depends on LEDS_CLASS && MIPS_COBALT
+	depends on LEDS_CLASS=y && MIPS_COBALT
 	select LEDS_TRIGGERS
 	help
 	  This option enables support for the Cobalt Raq series LEDs.
@@ -158,6 +164,13 @@ config LEDS_PCA955X
 	  LED driver chips accessed via the I2C bus.  Supported
 	  devices include PCA9550, PCA9551, PCA9552, and PCA9553.
 
+config LEDS_WM8350
+	tristate "LED Support for WM8350 AudioPlus PMIC"
+	depends on LEDS_CLASS && MFD_WM8350
+	help
+	  This option enables support for LEDs driven by the Wolfson
+	  Microelectronics WM8350 AudioPlus PMIC.
+
 config LEDS_DA903X
 	tristate "LED Support for DA9030/DA9034 PMIC"
 	depends on LEDS_CLASS && PMIC_DA903X
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e1967a29850e..bc247cb02e82 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
 obj-$(CONFIG_LEDS_AMS_DELTA)		+= leds-ams-delta.o
 obj-$(CONFIG_LEDS_NET48XX)		+= leds-net48xx.o
 obj-$(CONFIG_LEDS_WRAP)			+= leds-wrap.o
+obj-$(CONFIG_LEDS_ALIX2)		+= leds-alix2.o
 obj-$(CONFIG_LEDS_H1940)		+= leds-h1940.o
 obj-$(CONFIG_LEDS_COBALT_QUBE)		+= leds-cobalt-qube.o
 obj-$(CONFIG_LEDS_COBALT_RAQ)		+= leds-cobalt-raq.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
 obj-$(CONFIG_LEDS_PCA955X)		+= leds-pca955x.o
 obj-$(CONFIG_LEDS_DA903X)		+= leds-da903x.o
 obj-$(CONFIG_LEDS_HP_DISK)		+= leds-hp-disk.o
+obj-$(CONFIG_LEDS_WM8350)		+= leds-wm8350.o
 
 # LED Triggers
 obj-$(CONFIG_LEDS_TRIGGER_TIMER)	+= ledtrig-timer.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 6c4a326176d7..52f82e3ea13a 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -91,9 +91,29 @@ void led_classdev_resume(struct led_classdev *led_cdev)
 }
 EXPORT_SYMBOL_GPL(led_classdev_resume);
 
+static int led_suspend(struct device *dev, pm_message_t state)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+	if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
+		led_classdev_suspend(led_cdev);
+
+	return 0;
+}
+
+static int led_resume(struct device *dev)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+	if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
+		led_classdev_resume(led_cdev);
+
+	return 0;
+}
+
 /**
  * led_classdev_register - register a new object of led_classdev class.
- * @dev: The device to register.
+ * @parent: The device to register.
  * @led_cdev: the led_classdev structure for this device.
  */
 int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
@@ -174,6 +194,8 @@ static int __init leds_init(void)
 	leds_class = class_create(THIS_MODULE, "leds");
 	if (IS_ERR(leds_class))
 		return PTR_ERR(leds_class);
+	leds_class->suspend = led_suspend;
+	leds_class->resume = led_resume;
 	return 0;
 }
 
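With led_suspend()/led_resume() installed once at the class level, individual LED drivers no longer need platform suspend/resume hooks: they opt in by setting LED_CORE_SUSPENDRESUME, which is what the driver conversions later in this series do. A minimal sketch of the opt-in; the foo_* names are illustrative:

	#include <linux/leds.h>
	#include <linux/platform_device.h>

	static void foo_led_set(struct led_classdev *cdev,
				enum led_brightness value)
	{
		/* drive the hardware */
	}

	static struct led_classdev foo_led = {
		.name		= "foo::status",
		.brightness_set	= foo_led_set,
		/* the LED class core now suspends/resumes this LED for us */
		.flags		= LED_CORE_SUSPENDRESUME,
	};

	static int foo_led_probe(struct platform_device *pdev)
	{
		return led_classdev_register(&pdev->dev, &foo_led);
	}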
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
new file mode 100644
index 000000000000..ddbd7730dfc8
--- /dev/null
+++ b/drivers/leds/leds-alix2.c
@@ -0,0 +1,181 @@
+/*
+ * LEDs driver for PCEngines ALIX.2 and ALIX.3
+ *
+ * Copyright (C) 2008 Constantin Baranov <const@mimas.ru>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+static int force = 0;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs");
+
+struct alix_led {
+	struct led_classdev cdev;
+	unsigned short port;
+	unsigned int on_value;
+	unsigned int off_value;
+};
+
+static void alix_led_set(struct led_classdev *led_cdev,
+			 enum led_brightness brightness)
+{
+	struct alix_led *led_dev =
+		container_of(led_cdev, struct alix_led, cdev);
+
+	if (brightness)
+		outl(led_dev->on_value, led_dev->port);
+	else
+		outl(led_dev->off_value, led_dev->port);
+}
+
+static struct alix_led alix_leds[] = {
+	{
+		.cdev = {
+			.name = "alix:1",
+			.brightness_set = alix_led_set,
+		},
+		.port = 0x6100,
+		.on_value = 1 << 22,
+		.off_value = 1 << 6,
+	},
+	{
+		.cdev = {
+			.name = "alix:2",
+			.brightness_set = alix_led_set,
+		},
+		.port = 0x6180,
+		.on_value = 1 << 25,
+		.off_value = 1 << 9,
+	},
+	{
+		.cdev = {
+			.name = "alix:3",
+			.brightness_set = alix_led_set,
+		},
+		.port = 0x6180,
+		.on_value = 1 << 27,
+		.off_value = 1 << 11,
+	},
+};
+
+static int __init alix_led_probe(struct platform_device *pdev)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(alix_leds); i++) {
+		alix_leds[i].cdev.flags |= LED_CORE_SUSPENDRESUME;
+		ret = led_classdev_register(&pdev->dev, &alix_leds[i].cdev);
+		if (ret < 0)
+			goto fail;
+	}
+	return 0;
+
+fail:
+	while (--i >= 0)
+		led_classdev_unregister(&alix_leds[i].cdev);
+	return ret;
+}
+
+static int alix_led_remove(struct platform_device *pdev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(alix_leds); i++)
+		led_classdev_unregister(&alix_leds[i].cdev);
+	return 0;
+}
+
+static struct platform_driver alix_led_driver = {
+	.remove = alix_led_remove,
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init alix_present(void)
+{
+	const unsigned long bios_phys = 0x000f0000;
+	const size_t bios_len = 0x00010000;
+	const char alix_sig[] = "PC Engines ALIX.";
+	const size_t alix_sig_len = sizeof(alix_sig) - 1;
+
+	const char *bios_virt;
+	const char *scan_end;
+	const char *p;
+	int ret = 0;
+
+	if (force) {
+		printk(KERN_NOTICE "%s: forced to skip BIOS test, "
+		       "assume system has ALIX.2 style LEDs\n",
+		       KBUILD_MODNAME);
+		ret = 1;
+		goto out;
+	}
+
+	bios_virt = phys_to_virt(bios_phys);
+	scan_end = bios_virt + bios_len - (alix_sig_len + 2);
+	for (p = bios_virt; p < scan_end; p++) {
+		const char *tail;
+
+		if (memcmp(p, alix_sig, alix_sig_len) != 0) {
+			continue;
+		}
+
+		tail = p + alix_sig_len;
+		if ((tail[0] == '2' || tail[0] == '3') && tail[1] == '\0') {
+			printk(KERN_INFO
+			       "%s: system is recognized as \"%s\"\n",
+			       KBUILD_MODNAME, p);
+			ret = 1;
+			break;
+		}
+	}
+
+out:
+	return ret;
+}
+
+static struct platform_device *pdev;
+
+static int __init alix_led_init(void)
+{
+	int ret;
+
+	if (!alix_present()) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
+	if (!IS_ERR(pdev)) {
+		ret = platform_driver_probe(&alix_led_driver, alix_led_probe);
+		if (ret)
+			platform_device_unregister(pdev);
+	} else
+		ret = PTR_ERR(pdev);
+
+out:
+	return ret;
+}
+
+static void __exit alix_led_exit(void)
+{
+	platform_device_unregister(pdev);
+	platform_driver_unregister(&alix_led_driver);
+}
+
+module_init(alix_led_init);
+module_exit(alix_led_exit);
+
+MODULE_AUTHOR("Constantin Baranov <const@mimas.ru>");
+MODULE_DESCRIPTION("PCEngines ALIX.2 and ALIX.3 LED driver");
+MODULE_LICENSE("GPL");
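alix_present() gates the new driver on a signature scan of the BIOS shadow region at 0xf0000-0xfffff, looking for "PC Engines ALIX.2"/"ALIX.3" as a NUL-terminated string; the force=1 module parameter skips the test. The scan idiom, reduced to its core; the sig/len parameters stand in for a caller-supplied signature:

	#include <linux/init.h>
	#include <linux/io.h>
	#include <linux/string.h>

	static int __init bios_sig_present(const char *sig, size_t len)
	{
		const char *p = phys_to_virt(0x000f0000);	/* BIOS shadow */
		const char *end = p + 0x10000 - len;

		for (; p < end; p++)
			if (memcmp(p, sig, len) == 0)
				return 1;
		return 0;
	}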
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index 1bd590bb3a6e..446050759b4d 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -79,37 +79,12 @@ static struct ams_delta_led ams_delta_leds[] = {
 	},
 };
 
-#ifdef CONFIG_PM
-static int ams_delta_led_suspend(struct platform_device *dev,
-		pm_message_t state)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
-		led_classdev_suspend(&ams_delta_leds[i].cdev);
-
-	return 0;
-}
-
-static int ams_delta_led_resume(struct platform_device *dev)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
-		led_classdev_resume(&ams_delta_leds[i].cdev);
-
-	return 0;
-}
-#else
-#define ams_delta_led_suspend NULL
-#define ams_delta_led_resume NULL
-#endif
-
 static int ams_delta_led_probe(struct platform_device *pdev)
 {
 	int i, ret;
 
 	for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++) {
+		ams_delta_leds[i].cdev.flags |= LED_CORE_SUSPENDRESUME;
 		ret = led_classdev_register(&pdev->dev,
 				&ams_delta_leds[i].cdev);
 		if (ret < 0)
@@ -127,7 +102,7 @@ static int ams_delta_led_remove(struct platform_device *pdev)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i--)
+	for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
 		led_classdev_unregister(&ams_delta_leds[i].cdev);
 
 	return 0;
@@ -136,8 +111,6 @@ static int ams_delta_led_remove(struct platform_device *pdev)
 static struct platform_driver ams_delta_led_driver = {
 	.probe		= ams_delta_led_probe,
 	.remove		= ams_delta_led_remove,
-	.suspend	= ams_delta_led_suspend,
-	.resume		= ams_delta_led_resume,
 	.driver		= {
 		.name = "ams-delta-led",
 		.owner = THIS_MODULE,
@@ -151,7 +124,7 @@ static int __init ams_delta_led_init(void)
 
 static void __exit ams_delta_led_exit(void)
 {
-	return platform_driver_unregister(&ams_delta_led_driver);
+	platform_driver_unregister(&ams_delta_led_driver);
 }
 
 module_init(ams_delta_led_init);
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index eb3415e88f43..1813c84ea5fc 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -142,6 +142,7 @@ static struct led_classdev clevo_mail_led = {
 	.name		= "clevo::mail",
 	.brightness_set	= clevo_mail_led_set,
 	.blink_set	= clevo_mail_led_blink,
+	.flags		= LED_CORE_SUSPENDRESUME,
 };
 
 static int __init clevo_mail_led_probe(struct platform_device *pdev)
@@ -155,29 +156,9 @@ static int clevo_mail_led_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int clevo_mail_led_suspend(struct platform_device *dev,
-				  pm_message_t state)
-{
-	led_classdev_suspend(&clevo_mail_led);
-	return 0;
-}
-
-static int clevo_mail_led_resume(struct platform_device *dev)
-{
-	led_classdev_resume(&clevo_mail_led);
-	return 0;
-}
-#else
-#define clevo_mail_led_suspend NULL
-#define clevo_mail_led_resume NULL
-#endif
-
 static struct platform_driver clevo_mail_led_driver = {
 	.probe		= clevo_mail_led_probe,
 	.remove		= clevo_mail_led_remove,
-	.suspend	= clevo_mail_led_suspend,
-	.resume		= clevo_mail_led_resume,
 	.driver		= {
 		.name		= KBUILD_MODNAME,
 		.owner		= THIS_MODULE,
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index 34935155c1c0..5f7c9c5c09b1 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -99,64 +99,43 @@ static void fsg_led_ring_set(struct led_classdev *led_cdev,
 }
 
 
-
 static struct led_classdev fsg_wlan_led = {
 	.name			= "fsg:blue:wlan",
 	.brightness_set		= fsg_led_wlan_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_wan_led = {
 	.name			= "fsg:blue:wan",
 	.brightness_set		= fsg_led_wan_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_sata_led = {
 	.name			= "fsg:blue:sata",
 	.brightness_set		= fsg_led_sata_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_usb_led = {
 	.name			= "fsg:blue:usb",
 	.brightness_set		= fsg_led_usb_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_sync_led = {
 	.name			= "fsg:blue:sync",
 	.brightness_set		= fsg_led_sync_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_ring_led = {
 	.name			= "fsg:blue:ring",
 	.brightness_set		= fsg_led_ring_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 
-
-#ifdef CONFIG_PM
-static int fsg_led_suspend(struct platform_device *dev, pm_message_t state)
-{
-	led_classdev_suspend(&fsg_wlan_led);
-	led_classdev_suspend(&fsg_wan_led);
-	led_classdev_suspend(&fsg_sata_led);
-	led_classdev_suspend(&fsg_usb_led);
-	led_classdev_suspend(&fsg_sync_led);
-	led_classdev_suspend(&fsg_ring_led);
-	return 0;
-}
-
-static int fsg_led_resume(struct platform_device *dev)
-{
-	led_classdev_resume(&fsg_wlan_led);
-	led_classdev_resume(&fsg_wan_led);
-	led_classdev_resume(&fsg_sata_led);
-	led_classdev_resume(&fsg_usb_led);
-	led_classdev_resume(&fsg_sync_led);
-	led_classdev_resume(&fsg_ring_led);
-	return 0;
-}
-#endif
-
-
 static int fsg_led_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -232,10 +211,6 @@ static int fsg_led_remove(struct platform_device *pdev)
 static struct platform_driver fsg_led_driver = {
 	.probe		= fsg_led_probe,
 	.remove		= fsg_led_remove,
-#ifdef CONFIG_PM
-	.suspend	= fsg_led_suspend,
-	.resume		= fsg_led_resume,
-#endif
 	.driver		= {
 		.name	= "fsg-led",
 	},
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index b13bd2950e95..2e3df08b649b 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -105,6 +105,7 @@ static int gpio_led_probe(struct platform_device *pdev)
 		}
 		led_dat->cdev.brightness_set = gpio_led_set;
 		led_dat->cdev.brightness = LED_OFF;
+		led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
 
 		gpio_direction_output(led_dat->gpio, led_dat->active_low);
 
@@ -154,44 +155,9 @@ static int __devexit gpio_led_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int gpio_led_suspend(struct platform_device *pdev, pm_message_t state)
-{
-	struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
-	struct gpio_led_data *leds_data;
-	int i;
-
-	leds_data = platform_get_drvdata(pdev);
-
-	for (i = 0; i < pdata->num_leds; i++)
-		led_classdev_suspend(&leds_data[i].cdev);
-
-	return 0;
-}
-
-static int gpio_led_resume(struct platform_device *pdev)
-{
-	struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
-	struct gpio_led_data *leds_data;
-	int i;
-
-	leds_data = platform_get_drvdata(pdev);
-
-	for (i = 0; i < pdata->num_leds; i++)
-		led_classdev_resume(&leds_data[i].cdev);
-
-	return 0;
-}
-#else
-#define gpio_led_suspend NULL
-#define gpio_led_resume NULL
-#endif
-
 static struct platform_driver gpio_led_driver = {
 	.probe		= gpio_led_probe,
 	.remove		= __devexit_p(gpio_led_remove),
-	.suspend	= gpio_led_suspend,
-	.resume		= gpio_led_resume,
 	.driver		= {
 		.name	= "leds-gpio",
 		.owner	= THIS_MODULE,
diff --git a/drivers/leds/leds-hp-disk.c b/drivers/leds/leds-hp-disk.c
index 44fa757d8254..d786adc8c5e3 100644
--- a/drivers/leds/leds-hp-disk.c
+++ b/drivers/leds/leds-hp-disk.c
@@ -68,25 +68,9 @@ static struct led_classdev hpled_led = {
 	.name			= "hp:red:hddprotection",
 	.default_trigger	= "heartbeat",
 	.brightness_set		= hpled_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int hpled_suspend(struct acpi_device *dev, pm_message_t state)
-{
-	led_classdev_suspend(&hpled_led);
-	return 0;
-}
-
-static int hpled_resume(struct acpi_device *dev)
-{
-	led_classdev_resume(&hpled_led);
-	return 0;
-}
-#else
-#define hpled_suspend NULL
-#define hpled_resume NULL
-#endif
-
 static int hpled_add(struct acpi_device *device)
 {
 	int ret;
@@ -121,8 +105,6 @@ static struct acpi_driver leds_hp_driver = {
 	.ops = {
 		.add = hpled_add,
 		.remove = hpled_remove,
-		.suspend = hpled_suspend,
-		.resume = hpled_resume,
 	}
 };
 
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index e8fb1baf8a50..e4ce1fd46338 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -45,30 +45,16 @@ static struct led_classdev hp6xx_red_led = {
 	.name			= "hp6xx:red",
 	.default_trigger	= "hp6xx-charge",
 	.brightness_set		= hp6xxled_red_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev hp6xx_green_led = {
 	.name			= "hp6xx:green",
 	.default_trigger	= "ide-disk",
 	.brightness_set		= hp6xxled_green_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int hp6xxled_suspend(struct platform_device *dev, pm_message_t state)
-{
-	led_classdev_suspend(&hp6xx_red_led);
-	led_classdev_suspend(&hp6xx_green_led);
-	return 0;
-}
-
-static int hp6xxled_resume(struct platform_device *dev)
-{
-	led_classdev_resume(&hp6xx_red_led);
-	led_classdev_resume(&hp6xx_green_led);
-	return 0;
-}
-#endif
-
 static int hp6xxled_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -98,10 +84,6 @@ MODULE_ALIAS("platform:hp6xx-led");
 static struct platform_driver hp6xxled_driver = {
 	.probe		= hp6xxled_probe,
 	.remove		= hp6xxled_remove,
-#ifdef CONFIG_PM
-	.suspend	= hp6xxled_suspend,
-	.resume		= hp6xxled_resume,
-#endif
 	.driver		= {
 		.name		= "hp6xx-led",
 		.owner		= THIS_MODULE,
diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c
index 054360473c94..93987a12da49 100644
--- a/drivers/leds/leds-net48xx.c
+++ b/drivers/leds/leds-net48xx.c
@@ -33,26 +33,9 @@ static void net48xx_error_led_set(struct led_classdev *led_cdev,
 static struct led_classdev net48xx_error_led = {
 	.name		= "net48xx::error",
 	.brightness_set	= net48xx_error_led_set,
+	.flags		= LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int net48xx_led_suspend(struct platform_device *dev,
-		pm_message_t state)
-{
-	led_classdev_suspend(&net48xx_error_led);
-	return 0;
-}
-
-static int net48xx_led_resume(struct platform_device *dev)
-{
-	led_classdev_resume(&net48xx_error_led);
-	return 0;
-}
-#else
-#define net48xx_led_suspend NULL
-#define net48xx_led_resume NULL
-#endif
-
 static int net48xx_led_probe(struct platform_device *pdev)
 {
 	return led_classdev_register(&pdev->dev, &net48xx_error_led);
@@ -67,8 +50,6 @@ static int net48xx_led_remove(struct platform_device *pdev)
 static struct platform_driver net48xx_led_driver = {
 	.probe		= net48xx_led_probe,
 	.remove		= net48xx_led_remove,
-	.suspend	= net48xx_led_suspend,
-	.resume		= net48xx_led_resume,
 	.driver		= {
 		.name		= DRVNAME,
 		.owner		= THIS_MODULE,
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 4064d4f6b33b..76ec7498e2d5 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -16,6 +16,7 @@
 #include <linux/leds.h>
 #include <linux/input.h>
 #include <linux/mutex.h>
+#include <linux/workqueue.h>
 #include <linux/leds-pca9532.h>
 
 static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END};
@@ -34,6 +35,7 @@ struct pca9532_data {
 	struct pca9532_led leds[16];
 	struct mutex update_lock;
 	struct input_dev *idev;
+	struct work_struct work;
 	u8 pwm[2];
 	u8 psc[2];
 };
@@ -63,7 +65,7 @@ static struct i2c_driver pca9532_driver = {
  * as a compromise we average one pwm to the values requested by all
  * leds that are not ON/OFF.
  * */
-static int pca9532_setpwm(struct i2c_client *client, int pwm, int blink,
+static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
 	enum led_brightness value)
 {
 	int a = 0, b = 0, i = 0;
@@ -84,11 +86,17 @@ static int pca9532_setpwm(struct i2c_client *client, int pwm, int blink,
 	b = b/a;
 	if (b > 0xFF)
 		return -EINVAL;
-	mutex_lock(&data->update_lock);
 	data->pwm[pwm] = b;
+	data->psc[pwm] = blink;
+	return 0;
+}
+
+static int pca9532_setpwm(struct i2c_client *client, int pwm)
+{
+	struct pca9532_data *data = i2c_get_clientdata(client);
+	mutex_lock(&data->update_lock);
 	i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm),
 		data->pwm[pwm]);
-	data->psc[pwm] = blink;
 	i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm),
 		data->psc[pwm]);
 	mutex_unlock(&data->update_lock);
@@ -124,11 +132,11 @@ static void pca9532_set_brightness(struct led_classdev *led_cdev,
 		led->state = PCA9532_ON;
 	else {
 		led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
-		err = pca9532_setpwm(led->client, 0, 0, value);
+		err = pca9532_calcpwm(led->client, 0, 0, value);
 		if (err)
 			return; /* XXX: led api doesn't allow error code? */
 	}
-	pca9532_setled(led);
+	schedule_work(&led->work);
 }
 
 static int pca9532_set_blink(struct led_classdev *led_cdev,
@@ -137,6 +145,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
 	struct pca9532_led *led = ldev_to_led(led_cdev);
 	struct i2c_client *client = led->client;
 	int psc;
+	int err = 0;
 
 	if (*delay_on == 0 && *delay_off == 0) {
 		/* led subsystem ask us for a blink rate */
@@ -148,11 +157,15 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
 
 	/* Thecus specific: only use PSC/PWM 0 */
 	psc = (*delay_on * 152-1)/1000;
-	return pca9532_setpwm(client, 0, psc, led_cdev->brightness);
+	err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
+	if (err)
+		return err;
+	schedule_work(&led->work);
+	return 0;
 }
 
-int pca9532_event(struct input_dev *dev, unsigned int type, unsigned int code,
-	int value)
+static int pca9532_event(struct input_dev *dev, unsigned int type,
+	unsigned int code, int value)
 {
 	struct pca9532_data *data = input_get_drvdata(dev);
 
@@ -165,13 +178,28 @@ int pca9532_event(struct input_dev *dev, unsigned int type, unsigned int code,
 	else
 		data->pwm[1] = 0;
 
-	dev_info(&dev->dev, "setting beep to %d \n", data->pwm[1]);
+	schedule_work(&data->work);
+
+	return 0;
+}
+
+static void pca9532_input_work(struct work_struct *work)
+{
+	struct pca9532_data *data;
+	data = container_of(work, struct pca9532_data, work);
 	mutex_lock(&data->update_lock);
 	i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1),
 		data->pwm[1]);
 	mutex_unlock(&data->update_lock);
+}
 
-	return 0;
+static void pca9532_led_work(struct work_struct *work)
+{
+	struct pca9532_led *led;
+	led = container_of(work, struct pca9532_led, work);
+	if (led->state == PCA9532_PWM0)
+		pca9532_setpwm(led->client, 0);
+	pca9532_setled(led);
 }
 
 static int pca9532_configure(struct i2c_client *client,
@@ -204,8 +232,9 @@ static int pca9532_configure(struct i2c_client *client,
 			led->ldev.brightness = LED_OFF;
 			led->ldev.brightness_set = pca9532_set_brightness;
 			led->ldev.blink_set = pca9532_set_blink;
-			if (led_classdev_register(&client->dev,
-				&led->ldev) < 0) {
+			INIT_WORK(&led->work, pca9532_led_work);
+			err = led_classdev_register(&client->dev, &led->ldev);
+			if (err < 0) {
 				dev_err(&client->dev,
 					"couldn't register LED %s\n",
 					led->name);
@@ -233,9 +262,11 @@ static int pca9532_configure(struct i2c_client *client,
 			BIT_MASK(SND_TONE);
 		data->idev->event = pca9532_event;
 		input_set_drvdata(data->idev, data);
+		INIT_WORK(&data->work, pca9532_input_work);
 		err = input_register_device(data->idev);
 		if (err) {
 			input_free_device(data->idev);
+			cancel_work_sync(&data->work);
 			data->idev = NULL;
 			goto exit;
 		}
@@ -252,18 +283,19 @@ exit:
 		break;
 	case PCA9532_TYPE_LED:
 		led_classdev_unregister(&data->leds[i].ldev);
+		cancel_work_sync(&data->leds[i].work);
 		break;
 	case PCA9532_TYPE_N2100_BEEP:
 		if (data->idev != NULL) {
 			input_unregister_device(data->idev);
 			input_free_device(data->idev);
+			cancel_work_sync(&data->work);
 			data->idev = NULL;
 		}
 		break;
 	}
 
 	return err;
-
 }
 
 static int pca9532_probe(struct i2c_client *client,
@@ -271,12 +303,16 @@ static int pca9532_probe(struct i2c_client *client,
 {
 	struct pca9532_data *data = i2c_get_clientdata(client);
 	struct pca9532_platform_data *pca9532_pdata = client->dev.platform_data;
+	int err;
+
+	if (!pca9532_pdata)
+		return -EIO;
 
 	if (!i2c_check_functionality(client->adapter,
 		I2C_FUNC_SMBUS_BYTE_DATA))
 		return -EIO;
 
-	data = kzalloc(sizeof(struct pca9532_data), GFP_KERNEL);
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
@@ -285,12 +321,13 @@ static int pca9532_probe(struct i2c_client *client,
 	data->client = client;
 	mutex_init(&data->update_lock);
 
-	if (pca9532_pdata == NULL)
-		return -EIO;
-
-	pca9532_configure(client, data, pca9532_pdata);
-	return 0;
+	err = pca9532_configure(client, data, pca9532_pdata);
+	if (err) {
+		kfree(data);
+		i2c_set_clientdata(client, NULL);
+	}
 
+	return err;
 }
 
 static int pca9532_remove(struct i2c_client *client)
@@ -303,11 +340,13 @@ static int pca9532_remove(struct i2c_client *client)
 		break;
 	case PCA9532_TYPE_LED:
 		led_classdev_unregister(&data->leds[i].ldev);
+		cancel_work_sync(&data->leds[i].work);
 		break;
 	case PCA9532_TYPE_N2100_BEEP:
 		if (data->idev != NULL) {
 			input_unregister_device(data->idev);
 			input_free_device(data->idev);
+			cancel_work_sync(&data->work);
 			data->idev = NULL;
 		}
 		break;
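The pca9532 rework above follows a common constraint: brightness_set and blink_set may be invoked from atomic context, while i2c_smbus_*() transfers sleep. The conversion therefore splits each operation into a calc step that only records the desired state and a work item that performs the bus access, with cancel_work_sync() before teardown. The shape of the pattern, reduced; the foo_* names and the register address are illustrative:

	#include <linux/i2c.h>
	#include <linux/leds.h>
	#include <linux/workqueue.h>

	struct foo_led {
		struct led_classdev cdev;
		struct i2c_client *client;
		struct work_struct work;
		enum led_brightness value;
	};

	static void foo_led_work(struct work_struct *work)
	{
		struct foo_led *led = container_of(work, struct foo_led, work);

		/* process context: sleeping I2C access is safe here */
		i2c_smbus_write_byte_data(led->client, 0x02, led->value);
	}

	static void foo_led_set(struct led_classdev *cdev,
				enum led_brightness value)
	{
		struct foo_led *led = container_of(cdev, struct foo_led, cdev);

		led->value = value;		/* record only */
		schedule_work(&led->work);	/* defer the bus traffic */
	}

Probe would pair this with INIT_WORK(&led->work, foo_led_work), and remove with cancel_work_sync(&led->work), mirroring the hunks above.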
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 25a07f2643ad..4d81131542ae 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -82,6 +82,7 @@ static int s3c24xx_led_probe(struct platform_device *dev)
 	led->cdev.brightness_set = s3c24xx_led_set;
 	led->cdev.default_trigger = pdata->def_trigger;
 	led->cdev.name = pdata->name;
+	led->cdev.flags |= LED_CORE_SUSPENDRESUME;
 
 	led->pdata = pdata;
 
@@ -111,33 +112,9 @@ static int s3c24xx_led_probe(struct platform_device *dev)
 	return ret;
 }
 
-
-#ifdef CONFIG_PM
-static int s3c24xx_led_suspend(struct platform_device *dev, pm_message_t state)
-{
-	struct s3c24xx_gpio_led *led = pdev_to_gpio(dev);
-
-	led_classdev_suspend(&led->cdev);
-	return 0;
-}
-
-static int s3c24xx_led_resume(struct platform_device *dev)
-{
-	struct s3c24xx_gpio_led *led = pdev_to_gpio(dev);
-
-	led_classdev_resume(&led->cdev);
-	return 0;
-}
-#else
-#define s3c24xx_led_suspend NULL
-#define s3c24xx_led_resume NULL
-#endif
-
 static struct platform_driver s3c24xx_led_driver = {
 	.probe		= s3c24xx_led_probe,
 	.remove		= s3c24xx_led_remove,
-	.suspend	= s3c24xx_led_suspend,
-	.resume		= s3c24xx_led_resume,
 	.driver		= {
 		.name		= "s3c24xx_led",
 		.owner		= THIS_MODULE,
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
new file mode 100644
index 000000000000..38c6bcb07e6c
--- /dev/null
+++ b/drivers/leds/leds-wm8350.c
@@ -0,0 +1,311 @@
1/*
2 * LED driver for WM8350 driven LEDS.
3 *
4 * Copyright(C) 2007, 2008 Wolfson Microelectronics PLC.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/platform_device.h>
15#include <linux/leds.h>
16#include <linux/err.h>
17#include <linux/mfd/wm8350/pmic.h>
18#include <linux/regulator/consumer.h>
19
20/* Microamps */
21static const int isink_cur[] = {
22 4,
23 5,
24 6,
25 7,
26 8,
27 10,
28 11,
29 14,
30 16,
31 19,
32 23,
33 27,
34 32,
35 39,
36 46,
37 54,
38 65,
39 77,
40 92,
41 109,
42 130,
43 154,
44 183,
45 218,
46 259,
47 308,
48 367,
49 436,
50 518,
51 616,
52 733,
53 872,
54 1037,
55 1233,
56 1466,
57 1744,
58 2073,
59 2466,
60 2933,
61 3487,
62 4147,
63 4932,
64 5865,
65 6975,
66 8294,
67 9864,
68 11730,
69 13949,
70 16589,
71 19728,
72 23460,
73 27899,
74 33178,
75 39455,
76 46920,
77 55798,
78 66355,
79 78910,
80 93840,
81 111596,
82 132710,
83 157820,
84 187681,
85 223191
86};
87
88#define to_wm8350_led(led_cdev) \
89 container_of(led_cdev, struct wm8350_led, cdev)
90
91static void wm8350_led_enable(struct wm8350_led *led)
92{
93 int ret;
94
95 if (led->enabled)
96 return;
97
98 ret = regulator_enable(led->isink);
99 if (ret != 0) {
100 dev_err(led->cdev.dev, "Failed to enable ISINK: %d\n", ret);
101 return;
102 }
103
104 ret = regulator_enable(led->dcdc);
105 if (ret != 0) {
106 dev_err(led->cdev.dev, "Failed to enable DCDC: %d\n", ret);
107 regulator_disable(led->isink);
108 return;
109 }
110
111 led->enabled = 1;
112}
113
114static void wm8350_led_disable(struct wm8350_led *led)
115{
116 int ret;
117
118 if (!led->enabled)
119 return;
120
121 ret = regulator_disable(led->dcdc);
122 if (ret != 0) {
123 dev_err(led->cdev.dev, "Failed to disable DCDC: %d\n", ret);
124 return;
125 }
126
127 ret = regulator_disable(led->isink);
128 if (ret != 0) {
129 dev_err(led->cdev.dev, "Failed to disable ISINK: %d\n", ret);
130 regulator_enable(led->dcdc);
131 return;
132 }
133
134 led->enabled = 0;
135}
136
137static void led_work(struct work_struct *work)
138{
139 struct wm8350_led *led = container_of(work, struct wm8350_led, work);
140 int ret;
141 int uA;
142 unsigned long flags;
143
144 mutex_lock(&led->mutex);
145
146 spin_lock_irqsave(&led->value_lock, flags);
147
148 if (led->value == LED_OFF) {
149 spin_unlock_irqrestore(&led->value_lock, flags);
150 wm8350_led_disable(led);
151 goto out;
152 }
153
154 /* This scales linearly into the index of valid current
155 * settings which results in a linear scaling of perceived
156 * brightness due to the non-linear current settings provided
157 * by the hardware.
158 */
159 uA = (led->max_uA_index * led->value) / LED_FULL;
	spin_unlock_irqrestore(&led->value_lock, flags);
	BUG_ON(uA >= ARRAY_SIZE(isink_cur));

	ret = regulator_set_current_limit(led->isink, isink_cur[uA],
					  isink_cur[uA]);
	if (ret != 0)
		dev_err(led->cdev.dev, "Failed to set %duA: %d\n",
			isink_cur[uA], ret);

	wm8350_led_enable(led);

out:
	mutex_unlock(&led->mutex);
}

static void wm8350_led_set(struct led_classdev *led_cdev,
			   enum led_brightness value)
{
	struct wm8350_led *led = to_wm8350_led(led_cdev);
	unsigned long flags;

	spin_lock_irqsave(&led->value_lock, flags);
	led->value = value;
	schedule_work(&led->work);
	spin_unlock_irqrestore(&led->value_lock, flags);
}

static void wm8350_led_shutdown(struct platform_device *pdev)
{
	struct wm8350_led *led = platform_get_drvdata(pdev);

	mutex_lock(&led->mutex);
	led->value = LED_OFF;
	wm8350_led_disable(led);
	mutex_unlock(&led->mutex);
}

static int wm8350_led_probe(struct platform_device *pdev)
{
	struct regulator *isink, *dcdc;
	struct wm8350_led *led;
	struct wm8350_led_platform_data *pdata = pdev->dev.platform_data;
	int ret, i;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "no platform data\n");
		return -ENODEV;
	}

	if (pdata->max_uA < isink_cur[0]) {
		dev_err(&pdev->dev, "Invalid maximum current %duA\n",
			pdata->max_uA);
		return -EINVAL;
	}

	isink = regulator_get(&pdev->dev, "led_isink");
	if (IS_ERR(isink)) {
		printk(KERN_ERR "%s: can't get ISINK\n", __func__);
		return PTR_ERR(isink);
	}

	dcdc = regulator_get(&pdev->dev, "led_vcc");
	if (IS_ERR(dcdc)) {
		printk(KERN_ERR "%s: can't get DCDC\n", __func__);
		ret = PTR_ERR(dcdc);
		goto err_isink;
	}

	led = kzalloc(sizeof(*led), GFP_KERNEL);
	if (led == NULL) {
		ret = -ENOMEM;
		goto err_dcdc;
	}

	led->cdev.brightness_set = wm8350_led_set;
	led->cdev.default_trigger = pdata->default_trigger;
	led->cdev.name = pdata->name;
	led->cdev.flags |= LED_CORE_SUSPENDRESUME;
	led->enabled = regulator_is_enabled(isink);
	led->isink = isink;
	led->dcdc = dcdc;

	for (i = 0; i < ARRAY_SIZE(isink_cur) - 1; i++)
		if (isink_cur[i] >= pdata->max_uA)
			break;
	led->max_uA_index = i;
	if (pdata->max_uA != isink_cur[i])
		dev_warn(&pdev->dev,
			 "Maximum current %duA is not directly supported,"
			 " check platform data\n",
			 pdata->max_uA);

	spin_lock_init(&led->value_lock);
	mutex_init(&led->mutex);
	INIT_WORK(&led->work, led_work);
	led->value = LED_OFF;
	platform_set_drvdata(pdev, led);

	ret = led_classdev_register(&pdev->dev, &led->cdev);
	if (ret < 0)
		goto err_led;

	return 0;

 err_led:
	kfree(led);
 err_dcdc:
	regulator_put(dcdc);
 err_isink:
	regulator_put(isink);
	return ret;
}

static int wm8350_led_remove(struct platform_device *pdev)
{
	struct wm8350_led *led = platform_get_drvdata(pdev);

	led_classdev_unregister(&led->cdev);
	flush_scheduled_work();
	wm8350_led_disable(led);
	regulator_put(led->dcdc);
	regulator_put(led->isink);
	kfree(led);
	return 0;
}

static struct platform_driver wm8350_led_driver = {
	.driver = {
		.name = "wm8350-led",
		.owner = THIS_MODULE,
	},
	.probe = wm8350_led_probe,
	.remove = wm8350_led_remove,
	.shutdown = wm8350_led_shutdown,
};

static int __init wm8350_led_init(void)
{
	return platform_driver_register(&wm8350_led_driver);
}
module_init(wm8350_led_init);

static void wm8350_led_exit(void)
{
	platform_driver_unregister(&wm8350_led_driver);
}
module_exit(wm8350_led_exit);

MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM8350 LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm8350-led");
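
Worth noting in the driver above: wm8350_led_set() may be called from atomic context, while regulator_set_current_limit() can sleep, so the brightness callback only records the request under a spinlock and defers the regulator work to a workqueue. A minimal sketch of that pattern (all names below are illustrative, not part of this driver):

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct sketch_led {
	struct led_classdev cdev;
	struct work_struct work;	/* bound to a process-context handler */
	spinlock_t value_lock;		/* protects ->value */
	enum led_brightness value;
};

static void sketch_led_set(struct led_classdev *cdev,
			   enum led_brightness value)
{
	struct sketch_led *led = container_of(cdev, struct sketch_led, cdev);
	unsigned long flags;

	spin_lock_irqsave(&led->value_lock, flags);
	led->value = value;		/* just record the request... */
	schedule_work(&led->work);	/* ...and apply it where sleeping is allowed */
	spin_unlock_irqrestore(&led->value_lock, flags);
}
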
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
index 2f3aa87f2a1f..2982c86ac4cf 100644
--- a/drivers/leds/leds-wrap.c
+++ b/drivers/leds/leds-wrap.c
@@ -56,40 +56,21 @@ static struct led_classdev wrap_power_led = {
 	.name = "wrap::power",
 	.brightness_set = wrap_power_led_set,
 	.default_trigger = "default-on",
+	.flags = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev wrap_error_led = {
 	.name = "wrap::error",
 	.brightness_set = wrap_error_led_set,
+	.flags = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev wrap_extra_led = {
 	.name = "wrap::extra",
 	.brightness_set = wrap_extra_led_set,
+	.flags = LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int wrap_led_suspend(struct platform_device *dev,
-		pm_message_t state)
-{
-	led_classdev_suspend(&wrap_power_led);
-	led_classdev_suspend(&wrap_error_led);
-	led_classdev_suspend(&wrap_extra_led);
-	return 0;
-}
-
-static int wrap_led_resume(struct platform_device *dev)
-{
-	led_classdev_resume(&wrap_power_led);
-	led_classdev_resume(&wrap_error_led);
-	led_classdev_resume(&wrap_extra_led);
-	return 0;
-}
-#else
-#define wrap_led_suspend NULL
-#define wrap_led_resume NULL
-#endif
-
 static int wrap_led_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -127,8 +108,6 @@ static int wrap_led_remove(struct platform_device *pdev)
 static struct platform_driver wrap_led_driver = {
 	.probe = wrap_led_probe,
 	.remove = wrap_led_remove,
-	.suspend = wrap_led_suspend,
-	.resume = wrap_led_resume,
 	.driver = {
 		.name = DRVNAME,
 		.owner = THIS_MODULE,
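
The point of this diff is visible in the hunk counts: three one-line flag additions replace a twenty-line CONFIG_PM block. With LED_CORE_SUSPENDRESUME set, the LED class core suspends and resumes the device itself, so the driver drops its platform suspend/resume hooks. Roughly what the core does for a flagged device (a simplified sketch, not the exact mainline code):

static int led_suspend(struct device *dev, pm_message_t state)
{
	struct led_classdev *cdev = dev_get_drvdata(dev);

	if (cdev->flags & LED_CORE_SUSPENDRESUME)
		led_classdev_suspend(cdev);	/* saves brightness, forces the LED off */
	return 0;
}
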
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index db681962d7bb..3d6531396dda 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -199,6 +199,7 @@ err_out:
 static void timer_trig_deactivate(struct led_classdev *led_cdev)
 {
 	struct timer_trig_data *timer_data = led_cdev->trigger_data;
+	unsigned long on = 0, off = 0;
 
 	if (timer_data) {
 		device_remove_file(led_cdev->dev, &dev_attr_delay_on);
@@ -206,6 +207,10 @@ static void timer_trig_deactivate(struct led_classdev *led_cdev)
 		del_timer_sync(&timer_data->timer);
 		kfree(timer_data);
 	}
+
+	/* If there is hardware support for blinking, stop it */
+	if (led_cdev->blink_set)
+		led_cdev->blink_set(led_cdev, &on, &off);
 }
 
 static struct led_trigger timer_led_trigger = {
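
The new comment spells out the convention this relies on: a blink_set() call with both delays pointing at zero asks the driver to stop any hardware-assisted blinking. A driver-side sketch honouring that request (the sketch_hw_* helpers are hypothetical stand-ins for register pokes):

static void sketch_hw_blink_disable(void) { /* poke hardware */ }
static int sketch_hw_blink_program(unsigned long on, unsigned long off)
{
	return 0;	/* poke hardware with the requested on/off times */
}

static int sketch_blink_set(struct led_classdev *cdev,
			    unsigned long *delay_on, unsigned long *delay_off)
{
	if (*delay_on == 0 && *delay_off == 0) {
		sketch_hw_blink_disable();	/* trigger going away: stop blinking */
		return 0;
	}
	return sketch_hw_blink_program(*delay_on, *delay_off);
}
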
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ab7c8e4a61f9..719943763391 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -215,7 +215,6 @@ static struct page *read_sb_page(mddev_t *mddev, long offset,
 	/* choose a good rdev and read the page from there */
 
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 	sector_t target;
 
 	if (!page)
@@ -223,7 +222,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset,
 	if (!page)
 		return ERR_PTR(-ENOMEM);
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		if (! test_bit(In_sync, &rdev->flags)
 		    || test_bit(Faulty, &rdev->flags))
 			continue;
@@ -964,9 +963,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 		 */
 		page = bitmap->sb_page;
 		offset = sizeof(bitmap_super_t);
-		read_sb_page(bitmap->mddev, bitmap->offset,
-			     page,
-			     index, count);
+		if (!file)
+			read_sb_page(bitmap->mddev,
+				     bitmap->offset,
+				     page,
+				     index, count);
 	} else if (file) {
 		page = read_page(file, index, bitmap, count);
 		offset = 0;
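
The last hunk is a missing guard: for a file-backed bitmap the superblock page already lives in bitmap->sb_page, and re-reading it from the member disks would be wrong, so read_sb_page() now runs only when no backing file exists. The resulting control flow, in outline (condensed from the code above, not a verbatim copy):

if (index == 0) {		/* first page holds the bitmap superblock */
	page = bitmap->sb_page;	/* already in memory */
	offset = sizeof(bitmap_super_t);
	if (!file)		/* device-backed: refresh from a good rdev */
		read_sb_page(bitmap->mddev, bitmap->offset,
			     page, index, count);
} else if (file) {		/* file-backed: read the page from the file */
	page = read_page(file, index, bitmap, count);
	offset = 0;
}
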
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index f26c1f9a475b..86d9adf90e79 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -283,7 +283,6 @@ static int reconfig(mddev_t *mddev, int layout, int chunk_size)
 static int run(mddev_t *mddev)
 {
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 	int i;
 
 	conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL);
@@ -296,7 +295,7 @@ static int run(mddev_t *mddev)
 	}
 	conf->nfaults = 0;
 
-	rdev_for_each(rdev, tmp, mddev)
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		conf->rdev = rdev;
 
 	mddev->array_sectors = mddev->size * 2;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 3b90c5c924ec..1e3aea9eecf1 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -105,7 +105,6 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 	int i, nb_zone, cnt;
 	sector_t min_sectors;
 	sector_t curr_sector;
-	struct list_head *tmp;
 
 	conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
 			GFP_KERNEL);
@@ -115,7 +114,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 	cnt = 0;
 	conf->array_sectors = 0;
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		int j = rdev->raid_disk;
 		dev_info_t *disk = conf->disks + j;
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1b1d32694f6f..41e2509bf896 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -214,20 +214,33 @@ static inline mddev_t *mddev_get(mddev_t *mddev)
 	return mddev;
 }
 
+static void mddev_delayed_delete(struct work_struct *ws)
+{
+	mddev_t *mddev = container_of(ws, mddev_t, del_work);
+	kobject_del(&mddev->kobj);
+	kobject_put(&mddev->kobj);
+}
+
 static void mddev_put(mddev_t *mddev)
 {
 	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
 		return;
-	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
+	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
+	    !mddev->hold_active) {
 		list_del(&mddev->all_mddevs);
-		spin_unlock(&all_mddevs_lock);
-		blk_cleanup_queue(mddev->queue);
-		if (mddev->sysfs_state)
-			sysfs_put(mddev->sysfs_state);
-		mddev->sysfs_state = NULL;
-		kobject_put(&mddev->kobj);
-	} else
-		spin_unlock(&all_mddevs_lock);
+		if (mddev->gendisk) {
+			/* we did a probe so need to clean up.
+			 * Call schedule_work inside the spinlock
+			 * so that flush_scheduled_work() after
+			 * mddev_find will succeed in waiting for the
+			 * work to be done.
+			 */
+			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
+			schedule_work(&mddev->del_work);
+		} else
+			kfree(mddev);
+	}
+	spin_unlock(&all_mddevs_lock);
 }
 
 static mddev_t * mddev_find(dev_t unit)
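
mddev_put() can run in contexts that hold locks the kobject release path itself needs, so the final kobject_del()/kobject_put() is pushed to a workqueue; queuing while all_mddevs_lock is still held guarantees that a later flush_scheduled_work() (see md_alloc() further down) really waits for the deletion. The idiom in isolation (illustrative types, not the md code itself):

struct obj {
	struct kobject kobj;
	struct work_struct del_work;
};

static void obj_delayed_delete(struct work_struct *ws)
{
	struct obj *o = container_of(ws, struct obj, del_work);

	kobject_del(&o->kobj);	/* may sleep / take sysfs locks: safe here */
	kobject_put(&o->kobj);	/* final reference drop frees the object */
}

static void obj_schedule_delete(struct obj *o, spinlock_t *lock)
{
	assert_spin_locked(lock);	/* queued before anyone can observe removal */
	INIT_WORK(&o->del_work, obj_delayed_delete);
	schedule_work(&o->del_work);
}
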
@@ -236,15 +249,50 @@ static mddev_t * mddev_find(dev_t unit)
 
  retry:
 	spin_lock(&all_mddevs_lock);
-	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
-		if (mddev->unit == unit) {
-			mddev_get(mddev);
+
+	if (unit) {
+		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+			if (mddev->unit == unit) {
+				mddev_get(mddev);
+				spin_unlock(&all_mddevs_lock);
+				kfree(new);
+				return mddev;
+			}
+
+		if (new) {
+			list_add(&new->all_mddevs, &all_mddevs);
 			spin_unlock(&all_mddevs_lock);
-			kfree(new);
-			return mddev;
+			new->hold_active = UNTIL_IOCTL;
+			return new;
 		}
-
-	if (new) {
+	} else if (new) {
+		/* find an unused unit number */
+		static int next_minor = 512;
+		int start = next_minor;
+		int is_free = 0;
+		int dev = 0;
+		while (!is_free) {
+			dev = MKDEV(MD_MAJOR, next_minor);
+			next_minor++;
+			if (next_minor > MINORMASK)
+				next_minor = 0;
+			if (next_minor == start) {
+				/* Oh dear, all in use. */
+				spin_unlock(&all_mddevs_lock);
+				kfree(new);
+				return NULL;
+			}
+
+			is_free = 1;
+			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+				if (mddev->unit == dev) {
+					is_free = 0;
+					break;
+				}
+		}
+		new->unit = dev;
+		new->md_minor = MINOR(dev);
+		new->hold_active = UNTIL_STOP;
 		list_add(&new->all_mddevs, &all_mddevs);
 		spin_unlock(&all_mddevs_lock);
 		return new;
@@ -275,16 +323,6 @@ static mddev_t * mddev_find(dev_t unit)
 	new->resync_max = MaxSector;
 	new->level = LEVEL_NONE;
 
-	new->queue = blk_alloc_queue(GFP_KERNEL);
-	if (!new->queue) {
-		kfree(new);
-		return NULL;
-	}
-	/* Can be unlocked because the queue is new: no concurrency */
-	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);
-
-	blk_queue_make_request(new->queue, md_fail_request);
-
 	goto retry;
 }
 
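
For unit == 0 mddev_find() now doubles as an allocator: a static cursor walks the minor space from 512 upwards, wrapping at MINORMASK, and gives up only after a full lap. The same search, extracted (a sketch; the in_use() predicate stands in for the list scan done under all_mddevs_lock):

static int find_free_unit(bool (*in_use)(dev_t dev))
{
	static int next_minor = 512;	/* keep clear of the classic md0.. range */
	int start = next_minor;

	do {
		dev_t dev = MKDEV(MD_MAJOR, next_minor);

		next_minor++;
		if (next_minor > MINORMASK)
			next_minor = 0;		/* wrap around */
		if (!in_use(dev))
			return dev;
	} while (next_minor != start);

	return -ENOSPC;		/* a full lap found nothing free */
}
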
@@ -307,25 +345,23 @@ static inline void mddev_unlock(mddev_t * mddev)
 
 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
 {
-	mdk_rdev_t * rdev;
-	struct list_head *tmp;
+	mdk_rdev_t *rdev;
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->desc_nr == nr)
 			return rdev;
-	}
+
 	return NULL;
 }
 
 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
 {
-	struct list_head *tmp;
 	mdk_rdev_t *rdev;
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->bdev->bd_dev == dev)
 			return rdev;
-	}
+
 	return NULL;
 }
 
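
This pair of hunks shows the mechanical conversion applied throughout the rest of the patch: rdev_for_each() needed a caller-supplied struct list_head cursor, while list_for_each_entry() derives the containing mdk_rdev_t from the embedded same_set member, so the tmp locals simply disappear. The two shapes side by side (do_something() is a stand-in):

static void do_something(mdk_rdev_t *rdev);

static void old_style(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;		/* cursor the old macro required */

	rdev_for_each(rdev, tmp, mddev)
		do_something(rdev);
}

static void new_style(mddev_t *mddev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		do_something(rdev);	/* same walk, no cursor */
}

Note that list_for_each_entry() is not safe against deletion of the current entry, which is why export_array() and autorun_devices() below keep rdev_for_each(), now with a typed *tmp.
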
@@ -861,7 +897,6 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 {
 	mdp_super_t *sb;
-	struct list_head *tmp;
 	mdk_rdev_t *rdev2;
 	int next_spare = mddev->raid_disks;
 
@@ -933,7 +968,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 		sb->state |= (1<<MD_SB_BITMAP_PRESENT);
 
 	sb->disks[0].state = (1<<MD_DISK_REMOVED);
-	rdev_for_each(rdev2, tmp, mddev) {
+	list_for_each_entry(rdev2, &mddev->disks, same_set) {
 		mdp_disk_t *d;
 		int desc_nr;
 		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
@@ -1259,7 +1294,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 {
 	struct mdp_superblock_1 *sb;
-	struct list_head *tmp;
 	mdk_rdev_t *rdev2;
 	int max_dev, i;
 	/* make rdev->sb match mddev and rdev data. */
@@ -1307,7 +1341,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 	}
 
 	max_dev = 0;
-	rdev_for_each(rdev2, tmp, mddev)
+	list_for_each_entry(rdev2, &mddev->disks, same_set)
 		if (rdev2->desc_nr+1 > max_dev)
 			max_dev = rdev2->desc_nr+1;
 
@@ -1316,7 +1350,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 	for (i=0; i<max_dev;i++)
 		sb->dev_roles[i] = cpu_to_le16(0xfffe);
 
-	rdev_for_each(rdev2, tmp, mddev) {
+	list_for_each_entry(rdev2, &mddev->disks, same_set) {
 		i = rdev2->desc_nr;
 		if (test_bit(Faulty, &rdev2->flags))
 			sb->dev_roles[i] = cpu_to_le16(0xfffe);
@@ -1466,6 +1500,9 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
 
 	list_add_rcu(&rdev->same_set, &mddev->disks);
 	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
+
+	/* May as well allow recovery to be retried once */
+	mddev->recovery_disabled = 0;
 	return 0;
 
  fail:
@@ -1571,8 +1608,7 @@ static void kick_rdev_from_array(mdk_rdev_t * rdev)
 
 static void export_array(mddev_t *mddev)
 {
-	struct list_head *tmp;
-	mdk_rdev_t *rdev;
+	mdk_rdev_t *rdev, *tmp;
 
 	rdev_for_each(rdev, tmp, mddev) {
 		if (!rdev->mddev) {
@@ -1593,7 +1629,7 @@ static void print_desc(mdp_disk_t *desc)
 		desc->major,desc->minor,desc->raid_disk,desc->state);
 }
 
-static void print_sb(mdp_super_t *sb)
+static void print_sb_90(mdp_super_t *sb)
 {
 	int i;
 
@@ -1624,10 +1660,57 @@ static void print_sb(mdp_super_t *sb)
 	}
 	printk(KERN_INFO "md: THIS: ");
 	print_desc(&sb->this_disk);
-
 }
 
-static void print_rdev(mdk_rdev_t *rdev)
+static void print_sb_1(struct mdp_superblock_1 *sb)
+{
+	__u8 *uuid;
+
+	uuid = sb->set_uuid;
+	printk(KERN_INFO "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
+			":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
+	       KERN_INFO "md: Name: \"%s\" CT:%llu\n",
+		le32_to_cpu(sb->major_version),
+		le32_to_cpu(sb->feature_map),
+		uuid[0], uuid[1], uuid[2], uuid[3],
+		uuid[4], uuid[5], uuid[6], uuid[7],
+		uuid[8], uuid[9], uuid[10], uuid[11],
+		uuid[12], uuid[13], uuid[14], uuid[15],
+		sb->set_name,
+		(unsigned long long)le64_to_cpu(sb->ctime)
+		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);
+
+	uuid = sb->device_uuid;
+	printk(KERN_INFO "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
+			" RO:%llu\n"
+	       KERN_INFO "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+			":%02x%02x%02x%02x%02x%02x\n"
+	       KERN_INFO "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
+	       KERN_INFO "md: (MaxDev:%u) \n",
+		le32_to_cpu(sb->level),
+		(unsigned long long)le64_to_cpu(sb->size),
+		le32_to_cpu(sb->raid_disks),
+		le32_to_cpu(sb->layout),
+		le32_to_cpu(sb->chunksize),
+		(unsigned long long)le64_to_cpu(sb->data_offset),
+		(unsigned long long)le64_to_cpu(sb->data_size),
+		(unsigned long long)le64_to_cpu(sb->super_offset),
+		(unsigned long long)le64_to_cpu(sb->recovery_offset),
+		le32_to_cpu(sb->dev_number),
+		uuid[0], uuid[1], uuid[2], uuid[3],
+		uuid[4], uuid[5], uuid[6], uuid[7],
+		uuid[8], uuid[9], uuid[10], uuid[11],
+		uuid[12], uuid[13], uuid[14], uuid[15],
+		sb->devflags,
+		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
+		(unsigned long long)le64_to_cpu(sb->events),
+		(unsigned long long)le64_to_cpu(sb->resync_offset),
+		le32_to_cpu(sb->sb_csum),
+		le32_to_cpu(sb->max_dev)
+		);
+}
+
+static void print_rdev(mdk_rdev_t *rdev, int major_version)
 {
 	char b[BDEVNAME_SIZE];
 	printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
@@ -1635,15 +1718,22 @@ static void print_rdev(mdk_rdev_t *rdev)
 	       test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
 	       rdev->desc_nr);
 	if (rdev->sb_loaded) {
-		printk(KERN_INFO "md: rdev superblock:\n");
-		print_sb((mdp_super_t*)page_address(rdev->sb_page));
+		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
+		switch (major_version) {
+		case 0:
+			print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
+			break;
+		case 1:
+			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
+			break;
+		}
 	} else
 		printk(KERN_INFO "md: no rdev superblock!\n");
 }
 
 static void md_print_devices(void)
 {
-	struct list_head *tmp, *tmp2;
+	struct list_head *tmp;
 	mdk_rdev_t *rdev;
 	mddev_t *mddev;
 	char b[BDEVNAME_SIZE];
@@ -1658,12 +1748,12 @@ static void md_print_devices(void)
 			bitmap_print_sb(mddev->bitmap);
 		else
 			printk("%s: ", mdname(mddev));
-		rdev_for_each(rdev, tmp2, mddev)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
 			printk("<%s>", bdevname(rdev->bdev,b));
 		printk("\n");
 
-		rdev_for_each(rdev, tmp2, mddev)
-			print_rdev(rdev);
+		list_for_each_entry(rdev, &mddev->disks, same_set)
+			print_rdev(rdev, mddev->major_version);
 	}
 	printk("md: **********************************\n");
 	printk("\n");
@@ -1679,9 +1769,8 @@ static void sync_sbs(mddev_t * mddev, int nospares)
 	 * with the rest of the array)
 	 */
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		if (rdev->sb_events == mddev->events ||
 		    (nospares &&
 		     rdev->raid_disk < 0 &&
@@ -1699,7 +1788,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
 
 static void md_update_sb(mddev_t * mddev, int force_change)
 {
-	struct list_head *tmp;
 	mdk_rdev_t *rdev;
 	int sync_req;
 	int nospares = 0;
@@ -1790,7 +1878,7 @@ repeat:
 		mdname(mddev),mddev->in_sync);
 
 	bitmap_update_sb(mddev->bitmap);
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		char b[BDEVNAME_SIZE];
 		dprintk(KERN_INFO "md: ");
 		if (rdev->sb_loaded != 1)
@@ -1999,7 +2087,6 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 		md_wakeup_thread(rdev->mddev->thread);
 	} else if (rdev->mddev->pers) {
 		mdk_rdev_t *rdev2;
-		struct list_head *tmp;
 		/* Activating a spare .. or possibly reactivating
 		 * if we ever get bitmaps working here.
 		 */
@@ -2010,7 +2097,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 		if (rdev->mddev->pers->hot_add_disk == NULL)
 			return -EINVAL;
 
-		rdev_for_each(rdev2, tmp, rdev->mddev)
+		list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
 			if (rdev2->raid_disk == slot)
 				return -EEXIST;
 
@@ -2125,14 +2212,14 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 		 */
 		mddev_t *mddev;
 		int overlap = 0;
-		struct list_head *tmp, *tmp2;
+		struct list_head *tmp;
 
 		mddev_unlock(my_mddev);
 		for_each_mddev(mddev, tmp) {
 			mdk_rdev_t *rdev2;
 
 			mddev_lock(mddev);
-			rdev_for_each(rdev2, tmp2, mddev)
+			list_for_each_entry(rdev2, &mddev->disks, same_set)
 				if (test_bit(AllReserved, &rdev2->flags) ||
 				    (rdev->bdev == rdev2->bdev &&
 				     rdev != rdev2 &&
@@ -2328,8 +2415,7 @@ abort_free:
 static void analyze_sbs(mddev_t * mddev)
 {
 	int i;
-	struct list_head *tmp;
-	mdk_rdev_t *rdev, *freshest;
+	mdk_rdev_t *rdev, *freshest, *tmp;
 	char b[BDEVNAME_SIZE];
 
 	freshest = NULL;
@@ -3046,7 +3132,7 @@ action_store(mddev_t *mddev, const char *page, size_t len)
 	}
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
-	sysfs_notify(&mddev->kobj, NULL, "sync_action");
+	sysfs_notify_dirent(mddev->sysfs_action);
 	return len;
 }
 
@@ -3404,6 +3490,8 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 	rv = mddev_lock(mddev);
+	if (mddev->hold_active == UNTIL_IOCTL)
+		mddev->hold_active = 0;
 	if (!rv) {
 		rv = entry->store(mddev, page, length);
 		mddev_unlock(mddev);
@@ -3414,6 +3502,17 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
 static void md_free(struct kobject *ko)
 {
 	mddev_t *mddev = container_of(ko, mddev_t, kobj);
+
+	if (mddev->sysfs_state)
+		sysfs_put(mddev->sysfs_state);
+
+	if (mddev->gendisk) {
+		del_gendisk(mddev->gendisk);
+		put_disk(mddev->gendisk);
+	}
+	if (mddev->queue)
+		blk_cleanup_queue(mddev->queue);
+
 	kfree(mddev);
 }
 
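
md_free() grows these NULL checks because setup is now lazy: an mddev can reach its release function before md_alloc() ever attached a queue or gendisk. Teardown also has to mirror setup in reverse, as an outline of the function above makes explicit (a sketch, not the exact code):

static void sketch_release(mddev_t *mddev)
{
	/* the disk was created after the queue, so it dies first */
	if (mddev->gendisk) {
		del_gendisk(mddev->gendisk);	/* unhook from the block layer */
		put_disk(mddev->gendisk);	/* drop the gendisk reference */
	}
	if (mddev->queue)
		blk_cleanup_queue(mddev->queue);	/* safe once no disk points at it */
	kfree(mddev);				/* finally the structure itself */
}
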
@@ -3429,34 +3528,74 @@ static struct kobj_type md_ktype = {
 
 int mdp_major = 0;
 
-static struct kobject *md_probe(dev_t dev, int *part, void *data)
+static int md_alloc(dev_t dev, char *name)
 {
 	static DEFINE_MUTEX(disks_mutex);
 	mddev_t *mddev = mddev_find(dev);
 	struct gendisk *disk;
-	int partitioned = (MAJOR(dev) != MD_MAJOR);
-	int shift = partitioned ? MdpMinorShift : 0;
-	int unit = MINOR(dev) >> shift;
+	int partitioned;
+	int shift;
+	int unit;
 	int error;
 
 	if (!mddev)
-		return NULL;
+		return -ENODEV;
+
+	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
+	shift = partitioned ? MdpMinorShift : 0;
+	unit = MINOR(mddev->unit) >> shift;
+
+	/* wait for any previous instance of this device
+	 * to be completely removed (mddev_delayed_delete).
+	 */
+	flush_scheduled_work();
 
 	mutex_lock(&disks_mutex);
 	if (mddev->gendisk) {
 		mutex_unlock(&disks_mutex);
 		mddev_put(mddev);
-		return NULL;
+		return -EEXIST;
+	}
+
+	if (name) {
+		/* Need to ensure that 'name' is not a duplicate.
+		 */
+		mddev_t *mddev2;
+		spin_lock(&all_mddevs_lock);
+
+		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
+			if (mddev2->gendisk &&
+			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
+				spin_unlock(&all_mddevs_lock);
+				return -EEXIST;
+			}
+		spin_unlock(&all_mddevs_lock);
+	}
+
+	mddev->queue = blk_alloc_queue(GFP_KERNEL);
+	if (!mddev->queue) {
+		mutex_unlock(&disks_mutex);
+		mddev_put(mddev);
+		return -ENOMEM;
 	}
+	/* Can be unlocked because the queue is new: no concurrency */
+	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
+
+	blk_queue_make_request(mddev->queue, md_fail_request);
+
 	disk = alloc_disk(1 << shift);
 	if (!disk) {
 		mutex_unlock(&disks_mutex);
+		blk_cleanup_queue(mddev->queue);
+		mddev->queue = NULL;
 		mddev_put(mddev);
-		return NULL;
+		return -ENOMEM;
 	}
-	disk->major = MAJOR(dev);
+	disk->major = MAJOR(mddev->unit);
 	disk->first_minor = unit << shift;
-	if (partitioned)
+	if (name)
+		strcpy(disk->disk_name, name);
+	else if (partitioned)
 		sprintf(disk->disk_name, "md_d%d", unit);
 	else
 		sprintf(disk->disk_name, "md%d", unit);
@@ -3464,7 +3603,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
 	disk->private_data = mddev;
 	disk->queue = mddev->queue;
 	/* Allow extended partitions. This makes the
-	 * 'mdp' device redundant, but we can really
+	 * 'mdp' device redundant, but we can't really
 	 * remove it now.
 	 */
 	disk->flags |= GENHD_FL_EXT_DEVT;
@@ -3480,9 +3619,35 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
 		kobject_uevent(&mddev->kobj, KOBJ_ADD);
 		mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
 	}
+	mddev_put(mddev);
+	return 0;
+}
+
+static struct kobject *md_probe(dev_t dev, int *part, void *data)
+{
+	md_alloc(dev, NULL);
 	return NULL;
 }
 
+static int add_named_array(const char *val, struct kernel_param *kp)
+{
+	/* val must be "md_*" where * is not all digits.
+	 * We allocate an array with a large free minor number, and
+	 * set the name to val.  val must not already be an active name.
+	 */
+	int len = strlen(val);
+	char buf[DISK_NAME_LEN];
+
+	while (len && val[len-1] == '\n')
+		len--;
+	if (len >= DISK_NAME_LEN)
+		return -E2BIG;
+	strlcpy(buf, val, len+1);
+	if (strncmp(buf, "md_", 3) != 0)
+		return -EINVAL;
+	return md_alloc(0, buf);
+}
+
 static void md_safemode_timeout(unsigned long data)
 {
 	mddev_t *mddev = (mddev_t *) data;
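
add_named_array() is the handler behind the new_array module parameter registered at the end of this file; writing a string such as md_home to it creates an array with that name and a freshly allocated high minor (assuming the module is built as md_mod, the write would go through /sys/module/md_mod/parameters/new_array). The parameter-to-handler wiring in miniature (demo names, not part of md):

static int demo_set(const char *val, struct kernel_param *kp)
{
	/* 'val' arrives exactly as written, trailing newline included,
	 * which is why add_named_array() trims '\n' before validating */
	printk(KERN_INFO "demo: got \"%s\"\n", val);
	return 0;
}
module_param_call(demo_param, demo_set, NULL, NULL, S_IWUSR);
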
@@ -3501,7 +3666,6 @@ static int do_md_run(mddev_t * mddev)
 {
 	int err;
 	int chunk_size;
-	struct list_head *tmp;
 	mdk_rdev_t *rdev;
 	struct gendisk *disk;
 	struct mdk_personality *pers;
@@ -3540,7 +3704,7 @@ static int do_md_run(mddev_t * mddev)
 	}
 
 	/* devices must have minimum size of one chunk */
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		if (test_bit(Faulty, &rdev->flags))
 			continue;
 		if (rdev->size < chunk_size / 1024) {
@@ -3565,7 +3729,7 @@ static int do_md_run(mddev_t * mddev)
 	 * the only valid external interface is through the md
 	 * device.
 	 */
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		if (test_bit(Faulty, &rdev->flags))
 			continue;
 		sync_blockdev(rdev->bdev);
@@ -3630,10 +3794,10 @@ static int do_md_run(mddev_t * mddev)
 		 */
 		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
 		mdk_rdev_t *rdev2;
-		struct list_head *tmp2;
 		int warned = 0;
-		rdev_for_each(rdev, tmp, mddev) {
-			rdev_for_each(rdev2, tmp2, mddev) {
+
+		list_for_each_entry(rdev, &mddev->disks, same_set)
+			list_for_each_entry(rdev2, &mddev->disks, same_set) {
 				if (rdev < rdev2 &&
 				    rdev->bdev->bd_contains ==
 				    rdev2->bdev->bd_contains) {
@@ -3647,7 +3811,7 @@ static int do_md_run(mddev_t * mddev)
 					warned = 1;
 				}
 			}
-		}
+
 		if (warned)
 			printk(KERN_WARNING
 			       "True protection against single-disk"
@@ -3684,6 +3848,7 @@ static int do_md_run(mddev_t * mddev)
 			printk(KERN_WARNING
 			       "md: cannot register extra attributes for %s\n",
 			       mdname(mddev));
+		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
 	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
 		mddev->ro = 0;
 
@@ -3694,7 +3859,7 @@ static int do_md_run(mddev_t * mddev)
 		mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
 	mddev->in_sync = 1;
 
-	rdev_for_each(rdev, tmp, mddev)
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->raid_disk >= 0) {
 			char nm[20];
 			sprintf(nm, "rd%d", rdev->raid_disk);
@@ -3725,9 +3890,8 @@ static int do_md_run(mddev_t * mddev)
 	 * it will remove the drives and not do the right thing
 	 */
 	if (mddev->degraded && !mddev->sync_thread) {
-		struct list_head *rtmp;
 		int spares = 0;
-		rdev_for_each(rdev, rtmp, mddev)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(In_sync, &rdev->flags) &&
 			    !test_bit(Faulty, &rdev->flags))
@@ -3754,7 +3918,8 @@ static int do_md_run(mddev_t * mddev)
 	mddev->changed = 1;
 	md_new_event(mddev);
 	sysfs_notify_dirent(mddev->sysfs_state);
-	sysfs_notify(&mddev->kobj, NULL, "sync_action");
+	if (mddev->sysfs_action)
+		sysfs_notify_dirent(mddev->sysfs_action);
 	sysfs_notify(&mddev->kobj, NULL, "degraded");
 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 	return 0;
@@ -3854,9 +4019,12 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 		mddev->queue->merge_bvec_fn = NULL;
 		mddev->queue->unplug_fn = NULL;
 		mddev->queue->backing_dev_info.congested_fn = NULL;
-		if (mddev->pers->sync_request)
+		if (mddev->pers->sync_request) {
 			sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
-
+			if (mddev->sysfs_action)
+				sysfs_put(mddev->sysfs_action);
+			mddev->sysfs_action = NULL;
+		}
 		module_put(mddev->pers->owner);
 		mddev->pers = NULL;
 		/* tell userspace to handle 'inactive' */
@@ -3883,7 +4051,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 	 */
 	if (mode == 0) {
 		mdk_rdev_t *rdev;
-		struct list_head *tmp;
 
 		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
 
@@ -3895,7 +4062,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 		}
 		mddev->bitmap_offset = 0;
 
-		rdev_for_each(rdev, tmp, mddev)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (rdev->raid_disk >= 0) {
 				char nm[20];
 				sprintf(nm, "rd%d", rdev->raid_disk);
@@ -3941,6 +4108,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 		mddev->barriers_work = 0;
 		mddev->safemode = 0;
 		kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+		if (mddev->hold_active == UNTIL_STOP)
+			mddev->hold_active = 0;
 
 	} else if (mddev->pers)
 		printk(KERN_INFO "md: %s switched to read-only mode.\n",
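
hold_active is the new knob that keeps an otherwise-empty mddev alive across the window between creation and first use. From the md private header, paraphrased (two states plus zero for "no hold"):

int	hold_active;		/* in mddev_t; 0 means no hold */
#define	UNTIL_IOCTL	1	/* dropped on the next sysfs store or ioctl */
#define	UNTIL_STOP	2	/* dropped when the array is stopped, as here */

mddev_find() sets UNTIL_IOCTL for dev_t-based probes and UNTIL_STOP for named or auto-allocated arrays; md_attr_store(), md_ioctl() and this do_md_stop() hunk are the places that clear it again.
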
@@ -3956,7 +4125,6 @@ out:
 static void autorun_array(mddev_t *mddev)
 {
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 	int err;
 
 	if (list_empty(&mddev->disks))
@@ -3964,7 +4132,7 @@ static void autorun_array(mddev_t *mddev)
 
 	printk(KERN_INFO "md: running: ");
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		char b[BDEVNAME_SIZE];
 		printk("<%s>", bdevname(rdev->bdev,b));
 	}
@@ -3991,8 +4159,7 @@ static void autorun_array(mddev_t *mddev)
  */
 static void autorun_devices(int part)
 {
-	struct list_head *tmp;
-	mdk_rdev_t *rdev0, *rdev;
+	mdk_rdev_t *rdev0, *rdev, *tmp;
 	mddev_t *mddev;
 	char b[BDEVNAME_SIZE];
 
@@ -4007,7 +4174,7 @@ static void autorun_devices(int part)
 		printk(KERN_INFO "md: considering %s ...\n",
 			bdevname(rdev0->bdev,b));
 		INIT_LIST_HEAD(&candidates);
-		rdev_for_each_list(rdev, tmp, pending_raid_disks)
+		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
 			if (super_90_load(rdev, rdev0, 0) >= 0) {
 				printk(KERN_INFO "md: adding %s ...\n",
 					bdevname(rdev->bdev,b));
@@ -4053,7 +4220,7 @@ static void autorun_devices(int part)
 		} else {
 			printk(KERN_INFO "md: created %s\n", mdname(mddev));
 			mddev->persistent = 1;
-			rdev_for_each_list(rdev, tmp, candidates) {
+			rdev_for_each_list(rdev, tmp, &candidates) {
 				list_del_init(&rdev->same_set);
 				if (bind_rdev_to_array(rdev, mddev))
 					export_rdev(rdev);
@@ -4064,7 +4231,7 @@ static void autorun_devices(int part)
 		/* on success, candidates will be empty, on error
 		 * it won't...
 		 */
-		rdev_for_each_list(rdev, tmp, candidates) {
+		rdev_for_each_list(rdev, tmp, &candidates) {
 			list_del_init(&rdev->same_set);
 			export_rdev(rdev);
 		}
@@ -4093,10 +4260,9 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
 	mdu_array_info_t info;
 	int nr,working,active,failed,spare;
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 
 	nr=working=active=failed=spare=0;
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		nr++;
 		if (test_bit(Faulty, &rdev->flags))
 			failed++;
@@ -4614,9 +4780,8 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
 
 static int update_size(mddev_t *mddev, sector_t num_sectors)
 {
-	mdk_rdev_t * rdev;
+	mdk_rdev_t *rdev;
 	int rv;
-	struct list_head *tmp;
 	int fit = (num_sectors == 0);
 
 	if (mddev->pers->resize == NULL)
@@ -4638,7 +4803,7 @@ static int update_size(mddev_t *mddev, sector_t num_sectors)
 	 * grow, and re-add.
 	 */
 		return -EBUSY;
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		sector_t avail;
 		avail = rdev->size * 2;
 
@@ -5000,6 +5165,9 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 
 done_unlock:
 abort_unlock:
+	if (mddev->hold_active == UNTIL_IOCTL &&
+	    err != -EINVAL)
+		mddev->hold_active = 0;
 	mddev_unlock(mddev);
 
 	return err;
@@ -5016,14 +5184,25 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 	 * Succeed if we can lock the mddev, which confirms that
 	 * it isn't being stopped right now.
 	 */
-	mddev_t *mddev = bdev->bd_disk->private_data;
+	mddev_t *mddev = mddev_find(bdev->bd_dev);
 	int err;
 
+	if (mddev->gendisk != bdev->bd_disk) {
+		/* we are racing with mddev_put which is discarding this
+		 * bd_disk.
+		 */
+		mddev_put(mddev);
+		/* Wait until bdev->bd_disk is definitely gone */
+		flush_scheduled_work();
+		/* Then retry the open from the top */
+		return -ERESTARTSYS;
+	}
+	BUG_ON(mddev != bdev->bd_disk->private_data);
+
 	if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
 		goto out;
 
 	err = 0;
-	mddev_get(mddev);
 	atomic_inc(&mddev->openers);
 	mddev_unlock(mddev);
 
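
md_open() now takes its reference through mddev_find(), which is why the explicit mddev_get() further down disappears, and then checks that the bdev still points at the gendisk of the instance it found. If not, it is racing with the delayed delete scheduled by mddev_put(): the open backs off, waits for the pending work, and returns -ERESTARTSYS so the whole open is retried from scratch. The shape of that guard (a sketch of the lines above, not new logic):

mddev_t *mddev = mddev_find(bdev->bd_dev);	/* finds *and* references */

if (mddev->gendisk != bdev->bd_disk) {
	/* raced with mddev_delayed_delete(): this bd_disk is on its way out */
	mddev_put(mddev);		/* drop the reference we just took */
	flush_scheduled_work();		/* wait until the old disk is really gone */
	return -ERESTARTSYS;		/* caller retries the open */
}
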
@@ -5187,11 +5366,10 @@ static void status_unused(struct seq_file *seq)
 {
 	int i = 0;
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 
 	seq_printf(seq, "unused devices: ");
 
-	rdev_for_each_list(rdev, tmp, pending_raid_disks) {
+	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
 		char b[BDEVNAME_SIZE];
 		i++;
 		seq_printf(seq, "%s ",
@@ -5350,7 +5528,6 @@ static int md_seq_show(struct seq_file *seq, void *v)
 {
 	mddev_t *mddev = v;
 	sector_t size;
-	struct list_head *tmp2;
 	mdk_rdev_t *rdev;
 	struct mdstat_info *mi = seq->private;
 	struct bitmap *bitmap;
@@ -5387,7 +5564,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
 	}
 
 	size = 0;
-	rdev_for_each(rdev, tmp2, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		char b[BDEVNAME_SIZE];
 		seq_printf(seq, " %s[%d]",
 			bdevname(rdev->bdev,b), rdev->desc_nr);
@@ -5694,7 +5871,6 @@ void md_do_sync(mddev_t *mddev)
 	struct list_head *tmp;
 	sector_t last_check;
 	int skipped = 0;
-	struct list_head *rtmp;
 	mdk_rdev_t *rdev;
 	char *desc;
 
@@ -5799,7 +5975,7 @@ void md_do_sync(mddev_t *mddev)
 		/* recovery follows the physical size of devices */
 		max_sectors = mddev->size << 1;
 		j = MaxSector;
-		rdev_for_each(rdev, rtmp, mddev)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(Faulty, &rdev->flags) &&
 			    !test_bit(In_sync, &rdev->flags) &&
@@ -5949,7 +6125,7 @@ void md_do_sync(mddev_t *mddev)
 	} else {
 		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 			mddev->curr_resync = MaxSector;
-		rdev_for_each(rdev, rtmp, mddev)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(Faulty, &rdev->flags) &&
 			    !test_bit(In_sync, &rdev->flags) &&
@@ -5985,10 +6161,9 @@ EXPORT_SYMBOL_GPL(md_do_sync);
 static int remove_and_add_spares(mddev_t *mddev)
 {
 	mdk_rdev_t *rdev;
-	struct list_head *rtmp;
 	int spares = 0;
 
-	rdev_for_each(rdev, rtmp, mddev)
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->raid_disk >= 0 &&
 		    !test_bit(Blocked, &rdev->flags) &&
 		    (test_bit(Faulty, &rdev->flags) ||
@@ -6003,8 +6178,8 @@ static int remove_and_add_spares(mddev_t *mddev)
 		}
 	}
 
-	if (mddev->degraded && ! mddev->ro) {
-		rdev_for_each(rdev, rtmp, mddev) {
+	if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
+		list_for_each_entry(rdev, &mddev->disks, same_set) {
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(In_sync, &rdev->flags) &&
 			    !test_bit(Blocked, &rdev->flags))
@@ -6056,7 +6231,6 @@ static int remove_and_add_spares(mddev_t *mddev)
 void md_check_recovery(mddev_t *mddev)
 {
 	mdk_rdev_t *rdev;
-	struct list_head *rtmp;
 
 
 	if (mddev->bitmap)
@@ -6120,7 +6294,7 @@ void md_check_recovery(mddev_t *mddev)
 		if (mddev->flags)
 			md_update_sb(mddev, 0);
 
-		rdev_for_each(rdev, rtmp, mddev)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (test_and_clear_bit(StateChanged, &rdev->flags))
 				sysfs_notify_dirent(rdev->sysfs_state);
 
@@ -6149,13 +6323,13 @@ void md_check_recovery(mddev_t *mddev)
 			 * information must be scrapped
 			 */
 			if (!mddev->degraded)
-				rdev_for_each(rdev, rtmp, mddev)
+				list_for_each_entry(rdev, &mddev->disks, same_set)
 					rdev->saved_raid_disk = -1;
 
 			mddev->recovery = 0;
 			/* flag recovery needed just to double check */
 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-			sysfs_notify(&mddev->kobj, NULL, "sync_action");
+			sysfs_notify_dirent(mddev->sysfs_action);
 			md_new_event(mddev);
 			goto unlock;
 		}
@@ -6216,7 +6390,7 @@ void md_check_recovery(mddev_t *mddev)
 				mddev->recovery = 0;
 			} else
 				md_wakeup_thread(mddev->sync_thread);
-			sysfs_notify(&mddev->kobj, NULL, "sync_action");
+			sysfs_notify_dirent(mddev->sysfs_action);
 			md_new_event(mddev);
 		}
 	unlock:
@@ -6224,7 +6398,8 @@ void md_check_recovery(mddev_t *mddev)
 			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
 					       &mddev->recovery))
-				sysfs_notify(&mddev->kobj, NULL, "sync_action");
+				if (mddev->sysfs_action)
+					sysfs_notify_dirent(mddev->sysfs_action);
 		}
 		mddev_unlock(mddev);
 	}
@@ -6386,14 +6561,8 @@ static __exit void md_exit(void)
 	unregister_sysctl_table(raid_table_header);
 	remove_proc_entry("mdstat", NULL);
 	for_each_mddev(mddev, tmp) {
-		struct gendisk *disk = mddev->gendisk;
-		if (!disk)
-			continue;
 		export_array(mddev);
-		del_gendisk(disk);
-		put_disk(disk);
-		mddev->gendisk = NULL;
-		mddev_put(mddev);
+		mddev->hold_active = 0;
 	}
 }
 
@@ -6418,6 +6587,7 @@ static int set_ro(const char *val, struct kernel_param *kp)
 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
 
+module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
 
 EXPORT_SYMBOL(register_md_personality);
 EXPORT_SYMBOL(unregister_md_personality);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index d4ac47d11279..f6d08f241671 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -408,7 +408,6 @@ static int multipath_run (mddev_t *mddev)
 	int disk_idx;
 	struct multipath_info *disk;
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 
 	if (mddev->level != LEVEL_MULTIPATH) {
 		printk("multipath: %s: raid level not set to multipath IO (%d)\n",
@@ -441,7 +440,7 @@ static int multipath_run (mddev_t *mddev)
 	}
 
 	conf->working_disks = 0;
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		disk_idx = rdev->raid_disk;
 		if (disk_idx < 0 ||
 		    disk_idx >= mddev->raid_disks)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 8ac6488ad0dc..c605ba805586 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -53,11 +53,10 @@ static int raid0_congested(void *data, int bits)
 static int create_strip_zones (mddev_t *mddev)
 {
 	int i, c, j;
-	sector_t current_offset, curr_zone_offset;
+	sector_t current_start, curr_zone_start;
 	sector_t min_spacing;
 	raid0_conf_t *conf = mddev_to_conf(mddev);
 	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
-	struct list_head *tmp1, *tmp2;
 	struct strip_zone *zone;
 	int cnt;
 	char b[BDEVNAME_SIZE];
@@ -67,19 +66,19 @@ static int create_strip_zones (mddev_t *mddev)
 	 */
 	conf->nr_strip_zones = 0;
 
-	rdev_for_each(rdev1, tmp1, mddev) {
-		printk("raid0: looking at %s\n",
+	list_for_each_entry(rdev1, &mddev->disks, same_set) {
+		printk(KERN_INFO "raid0: looking at %s\n",
 			bdevname(rdev1->bdev,b));
 		c = 0;
-		rdev_for_each(rdev2, tmp2, mddev) {
-			printk("raid0: comparing %s(%llu)",
+		list_for_each_entry(rdev2, &mddev->disks, same_set) {
+			printk(KERN_INFO "raid0: comparing %s(%llu)",
			       bdevname(rdev1->bdev,b),
			       (unsigned long long)rdev1->size);
-			printk(" with %s(%llu)\n",
+			printk(KERN_INFO " with %s(%llu)\n",
			       bdevname(rdev2->bdev,b),
			       (unsigned long long)rdev2->size);
 			if (rdev2 == rdev1) {
-				printk("raid0: END\n");
+				printk(KERN_INFO "raid0: END\n");
 				break;
 			}
 			if (rdev2->size == rdev1->size)
@@ -88,19 +87,20 @@ static int create_strip_zones (mddev_t *mddev)
 				 * Not unique, don't count it as a new
 				 * group
 				 */
-				printk("raid0: EQUAL\n");
+				printk(KERN_INFO "raid0: EQUAL\n");
 				c = 1;
 				break;
 			}
-			printk("raid0: NOT EQUAL\n");
+			printk(KERN_INFO "raid0: NOT EQUAL\n");
 		}
 		if (!c) {
-			printk("raid0: ==> UNIQUE\n");
+			printk(KERN_INFO "raid0: ==> UNIQUE\n");
 			conf->nr_strip_zones++;
-			printk("raid0: %d zones\n", conf->nr_strip_zones);
+			printk(KERN_INFO "raid0: %d zones\n",
+			       conf->nr_strip_zones);
 		}
 	}
-	printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);
+	printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
 
 	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
 				conf->nr_strip_zones, GFP_KERNEL);
@@ -119,16 +119,17 @@ static int create_strip_zones (mddev_t *mddev)
 	cnt = 0;
 	smallest = NULL;
 	zone->dev = conf->devlist;
-	rdev_for_each(rdev1, tmp1, mddev) {
+	list_for_each_entry(rdev1, &mddev->disks, same_set) {
 		int j = rdev1->raid_disk;
 
 		if (j < 0 || j >= mddev->raid_disks) {
-			printk("raid0: bad disk number %d - aborting!\n", j);
+			printk(KERN_ERR "raid0: bad disk number %d - "
+				"aborting!\n", j);
 			goto abort;
 		}
 		if (zone->dev[j]) {
-			printk("raid0: multiple devices for %d - aborting!\n",
-				j);
+			printk(KERN_ERR "raid0: multiple devices for %d - "
+				"aborting!\n", j);
 			goto abort;
 		}
 		zone->dev[j] = rdev1;
@@ -149,16 +150,16 @@ static int create_strip_zones (mddev_t *mddev)
 		cnt++;
 	}
 	if (cnt != mddev->raid_disks) {
-		printk("raid0: too few disks (%d of %d) - aborting!\n",
-			cnt, mddev->raid_disks);
+		printk(KERN_ERR "raid0: too few disks (%d of %d) - "
+			"aborting!\n", cnt, mddev->raid_disks);
 		goto abort;
 	}
 	zone->nb_dev = cnt;
-	zone->size = smallest->size * cnt;
-	zone->zone_offset = 0;
+	zone->sectors = smallest->size * cnt * 2;
+	zone->zone_start = 0;
 
-	current_offset = smallest->size;
-	curr_zone_offset = zone->size;
+	current_start = smallest->size * 2;
+	curr_zone_start = zone->sectors;
 
 	/* now do the other zones */
 	for (i = 1; i < conf->nr_strip_zones; i++)
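
All the "* 2" and "/ 2" factors in these raid0 hunks are unit conversions: rdev->size still counts KiB, while the renamed zone fields (sectors, zone_start, dev_start) count 512-byte sectors, so one KiB is two sectors. The arithmetic in isolation (illustrative helpers, not in the patch):

/* 1 KiB == 2 sectors of 512 bytes */
static inline sector_t kib_to_sectors(sector_t kib)
{
	return kib * 2;
}

/* e.g. the first zone: smallest->size KiB on each of cnt devices */
static inline sector_t first_zone_sectors(sector_t smallest_kib, int cnt)
{
	return kib_to_sectors(smallest_kib) * cnt;
}
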
@@ -166,40 +167,41 @@ static int create_strip_zones (mddev_t *mddev)
 		zone = conf->strip_zone + i;
 		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;
 
-		printk("raid0: zone %d\n", i);
-		zone->dev_offset = current_offset;
+		printk(KERN_INFO "raid0: zone %d\n", i);
+		zone->dev_start = current_start;
 		smallest = NULL;
 		c = 0;
 
 		for (j=0; j<cnt; j++) {
 			char b[BDEVNAME_SIZE];
 			rdev = conf->strip_zone[0].dev[j];
-			printk("raid0: checking %s ...", bdevname(rdev->bdev,b));
-			if (rdev->size > current_offset)
-			{
-				printk(" contained as device %d\n", c);
+			printk(KERN_INFO "raid0: checking %s ...",
+				bdevname(rdev->bdev, b));
+			if (rdev->size > current_start / 2) {
+				printk(KERN_INFO " contained as device %d\n",
+					c);
 				zone->dev[c] = rdev;
 				c++;
 				if (!smallest || (rdev->size <smallest->size)) {
 					smallest = rdev;
-					printk(" (%llu) is smallest!.\n",
+					printk(KERN_INFO " (%llu) is smallest!.\n",
 						(unsigned long long)rdev->size);
 				}
 			} else
-				printk(" nope.\n");
+				printk(KERN_INFO " nope.\n");
 		}
 
 		zone->nb_dev = c;
-		zone->size = (smallest->size - current_offset) * c;
-		printk("raid0: zone->nb_dev: %d, size: %llu\n",
-			zone->nb_dev, (unsigned long long)zone->size);
+		zone->sectors = (smallest->size * 2 - current_start) * c;
+		printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
+			zone->nb_dev, (unsigned long long)zone->sectors);
 
-		zone->zone_offset = curr_zone_offset;
-		curr_zone_offset += zone->size;
+		zone->zone_start = curr_zone_start;
+		curr_zone_start += zone->sectors;
 
-		current_offset = smallest->size;
-		printk("raid0: current zone offset: %llu\n",
-			(unsigned long long)current_offset);
+		current_start = smallest->size * 2;
+		printk(KERN_INFO "raid0: current zone start: %llu\n",
+			(unsigned long long)current_start);
 	}
 
 	/* Now find appropriate hash spacing.
@@ -210,16 +212,16 @@ static int create_strip_zones (mddev_t *mddev)
 	 * strip though as its size has no bearing on the efficacy of the hash
 	 * table.
 	 */
-	conf->hash_spacing = curr_zone_offset;
-	min_spacing = curr_zone_offset;
+	conf->spacing = curr_zone_start;
+	min_spacing = curr_zone_start;
 	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
 	for (i=0; i < conf->nr_strip_zones-1; i++) {
-		sector_t sz = 0;
-		for (j=i; j<conf->nr_strip_zones-1 &&
-			sz < min_spacing ; j++)
-			sz += conf->strip_zone[j].size;
-		if (sz >= min_spacing && sz < conf->hash_spacing)
-			conf->hash_spacing = sz;
+		sector_t s = 0;
+		for (j = i; j < conf->nr_strip_zones - 1 &&
+				s < min_spacing; j++)
+			s += conf->strip_zone[j].sectors;
+		if (s >= min_spacing && s < conf->spacing)
+			conf->spacing = s;
 	}
 
 	mddev->queue->unplug_fn = raid0_unplug;
@@ -227,7 +229,7 @@ static int create_strip_zones (mddev_t *mddev)
 	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
-	printk("raid0: done.\n");
+	printk(KERN_INFO "raid0: done.\n");
 	return 0;
  abort:
233 return 1; 235 return 1;
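The create_strip_zones() hunks above switch the zone bookkeeping from 1 KB blocks to 512-byte sectors (hence the "* 2" factors) and then search for the smallest run of consecutive zone sizes that still covers min_spacing, keeping the zone-lookup table near one page. A minimal userspace sketch of that spacing search, assuming a 4 KB page and 8-byte pointers; the zone sizes are hypothetical:

/* Userspace sketch of the hash-spacing search, values hypothetical. */
#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
	sector_t zone_sectors[] = { 4096, 2048, 1024 };	/* per-zone sizes */
	int nr_strip_zones = 3, i, j;
	sector_t curr_zone_start = 4096 + 2048 + 1024;
	sector_t spacing = curr_zone_start;
	sector_t min_spacing = curr_zone_start / (4096 / sizeof(void *));

	for (i = 0; i < nr_strip_zones - 1; i++) {
		sector_t s = 0;
		for (j = i; j < nr_strip_zones - 1 && s < min_spacing; j++)
			s += zone_sectors[j];
		if (s >= min_spacing && s < spacing)
			spacing = s;
	}
	printf("spacing = %llu sectors\n", spacing);
	return 0;
}

Compiled with gcc, this prints "spacing = 2048 sectors" for the sizes above.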
@@ -262,10 +264,9 @@ static int raid0_mergeable_bvec(struct request_queue *q,
262static int raid0_run (mddev_t *mddev) 264static int raid0_run (mddev_t *mddev)
263{ 265{
264 unsigned cur=0, i=0, nb_zone; 266 unsigned cur=0, i=0, nb_zone;
265 s64 size; 267 s64 sectors;
266 raid0_conf_t *conf; 268 raid0_conf_t *conf;
267 mdk_rdev_t *rdev; 269 mdk_rdev_t *rdev;
268 struct list_head *tmp;
269 270
270 if (mddev->chunk_size == 0) { 271 if (mddev->chunk_size == 0) {
271 printk(KERN_ERR "md/raid0: non-zero chunk size required.\n"); 272 printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
@@ -291,54 +292,54 @@ static int raid0_run (mddev_t *mddev)
291 292
292 /* calculate array device size */ 293 /* calculate array device size */
293 mddev->array_sectors = 0; 294 mddev->array_sectors = 0;
294 rdev_for_each(rdev, tmp, mddev) 295 list_for_each_entry(rdev, &mddev->disks, same_set)
295 mddev->array_sectors += rdev->size * 2; 296 mddev->array_sectors += rdev->size * 2;
296 297
297 printk("raid0 : md_size is %llu blocks.\n", 298 printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
298 (unsigned long long)mddev->array_sectors / 2); 299 (unsigned long long)mddev->array_sectors);
299 printk("raid0 : conf->hash_spacing is %llu blocks.\n", 300 printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
300 (unsigned long long)conf->hash_spacing); 301 (unsigned long long)conf->spacing);
301 { 302 {
302 sector_t s = mddev->array_sectors / 2; 303 sector_t s = mddev->array_sectors;
303 sector_t space = conf->hash_spacing; 304 sector_t space = conf->spacing;
304 int round; 305 int round;
305 conf->preshift = 0; 306 conf->sector_shift = 0;
306 if (sizeof(sector_t) > sizeof(u32)) { 307 if (sizeof(sector_t) > sizeof(u32)) {
307 /*shift down space and s so that sector_div will work */ 308 /*shift down space and s so that sector_div will work */
308 while (space > (sector_t) (~(u32)0)) { 309 while (space > (sector_t) (~(u32)0)) {
309 s >>= 1; 310 s >>= 1;
310 space >>= 1; 311 space >>= 1;
311 s += 1; /* force round-up */ 312 s += 1; /* force round-up */
312 conf->preshift++; 313 conf->sector_shift++;
313 } 314 }
314 } 315 }
315 round = sector_div(s, (u32)space) ? 1 : 0; 316 round = sector_div(s, (u32)space) ? 1 : 0;
316 nb_zone = s + round; 317 nb_zone = s + round;
317 } 318 }
318 printk("raid0 : nb_zone is %d.\n", nb_zone); 319 printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
319 320
320 printk("raid0 : Allocating %Zd bytes for hash.\n", 321 printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n",
321 nb_zone*sizeof(struct strip_zone*)); 322 nb_zone*sizeof(struct strip_zone*));
322 conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL); 323 conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
323 if (!conf->hash_table) 324 if (!conf->hash_table)
324 goto out_free_conf; 325 goto out_free_conf;
325 size = conf->strip_zone[cur].size; 326 sectors = conf->strip_zone[cur].sectors;
326 327
327 conf->hash_table[0] = conf->strip_zone + cur; 328 conf->hash_table[0] = conf->strip_zone + cur;
328 for (i=1; i< nb_zone; i++) { 329 for (i=1; i< nb_zone; i++) {
329 while (size <= conf->hash_spacing) { 330 while (sectors <= conf->spacing) {
330 cur++; 331 cur++;
331 size += conf->strip_zone[cur].size; 332 sectors += conf->strip_zone[cur].sectors;
332 } 333 }
333 size -= conf->hash_spacing; 334 sectors -= conf->spacing;
334 conf->hash_table[i] = conf->strip_zone + cur; 335 conf->hash_table[i] = conf->strip_zone + cur;
335 } 336 }
336 if (conf->preshift) { 337 if (conf->sector_shift) {
337 conf->hash_spacing >>= conf->preshift; 338 conf->spacing >>= conf->sector_shift;
338 /* round hash_spacing up so when we divide by it, we 339 /* round spacing up so when we divide by it, we
339 * err on the side of too-low, which is safest 340 * err on the side of too-low, which is safest
340 */ 341 */
341 conf->hash_spacing++; 342 conf->spacing++;
342 } 343 }
343 344
344 /* calculate the max read-ahead size. 345 /* calculate the max read-ahead size.
@@ -387,12 +388,12 @@ static int raid0_stop (mddev_t *mddev)
387static int raid0_make_request (struct request_queue *q, struct bio *bio) 388static int raid0_make_request (struct request_queue *q, struct bio *bio)
388{ 389{
389 mddev_t *mddev = q->queuedata; 390 mddev_t *mddev = q->queuedata;
390 unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects; 391 unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
391 raid0_conf_t *conf = mddev_to_conf(mddev); 392 raid0_conf_t *conf = mddev_to_conf(mddev);
392 struct strip_zone *zone; 393 struct strip_zone *zone;
393 mdk_rdev_t *tmp_dev; 394 mdk_rdev_t *tmp_dev;
394 sector_t chunk; 395 sector_t chunk;
395 sector_t block, rsect; 396 sector_t sector, rsect;
396 const int rw = bio_data_dir(bio); 397 const int rw = bio_data_dir(bio);
397 int cpu; 398 int cpu;
398 399
@@ -407,11 +408,9 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
407 bio_sectors(bio)); 408 bio_sectors(bio));
408 part_stat_unlock(); 409 part_stat_unlock();
409 410
410 chunk_size = mddev->chunk_size >> 10;
411 chunk_sects = mddev->chunk_size >> 9; 411 chunk_sects = mddev->chunk_size >> 9;
412 chunksize_bits = ffz(~chunk_size); 412 chunksect_bits = ffz(~chunk_sects);
413 block = bio->bi_sector >> 1; 413 sector = bio->bi_sector;
414
415 414
416 if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) { 415 if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
417 struct bio_pair *bp; 416 struct bio_pair *bp;
@@ -434,28 +433,27 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
434 433
435 434
436 { 435 {
437 sector_t x = block >> conf->preshift; 436 sector_t x = sector >> conf->sector_shift;
438 sector_div(x, (u32)conf->hash_spacing); 437 sector_div(x, (u32)conf->spacing);
439 zone = conf->hash_table[x]; 438 zone = conf->hash_table[x];
440 } 439 }
441 440
442 while (block >= (zone->zone_offset + zone->size)) 441 while (sector >= zone->zone_start + zone->sectors)
443 zone++; 442 zone++;
444 443
445 sect_in_chunk = bio->bi_sector & ((chunk_size<<1) -1); 444 sect_in_chunk = bio->bi_sector & (chunk_sects - 1);
446 445
447 446
448 { 447 {
449 sector_t x = (block - zone->zone_offset) >> chunksize_bits; 448 sector_t x = (sector - zone->zone_start) >> chunksect_bits;
450 449
451 sector_div(x, zone->nb_dev); 450 sector_div(x, zone->nb_dev);
452 chunk = x; 451 chunk = x;
453 452
454 x = block >> chunksize_bits; 453 x = sector >> chunksect_bits;
455 tmp_dev = zone->dev[sector_div(x, zone->nb_dev)]; 454 tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
456 } 455 }
457 rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1) 456 rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;
458 + sect_in_chunk;
459 457
460 bio->bi_bdev = tmp_dev->bdev; 458 bio->bi_bdev = tmp_dev->bdev;
461 bio->bi_sector = rsect + tmp_dev->data_offset; 459 bio->bi_sector = rsect + tmp_dev->data_offset;
@@ -467,7 +465,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
467 465
468bad_map: 466bad_map:
469 printk("raid0_make_request bug: can't convert block across chunks" 467 printk("raid0_make_request bug: can't convert block across chunks"
470 " or bigger than %dk %llu %d\n", chunk_size, 468 " or bigger than %dk %llu %d\n", chunk_sects / 2,
471 (unsigned long long)bio->bi_sector, bio->bi_size >> 10); 469 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
472 470
473 bio_io_error(bio); 471 bio_io_error(bio);
@@ -492,10 +490,10 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev)
492 seq_printf(seq, "%s/", bdevname( 490 seq_printf(seq, "%s/", bdevname(
493 conf->strip_zone[j].dev[k]->bdev,b)); 491 conf->strip_zone[j].dev[k]->bdev,b));
494 492
495 seq_printf(seq, "] zo=%d do=%d s=%d\n", 493 seq_printf(seq, "] zs=%d ds=%d s=%d\n",
496 conf->strip_zone[j].zone_offset, 494 conf->strip_zone[j].zone_start,
497 conf->strip_zone[j].dev_offset, 495 conf->strip_zone[j].dev_start,
498 conf->strip_zone[j].size); 496 conf->strip_zone[j].sectors);
499 } 497 }
500#endif 498#endif
501 seq_printf(seq, " %dk chunks", mddev->chunk_size/1024); 499 seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
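The raid0_make_request() hunks replace the 1 KB "block" arithmetic with plain sector arithmetic: the chunk index, the device index, and the offset inside the chunk are all derived from bio->bi_sector with shifts and masks. A self-contained sketch of the same mapping, all parameter values hypothetical:

/* Sector-to-device mapping after the conversion; chunk_sects must be
 * a power of two, and chunksect_bits = ffz(~chunk_sects). */
#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
	unsigned int chunk_sects = 128;		/* 64k chunk */
	unsigned int chunksect_bits = 7;	/* ffz(~128) */
	unsigned int nb_dev = 4;
	sector_t sector = 100000, zone_start = 0, dev_start = 0;

	unsigned int sect_in_chunk = sector & (chunk_sects - 1);
	sector_t chunk = ((sector - zone_start) >> chunksect_bits) / nb_dev;
	unsigned int dev = (sector >> chunksect_bits) % nb_dev;
	sector_t rsect = (chunk << chunksect_bits) + dev_start + sect_in_chunk;

	printf("device %u, sector %llu\n", dev, rsect);
	return 0;
}

For sector 100000 with 64k chunks over 4 devices this prints "device 1, sector 24992".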
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 9c788e2489b1..7b4f5f7155d8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1016,12 +1016,16 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1016 * else mark the drive as failed 1016 * else mark the drive as failed
1017 */ 1017 */
1018 if (test_bit(In_sync, &rdev->flags) 1018 if (test_bit(In_sync, &rdev->flags)
1019 && (conf->raid_disks - mddev->degraded) == 1) 1019 && (conf->raid_disks - mddev->degraded) == 1) {
1020 /* 1020 /*
1021 * Don't fail the drive, act as though we were just a 1021 * Don't fail the drive, act as though we were just a
1022 * normal single drive 1022 * normal single drive.
1023 * However don't try a recovery from this drive as
1024 * it is very likely to fail.
1023 */ 1025 */
1026 mddev->recovery_disabled = 1;
1024 return; 1027 return;
1028 }
1025 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1029 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1026 unsigned long flags; 1030 unsigned long flags;
1027 spin_lock_irqsave(&conf->device_lock, flags); 1031 spin_lock_irqsave(&conf->device_lock, flags);
@@ -1919,7 +1923,6 @@ static int run(mddev_t *mddev)
1919 int i, j, disk_idx; 1923 int i, j, disk_idx;
1920 mirror_info_t *disk; 1924 mirror_info_t *disk;
1921 mdk_rdev_t *rdev; 1925 mdk_rdev_t *rdev;
1922 struct list_head *tmp;
1923 1926
1924 if (mddev->level != 1) { 1927 if (mddev->level != 1) {
1925 printk("raid1: %s: raid level not set to mirroring (%d)\n", 1928 printk("raid1: %s: raid level not set to mirroring (%d)\n",
@@ -1964,7 +1967,7 @@ static int run(mddev_t *mddev)
1964 spin_lock_init(&conf->device_lock); 1967 spin_lock_init(&conf->device_lock);
1965 mddev->queue->queue_lock = &conf->device_lock; 1968 mddev->queue->queue_lock = &conf->device_lock;
1966 1969
1967 rdev_for_each(rdev, tmp, mddev) { 1970 list_for_each_entry(rdev, &mddev->disks, same_set) {
1968 disk_idx = rdev->raid_disk; 1971 disk_idx = rdev->raid_disk;
1969 if (disk_idx >= mddev->raid_disks 1972 if (disk_idx >= mddev->raid_disks
1970 || disk_idx < 0) 1973 || disk_idx < 0)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 970a96ef9b18..6736d6dff981 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2025,7 +2025,6 @@ static int run(mddev_t *mddev)
2025 int i, disk_idx; 2025 int i, disk_idx;
2026 mirror_info_t *disk; 2026 mirror_info_t *disk;
2027 mdk_rdev_t *rdev; 2027 mdk_rdev_t *rdev;
2028 struct list_head *tmp;
2029 int nc, fc, fo; 2028 int nc, fc, fo;
2030 sector_t stride, size; 2029 sector_t stride, size;
2031 2030
@@ -2108,7 +2107,7 @@ static int run(mddev_t *mddev)
2108 spin_lock_init(&conf->device_lock); 2107 spin_lock_init(&conf->device_lock);
2109 mddev->queue->queue_lock = &conf->device_lock; 2108 mddev->queue->queue_lock = &conf->device_lock;
2110 2109
2111 rdev_for_each(rdev, tmp, mddev) { 2110 list_for_each_entry(rdev, &mddev->disks, same_set) {
2112 disk_idx = rdev->raid_disk; 2111 disk_idx = rdev->raid_disk;
2113 if (disk_idx >= mddev->raid_disks 2112 if (disk_idx >= mddev->raid_disks
2114 || disk_idx < 0) 2113 || disk_idx < 0)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a36a7435edf5..a5ba080d303b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3998,7 +3998,6 @@ static int run(mddev_t *mddev)
3998 int raid_disk, memory; 3998 int raid_disk, memory;
3999 mdk_rdev_t *rdev; 3999 mdk_rdev_t *rdev;
4000 struct disk_info *disk; 4000 struct disk_info *disk;
4001 struct list_head *tmp;
4002 int working_disks = 0; 4001 int working_disks = 0;
4003 4002
4004 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { 4003 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) {
@@ -4108,7 +4107,7 @@ static int run(mddev_t *mddev)
4108 4107
4109 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4108 pr_debug("raid5: run(%s) called.\n", mdname(mddev));
4110 4109
4111 rdev_for_each(rdev, tmp, mddev) { 4110 list_for_each_entry(rdev, &mddev->disks, same_set) {
4112 raid_disk = rdev->raid_disk; 4111 raid_disk = rdev->raid_disk;
4113 if (raid_disk >= conf->raid_disks 4112 if (raid_disk >= conf->raid_disks
4114 || raid_disk < 0) 4113 || raid_disk < 0)
@@ -4533,7 +4532,6 @@ static int raid5_start_reshape(mddev_t *mddev)
4533{ 4532{
4534 raid5_conf_t *conf = mddev_to_conf(mddev); 4533 raid5_conf_t *conf = mddev_to_conf(mddev);
4535 mdk_rdev_t *rdev; 4534 mdk_rdev_t *rdev;
4536 struct list_head *rtmp;
4537 int spares = 0; 4535 int spares = 0;
4538 int added_devices = 0; 4536 int added_devices = 0;
4539 unsigned long flags; 4537 unsigned long flags;
@@ -4541,7 +4539,7 @@ static int raid5_start_reshape(mddev_t *mddev)
4541 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4539 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4542 return -EBUSY; 4540 return -EBUSY;
4543 4541
4544 rdev_for_each(rdev, rtmp, mddev) 4542 list_for_each_entry(rdev, &mddev->disks, same_set)
4545 if (rdev->raid_disk < 0 && 4543 if (rdev->raid_disk < 0 &&
4546 !test_bit(Faulty, &rdev->flags)) 4544 !test_bit(Faulty, &rdev->flags))
4547 spares++; 4545 spares++;
@@ -4563,7 +4561,7 @@ static int raid5_start_reshape(mddev_t *mddev)
4563 /* Add some new drives, as many as will fit. 4561 /* Add some new drives, as many as will fit.
4564 * We know there are enough to make the newly sized array work. 4562 * We know there are enough to make the newly sized array work.
4565 */ 4563 */
4566 rdev_for_each(rdev, rtmp, mddev) 4564 list_for_each_entry(rdev, &mddev->disks, same_set)
4567 if (rdev->raid_disk < 0 && 4565 if (rdev->raid_disk < 0 &&
4568 !test_bit(Faulty, &rdev->flags)) { 4566 !test_bit(Faulty, &rdev->flags)) {
4569 if (raid5_add_disk(mddev, rdev) == 0) { 4567 if (raid5_add_disk(mddev, rdev) == 0) {
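The raid0/raid1/raid10/raid5 hunks above all make the same conversion: the old rdev_for_each() wrapper needed a spare struct list_head cursor variable, while open-coded list_for_each_entry() walks the intrusive list directly. A simplified userspace stand-in (singly linked, relying on the GCC typeof extension like the kernel macro) showing the pattern:

/* Simplified stand-in for the kernel's intrusive-list iteration. */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next; };

struct rdev {
	int raid_disk;
	struct list_head same_set;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                          \
	for (pos = container_of((head)->next, typeof(*pos), member);    \
	     &pos->member != (head);                                     \
	     pos = container_of(pos->member.next, typeof(*pos), member))

int main(void)
{
	struct rdev a = { .raid_disk = 0 }, b = { .raid_disk = 1 };
	struct list_head disks = { .next = &a.same_set };
	struct rdev *rdev;

	a.same_set.next = &b.same_set;
	b.same_set.next = &disks;

	list_for_each_entry(rdev, &disks, same_set)
		printf("raid_disk %d\n", rdev->raid_disk);
	return 0;
}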
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 03fd9dd5c685..f6ba8468858e 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -125,7 +125,6 @@ static void hexdump( const unsigned char *buf, unsigned short len )
125 125
126struct dvb_net_priv { 126struct dvb_net_priv {
127 int in_use; 127 int in_use;
128 struct net_device_stats stats;
129 u16 pid; 128 u16 pid;
130 struct net_device *net; 129 struct net_device *net;
131 struct dvb_net *host; 130 struct dvb_net *host;
@@ -384,8 +383,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
384 if (priv->ule_skb) { 383 if (priv->ule_skb) {
385 dev_kfree_skb( priv->ule_skb ); 384 dev_kfree_skb( priv->ule_skb );
386 /* Prepare for next SNDU. */ 385 /* Prepare for next SNDU. */
387 priv->stats.rx_errors++; 386 dev->stats.rx_errors++;
388 priv->stats.rx_frame_errors++; 387 dev->stats.rx_frame_errors++;
389 } 388 }
390 reset_ule(priv); 389 reset_ule(priv);
391 priv->need_pusi = 1; 390 priv->need_pusi = 1;
@@ -438,8 +437,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
438 dev_kfree_skb( priv->ule_skb ); 437 dev_kfree_skb( priv->ule_skb );
439 /* Prepare for next SNDU. */ 438 /* Prepare for next SNDU. */
440 // reset_ule(priv); moved to below. 439 // reset_ule(priv); moved to below.
441 priv->stats.rx_errors++; 440 dev->stats.rx_errors++;
442 priv->stats.rx_frame_errors++; 441 dev->stats.rx_frame_errors++;
443 } 442 }
444 reset_ule(priv); 443 reset_ule(priv);
445 /* skip to next PUSI. */ 444 /* skip to next PUSI. */
@@ -460,8 +459,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
460 /* Drop partly decoded SNDU, reset state, resync on PUSI. */ 459 /* Drop partly decoded SNDU, reset state, resync on PUSI. */
461 if (priv->ule_skb) { 460 if (priv->ule_skb) {
462 dev_kfree_skb( priv->ule_skb ); 461 dev_kfree_skb( priv->ule_skb );
463 priv->stats.rx_errors++; 462 dev->stats.rx_errors++;
464 priv->stats.rx_frame_errors++; 463 dev->stats.rx_frame_errors++;
465 } 464 }
466 reset_ule(priv); 465 reset_ule(priv);
467 priv->need_pusi = 1; 466 priv->need_pusi = 1;
@@ -477,8 +476,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
477 if (priv->ule_sndu_remain > 183) { 476 if (priv->ule_sndu_remain > 183) {
478 /* Current SNDU lacks more data than there could be available in the 477 /* Current SNDU lacks more data than there could be available in the
479 * current TS cell. */ 478 * current TS cell. */
480 priv->stats.rx_errors++; 479 dev->stats.rx_errors++;
481 priv->stats.rx_length_errors++; 480 dev->stats.rx_length_errors++;
482 printk(KERN_WARNING "%lu: Expected %d more SNDU bytes, but " 481 printk(KERN_WARNING "%lu: Expected %d more SNDU bytes, but "
483 "got PUSI (pf %d, ts_remain %d). Flushing incomplete payload.\n", 482 "got PUSI (pf %d, ts_remain %d). Flushing incomplete payload.\n",
484 priv->ts_count, priv->ule_sndu_remain, ts[4], ts_remain); 483 priv->ts_count, priv->ule_sndu_remain, ts[4], ts_remain);
@@ -520,8 +519,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
520 if (priv->ule_sndu_len < 5) { 519 if (priv->ule_sndu_len < 5) {
521 printk(KERN_WARNING "%lu: Invalid ULE SNDU length %u. " 520 printk(KERN_WARNING "%lu: Invalid ULE SNDU length %u. "
522 "Resyncing.\n", priv->ts_count, priv->ule_sndu_len); 521 "Resyncing.\n", priv->ts_count, priv->ule_sndu_len);
523 priv->stats.rx_errors++; 522 dev->stats.rx_errors++;
524 priv->stats.rx_length_errors++; 523 dev->stats.rx_length_errors++;
525 priv->ule_sndu_len = 0; 524 priv->ule_sndu_len = 0;
526 priv->need_pusi = 1; 525 priv->need_pusi = 1;
527 new_ts = 1; 526 new_ts = 1;
@@ -573,7 +572,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
573 if (priv->ule_skb == NULL) { 572 if (priv->ule_skb == NULL) {
574 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", 573 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
575 dev->name); 574 dev->name);
576 priv->stats.rx_dropped++; 575 dev->stats.rx_dropped++;
577 return; 576 return;
578 } 577 }
579 578
@@ -637,8 +636,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
637 ule_dump = 1; 636 ule_dump = 1;
638#endif 637#endif
639 638
640 priv->stats.rx_errors++; 639 dev->stats.rx_errors++;
641 priv->stats.rx_crc_errors++; 640 dev->stats.rx_crc_errors++;
642 dev_kfree_skb(priv->ule_skb); 641 dev_kfree_skb(priv->ule_skb);
643 } else { 642 } else {
644 /* CRC32 verified OK. */ 643 /* CRC32 verified OK. */
@@ -744,8 +743,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
744 * receive the packet anyhow. */ 743 * receive the packet anyhow. */
745 /* if (priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST) 744 /* if (priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
746 priv->ule_skb->pkt_type = PACKET_HOST; */ 745 priv->ule_skb->pkt_type = PACKET_HOST; */
747 priv->stats.rx_packets++; 746 dev->stats.rx_packets++;
748 priv->stats.rx_bytes += priv->ule_skb->len; 747 dev->stats.rx_bytes += priv->ule_skb->len;
749 netif_rx(priv->ule_skb); 748 netif_rx(priv->ule_skb);
750 } 749 }
751 sndu_done: 750 sndu_done:
@@ -800,8 +799,7 @@ static void dvb_net_sec(struct net_device *dev,
800{ 799{
801 u8 *eth; 800 u8 *eth;
802 struct sk_buff *skb; 801 struct sk_buff *skb;
803 struct net_device_stats *stats = 802 struct net_device_stats *stats = &dev->stats;
804 &((struct dvb_net_priv *) netdev_priv(dev))->stats;
805 int snap = 0; 803 int snap = 0;
806 804
807 /* note: pkt_len includes a 32bit checksum */ 805 /* note: pkt_len includes a 32bit checksum */
@@ -1216,28 +1214,29 @@ static int dvb_net_stop(struct net_device *dev)
1216 return dvb_net_feed_stop(dev); 1214 return dvb_net_feed_stop(dev);
1217} 1215}
1218 1216
1219static struct net_device_stats * dvb_net_get_stats(struct net_device *dev)
1220{
1221 return &((struct dvb_net_priv *) netdev_priv(dev))->stats;
1222}
1223
1224static const struct header_ops dvb_header_ops = { 1217static const struct header_ops dvb_header_ops = {
1225 .create = eth_header, 1218 .create = eth_header,
1226 .parse = eth_header_parse, 1219 .parse = eth_header_parse,
1227 .rebuild = eth_rebuild_header, 1220 .rebuild = eth_rebuild_header,
1228}; 1221};
1229 1222
1223
1224static const struct net_device_ops dvb_netdev_ops = {
1225 .ndo_open = dvb_net_open,
1226 .ndo_stop = dvb_net_stop,
1227 .ndo_start_xmit = dvb_net_tx,
1228 .ndo_set_multicast_list = dvb_net_set_multicast_list,
1229 .ndo_set_mac_address = dvb_net_set_mac,
1230 .ndo_change_mtu = eth_change_mtu,
1231 .ndo_validate_addr = eth_validate_addr,
1232};
1233
1230static void dvb_net_setup(struct net_device *dev) 1234static void dvb_net_setup(struct net_device *dev)
1231{ 1235{
1232 ether_setup(dev); 1236 ether_setup(dev);
1233 1237
1234 dev->header_ops = &dvb_header_ops; 1238 dev->header_ops = &dvb_header_ops;
1235 dev->open = dvb_net_open; 1239 dev->netdev_ops = &dvb_netdev_ops;
1236 dev->stop = dvb_net_stop;
1237 dev->hard_start_xmit = dvb_net_tx;
1238 dev->get_stats = dvb_net_get_stats;
1239 dev->set_multicast_list = dvb_net_set_multicast_list;
1240 dev->set_mac_address = dvb_net_set_mac;
1241 dev->mtu = 4096; 1240 dev->mtu = 4096;
1242 dev->mc_count = 0; 1241 dev->mc_count = 0;
1243 1242
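The dvb_net hunks move the counters from a driver-private struct net_device_stats into the stats field embedded in struct net_device, which also makes the ->get_stats callback unnecessary. A trimmed userspace stand-in of the idea (structures cut down to two counters):

/* Counters live in the device itself; no private copy or hook. */
#include <stdio.h>

struct net_device_stats { unsigned long rx_packets, rx_errors; };
struct net_device { struct net_device_stats stats; };

static void rx_ok(struct net_device *dev)
{
	dev->stats.rx_packets++;	/* was: priv->stats.rx_packets++ */
}

static void rx_bad(struct net_device *dev)
{
	dev->stats.rx_errors++;
}

int main(void)
{
	struct net_device dev = { { 0, 0 } };

	rx_ok(&dev);
	rx_bad(&dev);
	printf("rx_packets=%lu rx_errors=%lu\n",
	       dev.stats.rx_packets, dev.stats.rx_errors);
	return 0;
}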
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index b89f476cd0a9..c63817117c0a 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -308,10 +308,11 @@ static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
308{ 308{
309 int rc = 1; 309 int rc = 1;
310 310
311 dctlprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
312 ioctl->ioc->name, ioctl->ioc->id));
313 if (ioctl == NULL) 311 if (ioctl == NULL)
314 return; 312 return;
313 dctlprintk(ioctl->ioc,
314 printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
315 ioctl->ioc->name, ioctl->ioc->id));
315 316
316 ioctl->wait_done = 0; 317 ioctl->wait_done = 0;
317 if (ioctl->reset & MPTCTL_RESET_OK) 318 if (ioctl->reset & MPTCTL_RESET_OK)
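The mptctl hunk reorders the function so the debug printk, which dereferences ioctl->ioc, runs only after the NULL check on ioctl. The general pattern, in a minimal hypothetical form:

/* Check the pointer before any dereference, including ones buried
 * inside debug macros. Names here are illustrative only. */
#include <stdio.h>

struct ioctl_ctx { int host_id; };

static void timeout_expired(struct ioctl_ctx *ioctl)
{
	if (ioctl == NULL)
		return;
	/* safe only after the check above */
	printf("Timeout Expired! Host %d\n", ioctl->host_id);
}

int main(void)
{
	struct ioctl_ctx ctx = { .host_id = 0 };

	timeout_expired(NULL);	/* returns early, no crash */
	timeout_expired(&ctx);
	return 0;
}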
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index a13f6eecd25b..c2804f26cb44 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -106,7 +106,6 @@ struct mpt_lan_priv {
106 106
107 u32 total_posted; 107 u32 total_posted;
108 u32 total_received; 108 u32 total_received;
109 struct net_device_stats stats; /* Per device statistics */
110 109
111 struct delayed_work post_buckets_task; 110 struct delayed_work post_buckets_task;
112 struct net_device *dev; 111 struct net_device *dev;
@@ -548,15 +547,6 @@ mpt_lan_close(struct net_device *dev)
548} 547}
549 548
550/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 549/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
551static struct net_device_stats *
552mpt_lan_get_stats(struct net_device *dev)
553{
554 struct mpt_lan_priv *priv = netdev_priv(dev);
555
556 return (struct net_device_stats *) &priv->stats;
557}
558
559/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
560static int 550static int
561mpt_lan_change_mtu(struct net_device *dev, int new_mtu) 551mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
562{ 552{
@@ -594,8 +584,8 @@ mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
594 ctx = GET_LAN_BUFFER_CONTEXT(tmsg); 584 ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
595 sent = priv->SendCtl[ctx].skb; 585 sent = priv->SendCtl[ctx].skb;
596 586
597 priv->stats.tx_packets++; 587 dev->stats.tx_packets++;
598 priv->stats.tx_bytes += sent->len; 588 dev->stats.tx_bytes += sent->len;
599 589
600 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n", 590 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
601 IOC_AND_NETDEV_NAMES_s_s(dev), 591 IOC_AND_NETDEV_NAMES_s_s(dev),
@@ -636,7 +626,7 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
636 626
637 switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) { 627 switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
638 case MPI_IOCSTATUS_SUCCESS: 628 case MPI_IOCSTATUS_SUCCESS:
639 priv->stats.tx_packets += count; 629 dev->stats.tx_packets += count;
640 break; 630 break;
641 631
642 case MPI_IOCSTATUS_LAN_CANCELED: 632 case MPI_IOCSTATUS_LAN_CANCELED:
@@ -644,13 +634,13 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
644 break; 634 break;
645 635
646 case MPI_IOCSTATUS_INVALID_SGL: 636 case MPI_IOCSTATUS_INVALID_SGL:
647 priv->stats.tx_errors += count; 637 dev->stats.tx_errors += count;
648 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n", 638 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
649 IOC_AND_NETDEV_NAMES_s_s(dev)); 639 IOC_AND_NETDEV_NAMES_s_s(dev));
650 goto out; 640 goto out;
651 641
652 default: 642 default:
653 priv->stats.tx_errors += count; 643 dev->stats.tx_errors += count;
654 break; 644 break;
655 } 645 }
656 646
@@ -661,7 +651,7 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
661 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext)); 651 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
662 652
663 sent = priv->SendCtl[ctx].skb; 653 sent = priv->SendCtl[ctx].skb;
664 priv->stats.tx_bytes += sent->len; 654 dev->stats.tx_bytes += sent->len;
665 655
666 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n", 656 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
667 IOC_AND_NETDEV_NAMES_s_s(dev), 657 IOC_AND_NETDEV_NAMES_s_s(dev),
@@ -842,8 +832,8 @@ mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
842 "delivered to upper level.\n", 832 "delivered to upper level.\n",
843 IOC_AND_NETDEV_NAMES_s_s(dev), skb->len)); 833 IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
844 834
845 priv->stats.rx_bytes += skb->len; 835 dev->stats.rx_bytes += skb->len;
846 priv->stats.rx_packets++; 836 dev->stats.rx_packets++;
847 837
848 skb->dev = dev; 838 skb->dev = dev;
849 netif_rx(skb); 839 netif_rx(skb);
@@ -1308,6 +1298,14 @@ mpt_lan_post_receive_buckets_work(struct work_struct *work)
1308 post_buckets_task.work)); 1298 post_buckets_task.work));
1309} 1299}
1310 1300
1301static const struct net_device_ops mpt_netdev_ops = {
1302 .ndo_open = mpt_lan_open,
1303 .ndo_stop = mpt_lan_close,
1304 .ndo_start_xmit = mpt_lan_sdu_send,
1305 .ndo_change_mtu = mpt_lan_change_mtu,
1306 .ndo_tx_timeout = mpt_lan_tx_timeout,
1307};
1308
1311/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1309/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1312static struct net_device * 1310static struct net_device *
1313mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) 1311mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
@@ -1372,15 +1370,7 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1372 priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ? 1370 priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1373 tx_max_out_p : MPT_TX_MAX_OUT_LIM; 1371 tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1374 1372
1375 dev->open = mpt_lan_open; 1373 dev->netdev_ops = &mpt_netdev_ops;
1376 dev->stop = mpt_lan_close;
1377 dev->get_stats = mpt_lan_get_stats;
1378 dev->set_multicast_list = NULL;
1379 dev->change_mtu = mpt_lan_change_mtu;
1380 dev->hard_start_xmit = mpt_lan_sdu_send;
1381
1382/* Not in 2.3.42. Need 2.3.45+ */
1383 dev->tx_timeout = mpt_lan_tx_timeout;
1384 dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT; 1374 dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1385 1375
1386 dlprintk((KERN_INFO MYNAM ": Finished registering dev " 1376 dlprintk((KERN_INFO MYNAM ": Finished registering dev "
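Like dvb_net above, mptlan gathers its callbacks into a const struct net_device_ops table referenced from the device, instead of assigning individual function-pointer fields. A trimmed userspace stand-in of the ops-table pattern:

/* Stand-in types; the real net_device_ops has many more hooks. */
#include <stdio.h>

struct net_device;

struct net_device_ops {
	int (*ndo_open)(struct net_device *dev);
	int (*ndo_stop)(struct net_device *dev);
};

struct net_device { const struct net_device_ops *netdev_ops; };

static int demo_open(struct net_device *dev) { (void)dev; return 0; }
static int demo_stop(struct net_device *dev) { (void)dev; return 0; }

static const struct net_device_ops demo_ops = {
	.ndo_open = demo_open,
	.ndo_stop = demo_stop,
};

int main(void)
{
	struct net_device dev = { .netdev_ops = &demo_ops };

	printf("open -> %d\n", dev.netdev_ops->ndo_open(&dev));
	printf("stop -> %d\n", dev.netdev_ops->ndo_stop(&dev));
	return 0;
}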
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index 3a273ccef3f2..f92595c8f165 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -1453,6 +1453,9 @@ void wm8350_device_exit(struct wm8350 *wm8350)
1453{ 1453{
1454 int i; 1454 int i;
1455 1455
1456 for (i = 0; i < ARRAY_SIZE(wm8350->pmic.led); i++)
1457 platform_device_unregister(wm8350->pmic.led[i].pdev);
1458
1456 for (i = 0; i < ARRAY_SIZE(wm8350->pmic.pdev); i++) 1459 for (i = 0; i < ARRAY_SIZE(wm8350->pmic.pdev); i++)
1457 platform_device_unregister(wm8350->pmic.pdev[i]); 1460 platform_device_unregister(wm8350->pmic.pdev[i]);
1458 1461
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index fee7304102af..419c378bd24b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -120,7 +120,7 @@ config TIFM_CORE
120 cards are supported via 'MMC/SD Card support: TI Flash Media MMC/SD 120 cards are supported via 'MMC/SD Card support: TI Flash Media MMC/SD
121 Interface support (MMC_TIFM_SD)'. 121 Interface support (MMC_TIFM_SD)'.
122 122
123 To compile this driver as a module, choose M here: the module will 123 To compile this driver as a module, choose M here: the module will
124 be called tifm_core. 124 be called tifm_core.
125 125
126config TIFM_7XX1 126config TIFM_7XX1
@@ -133,100 +133,9 @@ config TIFM_7XX1
133 To make actual use of the device, you will have to select some 133 To make actual use of the device, you will have to select some
134 flash card format drivers, as outlined in the TIFM_CORE Help. 134 flash card format drivers, as outlined in the TIFM_CORE Help.
135 135
136 To compile this driver as a module, choose M here: the module will 136 To compile this driver as a module, choose M here: the module will
137 be called tifm_7xx1. 137 be called tifm_7xx1.
138 138
139config ACER_WMI
140 tristate "Acer WMI Laptop Extras (EXPERIMENTAL)"
141 depends on X86
142 depends on EXPERIMENTAL
143 depends on ACPI
144 depends on LEDS_CLASS
145 depends on NEW_LEDS
146 depends on BACKLIGHT_CLASS_DEVICE
147 depends on SERIO_I8042
148 depends on RFKILL
149 select ACPI_WMI
150 ---help---
151 This is a driver for newer Acer (and Wistron) laptops. It adds
152 wireless radio and bluetooth control, and on some laptops,
153 exposes the mail LED and LCD backlight.
154
155 For more information about this driver see
156 <file:Documentation/laptops/acer-wmi.txt>
157
158 If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M
159 here.
160
161config ASUS_LAPTOP
162 tristate "Asus Laptop Extras (EXPERIMENTAL)"
163 depends on X86
164 depends on ACPI
165 depends on EXPERIMENTAL && !ACPI_ASUS
166 depends on LEDS_CLASS
167 depends on NEW_LEDS
168 depends on BACKLIGHT_CLASS_DEVICE
169 ---help---
170 This is the new Linux driver for Asus laptops. It may also support some
171 MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
172 standard ACPI events that go through /proc/acpi/events. It also adds
173 support for video output switching, LCD backlight control, Bluetooth and
174 Wlan control, and most importantly, allows you to blink those fancy LEDs.
175
176 For more information and a userspace daemon for handling the extra
177 buttons see <http://acpi4asus.sf.net/>.
178
179 If you have an ACPI-compatible ASUS laptop, say Y or M here.
180
181config FUJITSU_LAPTOP
182 tristate "Fujitsu Laptop Extras"
183 depends on X86
184 depends on ACPI
185 depends on INPUT
186 depends on BACKLIGHT_CLASS_DEVICE
187 ---help---
188 This is a driver for laptops built by Fujitsu:
189
190 * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks
191 * Possibly other Fujitsu laptop models
192 * Tested with S6410 and S7020
193
194 It adds support for LCD brightness control and some hotkeys.
195
196 If you have a Fujitsu laptop, say Y or M here.
197
198config FUJITSU_LAPTOP_DEBUG
199 bool "Verbose debug mode for Fujitsu Laptop Extras"
200 depends on FUJITSU_LAPTOP
201 default n
202 ---help---
203 Enables extra debug output from the fujitsu extras driver, at the
204 expense of a slight increase in driver size.
205
206 If you are not sure, say N here.
207
208config TC1100_WMI
209 tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)"
210 depends on X86 && !X86_64
211 depends on EXPERIMENTAL
212 depends on ACPI
213 select ACPI_WMI
214 ---help---
215 This is a driver for the WMI extensions (wireless and bluetooth power
216 control) of the HP Compaq TC1100 tablet.
217
218config HP_WMI
219 tristate "HP WMI extras"
220 depends on ACPI_WMI
221 depends on INPUT
222 depends on RFKILL
223 help
224 Say Y here if you want to support WMI-based hotkeys on HP laptops and
225 to read data from WMI such as docking or ambient light sensor state.
226
227 To compile this driver as a module, choose M here: the module will
228 be called hp-wmi.
229
230config ICS932S401 139config ICS932S401
231 tristate "Integrated Circuits ICS932S401" 140 tristate "Integrated Circuits ICS932S401"
232 depends on I2C && EXPERIMENTAL 141 depends on I2C && EXPERIMENTAL
@@ -237,170 +146,6 @@ config ICS932S401
237 This driver can also be built as a module. If so, the module 146 This driver can also be built as a module. If so, the module
238 will be called ics932s401. 147 will be called ics932s401.
239 148
240config MSI_LAPTOP
241 tristate "MSI Laptop Extras"
242 depends on X86
243 depends on ACPI
244 depends on BACKLIGHT_CLASS_DEVICE
245 ---help---
246 This is a driver for laptops built by MSI (MICRO-STAR
247 INTERNATIONAL):
248
249 MSI MegaBook S270 (MS-1013)
250 Cytron/TCM/Medion/Tchibo MD96100/SAM2000
251
252 It adds support for Bluetooth, WLAN and LCD brightness control.
253
254 More information about this driver is available at
255 <http://0pointer.de/lennart/tchibo.html>.
256
257 If you have an MSI S270 laptop, say Y or M here.
258
259config PANASONIC_LAPTOP
260 tristate "Panasonic Laptop Extras"
261 depends on X86 && INPUT && ACPI
262 depends on BACKLIGHT_CLASS_DEVICE
263 ---help---
264 This driver adds support for access to backlight control and hotkeys
265 on Panasonic Let's Note laptops.
266
267 If you have a Panasonic Let's note laptop (such as the R1(N variant),
268 R2, R3, R5, T2, W2 and Y2 series), say Y.
269
270config COMPAL_LAPTOP
271 tristate "Compal Laptop Extras"
272 depends on X86
273 depends on ACPI
274 depends on BACKLIGHT_CLASS_DEVICE
275 ---help---
276 This is a driver for laptops built by Compal:
277
278 Compal FL90/IFL90
279 Compal FL91/IFL91
280 Compal FL92/JFL92
281 Compal FT00/IFT00
282
283 It adds support for Bluetooth, WLAN and LCD brightness control.
284
285 If you have an Compal FL9x/IFL9x/FT00 laptop, say Y or M here.
286
287config SONY_LAPTOP
288 tristate "Sony Laptop Extras"
289 depends on X86 && ACPI
290 select BACKLIGHT_CLASS_DEVICE
291 depends on INPUT
292 ---help---
293 This mini-driver drives the SNC and SPIC devices present in the ACPI
294 BIOS of the Sony Vaio laptops.
295
296 It gives access to some extra laptop functionalities like Bluetooth,
297 screen brightness control, Fn keys and allows powering on/off some
298 devices.
299
300 Read <file:Documentation/laptops/sony-laptop.txt> for more information.
301
302config SONYPI_COMPAT
303 bool "Sonypi compatibility"
304 depends on SONY_LAPTOP
305 ---help---
306 Build the sonypi driver compatibility code into the sony-laptop driver.
307
308config THINKPAD_ACPI
309 tristate "ThinkPad ACPI Laptop Extras"
310 depends on X86 && ACPI
311 select BACKLIGHT_LCD_SUPPORT
312 select BACKLIGHT_CLASS_DEVICE
313 select HWMON
314 select NVRAM
315 select INPUT
316 select NEW_LEDS
317 select LEDS_CLASS
318 select NET
319 select RFKILL
320 ---help---
321 This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
322 support for Fn-Fx key combinations, Bluetooth control, video
323 output switching, ThinkLight control, UltraBay eject and more.
324 For more information about this driver see
325 <file:Documentation/laptops/thinkpad-acpi.txt> and
326 <http://ibm-acpi.sf.net/> .
327
328 This driver was formerly known as ibm-acpi.
329
330 If you have an IBM or Lenovo ThinkPad laptop, say Y or M here.
331
332config THINKPAD_ACPI_DEBUG
333 bool "Verbose debug mode"
334 depends on THINKPAD_ACPI
335 default n
336 ---help---
337 Enables extra debugging information, at the expense of a slightly
338 increase in driver size.
339
340 If you are not sure, say N here.
341
342config THINKPAD_ACPI_DOCK
343 bool "Legacy Docking Station Support"
344 depends on THINKPAD_ACPI
345 depends on ACPI_DOCK=n
346 default n
347 ---help---
348 Allows the thinkpad_acpi driver to handle docking station events.
349 This support was made obsolete by the generic ACPI docking station
350 support (CONFIG_ACPI_DOCK). It will allow locking and removing the
351 laptop from the docking station, but will not properly connect PCI
352 devices.
353
354 If you are not sure, say N here.
355
356config THINKPAD_ACPI_BAY
357 bool "Legacy Removable Bay Support"
358 depends on THINKPAD_ACPI
359 default y
360 ---help---
361 Allows the thinkpad_acpi driver to handle removable bays. It will
362 electrically disable the device in the bay, and also generate
363 notifications when the bay lever is ejected or inserted.
364
365 If you are not sure, say Y here.
366
367config THINKPAD_ACPI_VIDEO
368 bool "Video output control support"
369 depends on THINKPAD_ACPI
370 default y
371 ---help---
372 Allows the thinkpad_acpi driver to provide an interface to control
373 the various video output ports.
374
375 This feature often won't work well, depending on ThinkPad model,
376 display state, video output devices in use, whether there is a X
377 server running, phase of the moon, and the current mood of
378 Schroedinger's cat. If you can use X.org's RandR to control
379 your ThinkPad's video output ports instead of this feature,
380 don't think twice: do it and say N here to save some memory.
381
382 If you are not sure, say Y here.
383
384config THINKPAD_ACPI_HOTKEY_POLL
385 bool "Support NVRAM polling for hot keys"
386 depends on THINKPAD_ACPI
387 default y
388 ---help---
389 Some thinkpad models benefit from NVRAM polling to detect a few of
390 the hot key press events. If you know your ThinkPad model does not
391 need to do NVRAM polling to support any of the hot keys you use,
392 unselecting this option will save about 1kB of memory.
393
394 ThinkPads T40 and newer, R52 and newer, and X31 and newer are
395 unlikely to need NVRAM polling in their latest BIOS versions.
396
397 NVRAM polling can detect at most the following keys: ThinkPad/Access
398 IBM, Zoom, Switch Display (fn+F7), ThinkLight, Volume up/down/mute,
399 Brightness up/down, Display Expand (fn+F8), Hibernate (fn+F12).
400
401 If you are not sure, say Y here. The driver enables polling only if
402 it is strictly necessary to do so.
403
404config ATMEL_SSC 149config ATMEL_SSC
405 tristate "Device driver for Atmel SSC peripheral" 150 tristate "Device driver for Atmel SSC peripheral"
406 depends on AVR32 || ARCH_AT91 151 depends on AVR32 || ARCH_AT91
@@ -413,31 +158,6 @@ config ATMEL_SSC
413 158
414 If unsure, say N. 159 If unsure, say N.
415 160
416config INTEL_MENLOW
417 tristate "Thermal Management driver for Intel menlow platform"
418 depends on ACPI_THERMAL
419 select THERMAL
420 depends on X86
421 ---help---
422 ACPI thermal management enhancement driver on
423 Intel Menlow platform.
424
425 If unsure, say N.
426
427config EEEPC_LAPTOP
428 tristate "Eee PC Hotkey Driver (EXPERIMENTAL)"
429 depends on X86
430 depends on ACPI
431 depends on BACKLIGHT_CLASS_DEVICE
432 depends on HWMON
433 depends on EXPERIMENTAL
434 depends on RFKILL
435 ---help---
436 This driver supports the Fn-Fx keys on Eee PC laptops.
437 It also adds the ability to switch camera/wlan on/off.
438
439 If you have an Eee PC laptop, say Y or M here.
440
441config ENCLOSURE_SERVICES 161config ENCLOSURE_SERVICES
442 tristate "Enclosure Services" 162 tristate "Enclosure Services"
443 default n 163 default n
@@ -498,6 +218,18 @@ config SGI_GRU_DEBUG
498 This option enables addition debugging code for the SGI GRU driver. If 218 This option enables addition debugging code for the SGI GRU driver. If
499 you are unsure, say N. 219 you are unsure, say N.
500 220
221config DELL_LAPTOP
222 tristate "Dell Laptop Extras (EXPERIMENTAL)"
223 depends on X86
224 depends on DCDBAS
225 depends on EXPERIMENTAL
226 depends on BACKLIGHT_CLASS_DEVICE
227 depends on RFKILL
228 default n
229 ---help---
230 This driver adds support for rfkill and backlight control to Dell
231 laptops.
232
501source "drivers/misc/c2port/Kconfig" 233source "drivers/misc/c2port/Kconfig"
502 234
503endif # MISC_DEVICES 235endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 817f7f5ab3bd..9cf8ae6e4b39 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -1,32 +1,20 @@
1# 1#
2# Makefile for misc devices that really don't fit anywhere else. 2# Makefile for misc devices that really don't fit anywhere else.
3# 3#
4obj- := misc.o # Dummy rule to force built-in.o to be made
5 4
6obj-$(CONFIG_IBM_ASM) += ibmasm/ 5obj-$(CONFIG_IBM_ASM) += ibmasm/
7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ 6obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
8obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
9obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
10obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
11obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
12obj-$(CONFIG_ACER_WMI) += acer-wmi.o
13obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o 7obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
14obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o 8obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
15obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o 9obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
16obj-$(CONFIG_HP_WMI) += hp-wmi.o
17obj-$(CONFIG_ICS932S401) += ics932s401.o 10obj-$(CONFIG_ICS932S401) += ics932s401.o
18obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
19obj-$(CONFIG_LKDTM) += lkdtm.o 11obj-$(CONFIG_LKDTM) += lkdtm.o
20obj-$(CONFIG_TIFM_CORE) += tifm_core.o 12obj-$(CONFIG_TIFM_CORE) += tifm_core.o
13obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
21obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o 14obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
22obj-$(CONFIG_PHANTOM) += phantom.o 15obj-$(CONFIG_PHANTOM) += phantom.o
23obj-$(CONFIG_SGI_IOC4) += ioc4.o 16obj-$(CONFIG_SGI_IOC4) += ioc4.o
24obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
25obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
26obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
27obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
28obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o 17obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
29obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
30obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o 18obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
31obj-$(CONFIG_KGDB_TESTS) += kgdbts.o 19obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
32obj-$(CONFIG_SGI_XP) += sgi-xp/ 20obj-$(CONFIG_SGI_XP) += sgi-xp/
diff --git a/drivers/misc/dell-laptop.c b/drivers/misc/dell-laptop.c
new file mode 100644
index 000000000000..4d33a2068b7a
--- /dev/null
+++ b/drivers/misc/dell-laptop.c
@@ -0,0 +1,436 @@
1/*
2 * Driver for Dell laptop extras
3 *
4 * Copyright (c) Red Hat <mjg@redhat.com>
5 *
6 * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
7 * Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/backlight.h>
19#include <linux/err.h>
20#include <linux/dmi.h>
21#include <linux/io.h>
22#include <linux/rfkill.h>
23#include <linux/power_supply.h>
24#include <linux/acpi.h>
25#include "../firmware/dcdbas.h"
26
27#define BRIGHTNESS_TOKEN 0x7d
28
29/* This structure will be modified by the firmware when we enter
30 * system management mode, hence the volatiles */
31
32struct calling_interface_buffer {
33 u16 class;
34 u16 select;
35 volatile u32 input[4];
36 volatile u32 output[4];
37} __packed;
38
39struct calling_interface_token {
40 u16 tokenID;
41 u16 location;
42 union {
43 u16 value;
44 u16 stringlength;
45 };
46};
47
48struct calling_interface_structure {
49 struct dmi_header header;
50 u16 cmdIOAddress;
51 u8 cmdIOCode;
52 u32 supportedCmds;
53 struct calling_interface_token tokens[];
54} __packed;
55
56static int da_command_address;
57static int da_command_code;
58static int da_num_tokens;
59static struct calling_interface_token *da_tokens;
60
61static struct backlight_device *dell_backlight_device;
62static struct rfkill *wifi_rfkill;
63static struct rfkill *bluetooth_rfkill;
64static struct rfkill *wwan_rfkill;
65
66static const struct dmi_system_id __initdata dell_device_table[] = {
67 {
68 .ident = "Dell laptop",
69 .matches = {
70 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
71 DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
72 },
73 },
74 { }
75};
76
77static void parse_da_table(const struct dmi_header *dm)
78{
79 /* Final token is a terminator, so we don't want to copy it */
80 int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
81 struct calling_interface_structure *table =
82 container_of(dm, struct calling_interface_structure, header);
83
84 /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least
85 6 bytes of entry */
86
87 if (dm->length < 17)
88 return;
89
90 da_command_address = table->cmdIOAddress;
91 da_command_code = table->cmdIOCode;
92
93 da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
94 sizeof(struct calling_interface_token),
95 GFP_KERNEL);
96
97 if (!da_tokens)
98 return;
99
100 memcpy(da_tokens+da_num_tokens, table->tokens,
101 sizeof(struct calling_interface_token) * tokens);
102
103 da_num_tokens += tokens;
104}
105
106static void find_tokens(const struct dmi_header *dm)
107{
108 switch (dm->type) {
109 case 0xd4: /* Indexed IO */
110 break;
111 case 0xd5: /* Protected Area Type 1 */
112 break;
113 case 0xd6: /* Protected Area Type 2 */
114 break;
115 case 0xda: /* Calling interface */
116 parse_da_table(dm);
117 break;
118 }
119}
120
121static int find_token_location(int tokenid)
122{
123 int i;
124 for (i = 0; i < da_num_tokens; i++) {
125 if (da_tokens[i].tokenID == tokenid)
126 return da_tokens[i].location;
127 }
128
129 return -1;
130}
131
132static struct calling_interface_buffer *
133dell_send_request(struct calling_interface_buffer *buffer, int class,
134 int select)
135{
136 struct smi_cmd command;
137
138 command.magic = SMI_CMD_MAGIC;
139 command.command_address = da_command_address;
140 command.command_code = da_command_code;
141 command.ebx = virt_to_phys(buffer);
142 command.ecx = 0x42534931;
143
144 buffer->class = class;
145 buffer->select = select;
146
147 dcdbas_smi_request(&command);
148
149 return buffer;
150}
151
152/* Derived from information in DellWirelessCtl.cpp:
153 Class 17, select 11 is radio control. It returns an array of 32-bit values.
154
155 result[0]: return code
156 result[1]:
157 Bit 0: Hardware switch supported
158 Bit 1: Wifi locator supported
159 Bit 2: Wifi is supported
160 Bit 3: Bluetooth is supported
161 Bit 4: WWAN is supported
162 Bit 5: Wireless keyboard supported
163 Bits 6-7: Reserved
164 Bit 8: Wifi is installed
165 Bit 9: Bluetooth is installed
166 Bit 10: WWAN is installed
167 Bits 11-15: Reserved
168 Bit 16: Hardware switch is on
169 Bit 17: Wifi is blocked
170 Bit 18: Bluetooth is blocked
171 Bit 19: WWAN is blocked
172 Bits 20-31: Reserved
173 result[2]: NVRAM size in bytes
174 result[3]: NVRAM format version number
175*/
176
177static int dell_rfkill_set(int radio, enum rfkill_state state)
178{
179 struct calling_interface_buffer buffer;
180 int disable = (state == RFKILL_STATE_UNBLOCKED) ? 0 : 1;
181
182 memset(&buffer, 0, sizeof(struct calling_interface_buffer));
183 buffer.input[0] = (1 | (radio<<8) | (disable << 16));
184 dell_send_request(&buffer, 17, 11);
185
186 return 0;
187}
188
189static int dell_wifi_set(void *data, enum rfkill_state state)
190{
191 return dell_rfkill_set(1, state);
192}
193
194static int dell_bluetooth_set(void *data, enum rfkill_state state)
195{
196 return dell_rfkill_set(2, state);
197}
198
199static int dell_wwan_set(void *data, enum rfkill_state state)
200{
201 return dell_rfkill_set(3, state);
202}
203
204static int dell_rfkill_get(int bit, enum rfkill_state *state)
205{
206 struct calling_interface_buffer buffer;
207 int status;
208 int new_state = RFKILL_STATE_HARD_BLOCKED;
209
210 memset(&buffer, 0, sizeof(struct calling_interface_buffer));
211 dell_send_request(&buffer, 17, 11);
212 status = buffer.output[1];
213
214 if (status & (1<<16))
215 new_state = RFKILL_STATE_SOFT_BLOCKED;
216
217 if (status & (1<<bit))
218 *state = new_state;
219 else
220 *state = RFKILL_STATE_UNBLOCKED;
221
222 return 0;
223}
224
225static int dell_wifi_get(void *data, enum rfkill_state *state)
226{
227 return dell_rfkill_get(17, state);
228}
229
230static int dell_bluetooth_get(void *data, enum rfkill_state *state)
231{
232 return dell_rfkill_get(18, state);
233}
234
235static int dell_wwan_get(void *data, enum rfkill_state *state)
236{
237 return dell_rfkill_get(19, state);
238}
239
240static int dell_setup_rfkill(void)
241{
242 struct calling_interface_buffer buffer;
243 int status;
244 int ret;
245
246 memset(&buffer, 0, sizeof(struct calling_interface_buffer));
247 dell_send_request(&buffer, 17, 11);
248 status = buffer.output[1];
249
250 if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
251 wifi_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_WLAN);
252 if (!wifi_rfkill)
253 goto err_wifi;
254 wifi_rfkill->name = "dell-wifi";
255 wifi_rfkill->toggle_radio = dell_wifi_set;
256 wifi_rfkill->get_state = dell_wifi_get;
257 ret = rfkill_register(wifi_rfkill);
258 if (ret)
259 goto err_wifi;
260 }
261
262 if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
263 bluetooth_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_BLUETOOTH);
264 if (!bluetooth_rfkill)
265 goto err_bluetooth;
266 bluetooth_rfkill->name = "dell-bluetooth";
267 bluetooth_rfkill->toggle_radio = dell_bluetooth_set;
268 bluetooth_rfkill->get_state = dell_bluetooth_get;
269 ret = rfkill_register(bluetooth_rfkill);
270 if (ret)
271 goto err_bluetooth;
272 }
273
274 if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
275 wwan_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_WWAN);
276 if (!wwan_rfkill)
277 goto err_wwan;
278 wwan_rfkill->name = "dell-wwan";
279 wwan_rfkill->toggle_radio = dell_wwan_set;
280 wwan_rfkill->get_state = dell_wwan_get;
281 ret = rfkill_register(wwan_rfkill);
282 if (ret)
283 goto err_wwan;
284 }
285
286 return 0;
287err_wwan:
288 if (wwan_rfkill)
289 rfkill_free(wwan_rfkill);
290 if (bluetooth_rfkill) {
291 rfkill_unregister(bluetooth_rfkill);
292 bluetooth_rfkill = NULL;
293 }
294err_bluetooth:
295 if (bluetooth_rfkill)
296 rfkill_free(bluetooth_rfkill);
297 if (wifi_rfkill) {
298 rfkill_unregister(wifi_rfkill);
299 wifi_rfkill = NULL;
300 }
301err_wifi:
302 if (wifi_rfkill)
303 rfkill_free(wifi_rfkill);
304
305 return ret;
306}
307
308static int dell_send_intensity(struct backlight_device *bd)
309{
310 struct calling_interface_buffer buffer;
311
312 memset(&buffer, 0, sizeof(struct calling_interface_buffer));
313 buffer.input[0] = find_token_location(BRIGHTNESS_TOKEN);
314 buffer.input[1] = bd->props.brightness;
315
316 if (buffer.input[0] == -1)
317 return -ENODEV;
318
319 if (power_supply_is_system_supplied() > 0)
320 dell_send_request(&buffer, 1, 2);
321 else
322 dell_send_request(&buffer, 1, 1);
323
324 return 0;
325}
326
327static int dell_get_intensity(struct backlight_device *bd)
328{
329 struct calling_interface_buffer buffer;
330
331 memset(&buffer, 0, sizeof(struct calling_interface_buffer));
332 buffer.input[0] = find_token_location(BRIGHTNESS_TOKEN);
333
334 if (buffer.input[0] == -1)
335 return -ENODEV;
336
337 if (power_supply_is_system_supplied() > 0)
338 dell_send_request(&buffer, 0, 2);
339 else
340 dell_send_request(&buffer, 0, 1);
341
342 return buffer.output[1];
343}
344
345static struct backlight_ops dell_ops = {
346 .get_brightness = dell_get_intensity,
347 .update_status = dell_send_intensity,
348};
349
350static int __init dell_init(void)
351{
352 struct calling_interface_buffer buffer;
353 int max_intensity = 0;
354 int ret;
355
356 if (!dmi_check_system(dell_device_table))
357 return -ENODEV;
358
359 dmi_walk(find_tokens);
360
361 if (!da_tokens) {
362 printk(KERN_INFO "dell-laptop: Unable to find dmi tokens\n");
363 return -ENODEV;
364 }
365
366 ret = dell_setup_rfkill();
367
368 if (ret) {
369 printk(KERN_WARNING "dell-laptop: Unable to setup rfkill\n");
370 goto out;
371 }
372
373#ifdef CONFIG_ACPI
374 /* In the event of an ACPI backlight being available, don't
375 * register the platform controller.
376 */
377 if (acpi_video_backlight_support())
378 return 0;
379#endif
380
381 memset(&buffer, 0, sizeof(struct calling_interface_buffer));
382 buffer.input[0] = find_token_location(BRIGHTNESS_TOKEN);
383
384 if (buffer.input[0] != -1) {
385 dell_send_request(&buffer, 0, 2);
386 max_intensity = buffer.output[3];
387 }
388
389 if (max_intensity) {
390 dell_backlight_device = backlight_device_register(
391 "dell_backlight",
392 NULL, NULL,
393 &dell_ops);
394
395 if (IS_ERR(dell_backlight_device)) {
396 ret = PTR_ERR(dell_backlight_device);
397 dell_backlight_device = NULL;
398 goto out;
399 }
400
401 dell_backlight_device->props.max_brightness = max_intensity;
402 dell_backlight_device->props.brightness =
403 dell_get_intensity(dell_backlight_device);
404 backlight_update_status(dell_backlight_device);
405 }
406
407 return 0;
408out:
409 if (wifi_rfkill)
410 rfkill_unregister(wifi_rfkill);
411 if (bluetooth_rfkill)
412 rfkill_unregister(bluetooth_rfkill);
413 if (wwan_rfkill)
414 rfkill_unregister(wwan_rfkill);
415 kfree(da_tokens);
416 return ret;
417}
418
419static void __exit dell_exit(void)
420{
421 backlight_device_unregister(dell_backlight_device);
422 if (wifi_rfkill)
423 rfkill_unregister(wifi_rfkill);
424 if (bluetooth_rfkill)
425 rfkill_unregister(bluetooth_rfkill);
426 if (wwan_rfkill)
427 rfkill_unregister(wwan_rfkill);
428}
429
430module_init(dell_init);
431module_exit(dell_exit);
432
433MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
434MODULE_DESCRIPTION("Dell laptop driver");
435MODULE_LICENSE("GPL");
436MODULE_ALIAS("dmi:*svnDellInc.:*:ct8:*");
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 0736cff9d97a..3cf61ece71d7 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -119,7 +119,7 @@ enclosure_register(struct device *dev, const char *name, int components,
 	edev->edev.class = &enclosure_class;
 	edev->edev.parent = get_device(dev);
 	edev->cb = cb;
-	snprintf(edev->edev.bus_id, BUS_ID_SIZE, "%s", name);
+	dev_set_name(&edev->edev, name);
 	err = device_register(&edev->edev);
 	if (err)
 		goto err;
@@ -170,7 +170,7 @@ EXPORT_SYMBOL_GPL(enclosure_unregister);
 static void enclosure_link_name(struct enclosure_component *cdev, char *name)
 {
 	strcpy(name, "enclosure_device:");
-	strcat(name, cdev->cdev.bus_id);
+	strcat(name, dev_name(&cdev->cdev));
 }
 
 static void enclosure_remove_links(struct enclosure_component *cdev)
@@ -256,9 +256,9 @@ enclosure_component_register(struct enclosure_device *edev,
 	cdev = &ecomp->cdev;
 	cdev->parent = get_device(&edev->edev);
 	if (name)
-		snprintf(cdev->bus_id, BUS_ID_SIZE, "%s", name);
+		dev_set_name(cdev, name);
 	else
-		snprintf(cdev->bus_id, BUS_ID_SIZE, "%u", number);
 
+		dev_set_name(cdev, "%u", number);
 
 	cdev->release = enclosure_component_release;
 	cdev->groups = enclosure_groups;
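
These hunks are part of the removal of the fixed-size bus_id[] array from struct device: names are now set through dev_set_name(), which takes a printf-style format and allocates storage, and are read back through dev_name(). A sketch of the idiom, using a hypothetical demo_ helper; note that caller-supplied strings are best passed through a "%s" format rather than as the format string itself:

#include <linux/device.h>

/* Hypothetical helper: name a child device before registering it. */
static int demo_name_and_register(struct device *dev,
				  struct device *parent,
				  const char *name, unsigned int index)
{
	int err;

	dev->parent = parent;
	if (name)
		err = dev_set_name(dev, "%s", name);	/* literal string */
	else
		err = dev_set_name(dev, "%u", index);	/* formatted name */
	if (err)
		return err;
	return device_register(dev);
}
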
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 81152b3e360c..7957f525b2f4 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -95,11 +95,6 @@ struct xpnet_pending_msg {
 	atomic_t use_count;
 };
 
-/* driver specific structure pointed to by the device structure */
-struct xpnet_dev_private {
-	struct net_device_stats stats;
-};
-
 struct net_device *xpnet_device;
 
 /*
@@ -153,7 +148,6 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
 	struct sk_buff *skb;
 	void *dst;
 	enum xp_retval ret;
-	struct xpnet_dev_private *priv = netdev_priv(xpnet_device);
 
 	if (!XPNET_VALID_MSG(msg)) {
 		/*
@@ -161,7 +155,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
 		 */
 		xpc_received(partid, channel, (void *)msg);
 
-		priv->stats.rx_errors++;
+		xpnet_device->stats.rx_errors++;
 
 		return;
 	}
@@ -176,7 +170,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
 
 		xpc_received(partid, channel, (void *)msg);
 
-		priv->stats.rx_errors++;
+		xpnet_device->stats.rx_errors++;
 
 		return;
 	}
@@ -226,7 +220,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
 
 		xpc_received(partid, channel, (void *)msg);
 
-		priv->stats.rx_errors++;
+		xpnet_device->stats.rx_errors++;
 
 		return;
 	}
@@ -247,8 +241,8 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
 		skb_end_pointer(skb), skb->len);
 
 	xpnet_device->last_rx = jiffies;
-	priv->stats.rx_packets++;
-	priv->stats.rx_bytes += skb->len + ETH_HLEN;
+	xpnet_device->stats.rx_packets++;
+	xpnet_device->stats.rx_bytes += skb->len + ETH_HLEN;
 
 	netif_rx_ni(skb);
 	xpc_received(partid, channel, (void *)msg);
@@ -353,26 +347,6 @@ xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 /*
- * Required for the net_device structure.
- */
-static int
-xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map)
-{
-	return 0;
-}
-
-/*
- * Return statistics to the caller.
- */
-static struct net_device_stats *
-xpnet_dev_get_stats(struct net_device *dev)
-{
-	struct xpnet_dev_private *priv = netdev_priv(dev);
-
-	return &priv->stats;
-}
-
-/*
  * Notification that the other end has received the message and
  * DMA'd the skb information. At this point, they are done with
  * our side. When all recipients are done processing, we
@@ -453,7 +427,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct xpnet_pending_msg *queued_msg;
 	u64 start_addr, end_addr;
 	short dest_partid;
-	struct xpnet_dev_private *priv = netdev_priv(dev);
 	u16 embedded_bytes = 0;
 
 	dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
@@ -476,7 +449,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
 			 "packet\n", sizeof(struct xpnet_pending_msg));
 
-		priv->stats.tx_errors++;
+		dev->stats.tx_errors++;
 		return -ENOMEM;
 	}
 
@@ -526,8 +499,8 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		kfree(queued_msg);
 	}
 
-	priv->stats.tx_packets++;
-	priv->stats.tx_bytes += skb->len;
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
 
 	return 0;
 }
@@ -538,12 +511,19 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void
 xpnet_dev_tx_timeout(struct net_device *dev)
 {
-	struct xpnet_dev_private *priv = netdev_priv(dev);
-
-	priv->stats.tx_errors++;
-	return;
+	dev->stats.tx_errors++;
 }
 
+static const struct net_device_ops xpnet_netdev_ops = {
+	.ndo_open		= xpnet_dev_open,
+	.ndo_stop		= xpnet_dev_stop,
+	.ndo_start_xmit		= xpnet_dev_hard_start_xmit,
+	.ndo_change_mtu		= xpnet_dev_change_mtu,
+	.ndo_tx_timeout		= xpnet_dev_tx_timeout,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __init
 xpnet_init(void)
 {
@@ -563,8 +543,7 @@ xpnet_init(void)
 	 * use ether_setup() to init the majority of our device
 	 * structure and then override the necessary pieces.
 	 */
-	xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
-				    XPNET_DEVICE_NAME, ether_setup);
+	xpnet_device = alloc_netdev(0, XPNET_DEVICE_NAME, ether_setup);
 	if (xpnet_device == NULL) {
 		kfree(xpnet_broadcast_partitions);
 		return -ENOMEM;
@@ -573,13 +552,6 @@ xpnet_init(void)
 	netif_carrier_off(xpnet_device);
 
 	xpnet_device->mtu = XPNET_DEF_MTU;
-	xpnet_device->change_mtu = xpnet_dev_change_mtu;
-	xpnet_device->open = xpnet_dev_open;
-	xpnet_device->get_stats = xpnet_dev_get_stats;
-	xpnet_device->stop = xpnet_dev_stop;
-	xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit;
-	xpnet_device->tx_timeout = xpnet_dev_tx_timeout;
-	xpnet_device->set_config = xpnet_dev_set_config;
 
 	/*
 	 * Multicast assumes the LSB of the first octet is set for multicast
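
This conversion is part of the tree-wide move from per-field function pointers on struct net_device to a shared, const struct net_device_ops; the matching dev->netdev_ops assignment is presumably made elsewhere in the full patch. Per-device statistics also move into dev->stats, so the private stats copy goes away. A sketch of the pattern with hypothetical demo_* callbacks:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static int demo_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int demo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static int demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Counters now live in dev->stats; no private stats struct needed. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return 0;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_open	= demo_open,
	.ndo_stop	= demo_stop,
	.ndo_start_xmit	= demo_xmit,
};

static void demo_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &demo_netdev_ops;	/* replaces per-field assignments */
}
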
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 1e97916914ad..76bfe16c09b1 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -55,7 +55,6 @@ enum atmel_mci_state {
 
 struct atmel_mci_dma {
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-	struct dma_client	client;
 	struct dma_chan		*chan;
 	struct dma_async_tx_descriptor	*data_desc;
 #endif
@@ -593,10 +592,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
 
 	/* If we don't have a channel, we can't do DMA */
 	chan = host->dma.chan;
-	if (chan) {
-		dma_chan_get(chan);
+	if (chan)
 		host->data_chan = chan;
-	}
 
 	if (!chan)
 		return -ENODEV;
@@ -1443,60 +1440,6 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_MMC_ATMELMCI_DMA
-
-static inline struct atmel_mci *
-dma_client_to_atmel_mci(struct dma_client *client)
-{
-	return container_of(client, struct atmel_mci, dma.client);
-}
-
-static enum dma_state_client atmci_dma_event(struct dma_client *client,
-		struct dma_chan *chan, enum dma_state state)
-{
-	struct atmel_mci	*host;
-	enum dma_state_client	ret = DMA_NAK;
-
-	host = dma_client_to_atmel_mci(client);
-
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		spin_lock_bh(&host->lock);
-		if (!host->dma.chan) {
-			host->dma.chan = chan;
-			ret = DMA_ACK;
-		}
-		spin_unlock_bh(&host->lock);
-
-		if (ret == DMA_ACK)
-			dev_info(&host->pdev->dev,
-					"Using %s for DMA transfers\n",
-					chan->dev.bus_id);
-		break;
-
-	case DMA_RESOURCE_REMOVED:
-		spin_lock_bh(&host->lock);
-		if (host->dma.chan == chan) {
-			host->dma.chan = NULL;
-			ret = DMA_ACK;
-		}
-		spin_unlock_bh(&host->lock);
-
-		if (ret == DMA_ACK)
-			dev_info(&host->pdev->dev,
-					"Lost %s, falling back to PIO\n",
-					chan->dev.bus_id);
-		break;
-
-	default:
-		break;
-	}
-
-
-	return ret;
-}
-#endif /* CONFIG_MMC_ATMELMCI_DMA */
-
 static int __init atmci_init_slot(struct atmel_mci *host,
 		struct mci_slot_pdata *slot_data, unsigned int id,
 		u32 sdc_reg)
@@ -1600,6 +1543,18 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
 	mmc_free_host(slot->mmc);
 }
 
+#ifdef CONFIG_MMC_ATMELMCI_DMA
+static bool filter(struct dma_chan *chan, void *slave)
+{
+	struct dw_dma_slave *dws = slave;
+
+	if (dws->dma_dev == chan->device->dev)
+		return true;
+	else
+		return false;
+}
+#endif
+
 static int __init atmci_probe(struct platform_device *pdev)
 {
 	struct mci_platform_data *pdata;
@@ -1652,22 +1607,20 @@ static int __init atmci_probe(struct platform_device *pdev)
 		goto err_request_irq;
 
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-	if (pdata->dma_slave) {
-		struct dma_slave *slave = pdata->dma_slave;
+	if (pdata->dma_slave.dma_dev) {
+		struct dw_dma_slave *dws = &pdata->dma_slave;
+		dma_cap_mask_t mask;
 
-		slave->tx_reg = regs->start + MCI_TDR;
-		slave->rx_reg = regs->start + MCI_RDR;
+		dws->tx_reg = regs->start + MCI_TDR;
+		dws->rx_reg = regs->start + MCI_RDR;
 
 		/* Try to grab a DMA channel */
-		host->dma.client.event_callback = atmci_dma_event;
-		dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask);
-		host->dma.client.slave = slave;
-
-		dma_async_client_register(&host->dma.client);
-		dma_async_client_chan_request(&host->dma.client);
-	} else {
-		dev_notice(&pdev->dev, "DMA not available, using PIO\n");
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+		host->dma.chan = dma_request_channel(mask, filter, dws);
 	}
+	if (!host->dma.chan)
+		dev_notice(&pdev->dev, "DMA not available, using PIO\n");
 #endif /* CONFIG_MMC_ATMELMCI_DMA */
 
 	platform_set_drvdata(pdev, host);
@@ -1699,8 +1652,8 @@ static int __init atmci_probe(struct platform_device *pdev)
 
 err_init_slot:
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-	if (pdata->dma_slave)
-		dma_async_client_unregister(&host->dma.client);
+	if (host->dma.chan)
+		dma_release_channel(host->dma.chan);
 #endif
 	free_irq(irq, host);
 err_request_irq:
@@ -1731,8 +1684,8 @@ static int __exit atmci_remove(struct platform_device *pdev)
 	clk_disable(host->mck);
 
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-	if (host->dma.client.slave)
-		dma_async_client_unregister(&host->dma.client);
+	if (host->dma.chan)
+		dma_release_channel(host->dma.chan);
 #endif
 
 	free_irq(platform_get_irq(pdev, 0), host);
@@ -1761,7 +1714,7 @@ static void __exit atmci_exit(void)
 	platform_driver_unregister(&atmci_driver);
 }
 
-module_init(atmci_init);
+late_initcall(atmci_init); /* try to load after dma driver when built-in */
 module_exit(atmci_exit);
 
 MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
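
The atmel-mci changes track the dmaengine rework that dropped the asynchronous dma_client callback scheme in favor of the synchronous dma_request_channel(mask, filter, param): the caller states the capabilities it wants, the filter accepts or rejects each candidate channel, and dma_release_channel() hands it back. A sketch of that API under the same assumptions, with hypothetical demo_* names:

#include <linux/dmaengine.h>

/* Hypothetical match criterion: accept only channels belonging to one
 * DMA controller, identified here by its struct device pointer. */
static bool demo_filter(struct dma_chan *chan, void *param)
{
	struct device *wanted = param;

	return chan->device->dev == wanted;
}

static struct dma_chan *demo_grab_channel(struct device *dma_dev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);	/* we want slave (peripheral) transfers */
	return dma_request_channel(mask, demo_filter, dma_dev);
}

static void demo_put_channel(struct dma_chan *chan)
{
	if (chan)
		dma_release_channel(chan);
}
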
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index a90d50c2c3e5..7d04fb9ddcaa 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -45,6 +45,14 @@ config MTD_PARTITIONS
 	  devices. Partitioning on NFTL 'devices' is a different - that's the
 	  'normal' form of partitioning used on a block device.
 
+config MTD_TESTS
+	tristate "MTD tests support"
+	depends on m
+	help
+	  This option includes various MTD tests into compilation. The tests
+	  should normally be compiled as kernel modules. The modules perform
+	  various checks and verifications when loaded.
+
 config MTD_REDBOOT_PARTS
 	tristate "RedBoot partition table parsing"
 	depends on MTD_PARTITIONS
@@ -316,6 +324,8 @@ source "drivers/mtd/nand/Kconfig"
 
 source "drivers/mtd/onenand/Kconfig"
 
+source "drivers/mtd/lpddr/Kconfig"
+
 source "drivers/mtd/ubi/Kconfig"
 
 endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 4b77335715f0..4521b1ecce45 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -29,6 +29,6 @@ obj-$(CONFIG_MTD_OOPS) += mtdoops.o
 nftl-objs		:= nftlcore.o nftlmount.o
 inftl-objs		:= inftlcore.o inftlmount.o
 
-obj-y		+= chips/ maps/ devices/ nand/ onenand/
+obj-y		+= chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/
 
 obj-$(CONFIG_MTD_UBI)		+= ubi/
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index c93a8be5d5f1..f5ab6fa1057b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -58,8 +58,8 @@ static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t
 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
 static void cfi_intelext_sync (struct mtd_info *);
-static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
-static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
+static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 #ifdef CONFIG_MTD_OTP
 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
@@ -558,8 +558,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
 	}
 
 	for (i=0; i<mtd->numeraseregions;i++){
-		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
-		       i,mtd->eraseregions[i].offset,
+		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
+		       i,(unsigned long long)mtd->eraseregions[i].offset,
 		       mtd->eraseregions[i].erasesize,
 		       mtd->eraseregions[i].numblocks);
 	}
@@ -2058,7 +2058,7 @@ out: put_chip(map, chip, adr);
 	return ret;
 }
 
-static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	int ret;
 
@@ -2082,7 +2082,7 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
 	return ret;
 }
 
-static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	int ret;
 
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index d74ec46aa032..94bb61e19047 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -71,8 +71,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
 #include "fwh_lock.h"
 
-static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
-static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
+static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 
 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
 	.probe		= NULL, /* Not usable directly */
@@ -322,6 +322,14 @@ static struct cfi_fixup fixup_table[] = {
 };
 
 
+static void cfi_fixup_major_minor(struct cfi_private *cfi,
+				  struct cfi_pri_amdstd *extp)
+{
+	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
+	    extp->MajorVersion == '0')
+		extp->MajorVersion = '1';
+}
+
 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -363,6 +371,8 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 		return NULL;
 	}
 
+	cfi_fixup_major_minor(cfi, extp);
+
 	if (extp->MajorVersion != '1' ||
 	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
 		printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
@@ -1774,12 +1784,12 @@ out_unlock:
 	return ret;
 }
 
-static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
 }
 
-static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
 }
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index d4714dd9f7ab..6c740f346f91 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -42,8 +42,8 @@ static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
 			unsigned long count, loff_t to, size_t *retlen);
 static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
 static void cfi_staa_sync (struct mtd_info *);
-static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
-static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
+static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 static int cfi_staa_suspend (struct mtd_info *);
 static void cfi_staa_resume (struct mtd_info *);
 
@@ -221,8 +221,8 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
 	}
 
 	for (i=0; i<mtd->numeraseregions;i++){
-		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
-		       i,mtd->eraseregions[i].offset,
+		printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
+		       i, (unsigned long long)mtd->eraseregions[i].offset,
 		       mtd->eraseregions[i].erasesize,
 		       mtd->eraseregions[i].numblocks);
 	}
@@ -964,7 +964,7 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
 		adr += regions[i].erasesize;
 		len -= regions[i].erasesize;
 
-		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
+		if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
 			i++;
 
 		if (adr >> cfi->chipshift) {
@@ -1135,7 +1135,7 @@ retry:
 	spin_unlock_bh(chip->mutex);
 	return 0;
 }
-static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -1284,7 +1284,7 @@ retry:
 	spin_unlock_bh(chip->mutex);
 	return 0;
 }
-static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index ab44f2b996f8..57e0e4e921f9 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -77,7 +77,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
 }
 
 
-static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	int ret;
 
@@ -88,7 +88,7 @@ static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
 }
 
 
-static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	int ret;
 
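
The recurring size_t to uint64_t signature change across cfi_cmdset_0001/0002/0020 and fwh_lock.h matters on 32-bit builds, where size_t is 32 bits: a lock/unlock length covering a device of 4 GiB or more would silently wrap. A userspace toy (hypothetical values, illustration only) showing the truncation the wider type avoids:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chip = 8ULL << 30;		/* 8 GiB MTD device */
	uint32_t as_size_t = (uint32_t)chip;	/* size_t on a 32-bit kernel */

	/* Prints 8589934592 vs 0: the 32-bit parameter drops the request. */
	printf("%llu %u\n", (unsigned long long)chip, as_size_t);
	return 0;
}
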
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 6fde0a2e3567..bc33200535fc 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -120,6 +120,13 @@ config MTD_PHRAM
 	  doesn't have access to, memory beyond the mem=xxx limit, nvram,
 	  memory on the video card, etc...
 
+config MTD_PS3VRAM
+	tristate "PS3 video RAM"
+	depends on FB_PS3
+	help
+	  This driver allows you to use excess PS3 video RAM as volatile
+	  storage or system swap.
+
 config MTD_LART
 	tristate "28F160xx flash driver for LART"
 	depends on SA1100_LART
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 0993d5cf3923..e51521df4e40 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_MTD_LART) += lart.o
 obj-$(CONFIG_MTD_BLOCK2MTD)	+= block2mtd.o
 obj-$(CONFIG_MTD_DATAFLASH)	+= mtd_dataflash.o
 obj-$(CONFIG_MTD_M25P80)	+= m25p80.o
+obj-$(CONFIG_MTD_PS3VRAM)	+= ps3vram.o
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index f4bda4cee495..578de1c67bfe 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -619,7 +619,7 @@ static struct mtd_partition lart_partitions[] = {
 };
 #endif
 
-int __init lart_flash_init (void)
+static int __init lart_flash_init (void)
 {
    int result;
    memset (&mtd,0,sizeof (mtd));
@@ -690,7 +690,7 @@ int __init lart_flash_init (void)
    return (result);
 }
 
-void __exit lart_flash_exit (void)
+static void __exit lart_flash_exit (void)
 {
 #ifndef HAVE_PARTITIONS
    del_mtd_device (&mtd);
@@ -705,5 +705,3 @@ module_exit (lart_flash_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Abraham vd Merwe <abraham@2d3d.co.za>");
 MODULE_DESCRIPTION("MTD driver for Intel 28F160F3 on LART board");
-
-
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 5733f0643843..7c3fc766dcf1 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -20,6 +20,7 @@
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/mutex.h>
+#include <linux/math64.h>
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -169,9 +170,9 @@ static int wait_till_ready(struct m25p *flash)
  */
 static int erase_chip(struct m25p *flash)
 {
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n",
 	      dev_name(&flash->spi->dev), __func__,
-	      flash->mtd.size / 1024);
+	      (long long)(flash->mtd.size >> 10));
 
 	/* Wait until finished previous write command. */
 	if (wait_till_ready(flash))
@@ -232,18 +233,18 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
 	struct m25p *flash = mtd_to_m25p(mtd);
 	u32 addr,len;
+	uint32_t rem;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n",
+	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n",
 	      dev_name(&flash->spi->dev), __func__, "at",
-	      (u32)instr->addr, instr->len);
+	      (long long)instr->addr, (long long)instr->len);
 
 	/* sanity checks */
 	if (instr->addr + instr->len > flash->mtd.size)
 		return -EINVAL;
-	if ((instr->addr % mtd->erasesize) != 0
-			|| (instr->len % mtd->erasesize) != 0) {
+	div_u64_rem(instr->len, mtd->erasesize, &rem);
+	if (rem)
 		return -EINVAL;
-	}
 
 	addr = instr->addr;
 	len = instr->len;
@@ -677,24 +678,24 @@ static int __devinit m25p_probe(struct spi_device *spi)
 		flash->mtd.erasesize = info->sector_size;
 	}
 
-	dev_info(&spi->dev, "%s (%d Kbytes)\n", info->name,
-			flash->mtd.size / 1024);
+	dev_info(&spi->dev, "%s (%lld Kbytes)\n", info->name,
+			(long long)flash->mtd.size >> 10);
 
 	DEBUG(MTD_DEBUG_LEVEL2,
-		"mtd .name = %s, .size = 0x%.8x (%uMiB) "
+		"mtd .name = %s, .size = 0x%llx (%lldMiB) "
 			".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
 		flash->mtd.name,
-		flash->mtd.size, flash->mtd.size / (1024*1024),
+		(long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
 		flash->mtd.erasesize, flash->mtd.erasesize / 1024,
 		flash->mtd.numeraseregions);
 
 	if (flash->mtd.numeraseregions)
 		for (i = 0; i < flash->mtd.numeraseregions; i++)
 			DEBUG(MTD_DEBUG_LEVEL2,
-				"mtd.eraseregions[%d] = { .offset = 0x%.8x, "
+				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
 				".erasesize = 0x%.8x (%uKiB), "
 				".numblocks = %d }\n",
-				i, flash->mtd.eraseregions[i].offset,
+				i, (long long)flash->mtd.eraseregions[i].offset,
 				flash->mtd.eraseregions[i].erasesize,
 				flash->mtd.eraseregions[i].erasesize / 1024,
 				flash->mtd.eraseregions[i].numblocks);
@@ -722,12 +723,12 @@ static int __devinit m25p_probe(struct spi_device *spi)
 	if (nr_parts > 0) {
 		for (i = 0; i < nr_parts; i++) {
 			DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
-				"{.name = %s, .offset = 0x%.8x, "
-					".size = 0x%.8x (%uKiB) }\n",
+				"{.name = %s, .offset = 0x%llx, "
+					".size = 0x%llx (%lldKiB) }\n",
 				i, parts[i].name,
-				parts[i].offset,
-				parts[i].size,
-				parts[i].size / 1024);
+				(long long)parts[i].offset,
+				(long long)parts[i].size,
+				(long long)(parts[i].size >> 10));
 		}
 		flash->partitioned = 1;
 		return add_mtd_partitions(&flash->mtd, parts, nr_parts);
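
Once mtd->size and the erase_info fields are 64-bit, open-coded % and / on them would pull in libgcc helpers (__umoddi3/__udivdi3) that the kernel does not provide on 32-bit targets; hence the div_u64_rem()/div_u64() calls from <linux/math64.h> in these hunks. A sketch of the alignment-check idiom being adopted; the demo_ name is hypothetical:

#include <linux/math64.h>
#include <linux/types.h>

/* Returns true when a 64-bit offset/length pair is aligned to the
 * (32-bit) erase size; mirrors the checks added in m25p80 and
 * mtd_dataflash above. */
static bool demo_is_aligned(u64 addr, u64 len, u32 erasesize)
{
	u32 rem;

	div_u64_rem(addr, erasesize, &rem);
	if (rem)
		return false;
	div_u64_rem(len, erasesize, &rem);
	return rem == 0;
}
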
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 65126cd668ff..d44f741ae229 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -16,6 +16,7 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/err.h>
+#include <linux/math64.h>
 
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
@@ -152,15 +153,20 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
 	struct spi_message msg;
 	unsigned blocksize = priv->page_size << 3;
 	uint8_t *command;
+	uint32_t rem;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%x len 0x%x\n",
-	      dev_name(&spi->dev),
-	      instr->addr, instr->len);
+	DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%llx len 0x%llx\n",
+	      dev_name(&spi->dev), (long long)instr->addr,
+	      (long long)instr->len);
 
 	/* Sanity checks */
-	if ((instr->addr + instr->len) > mtd->size
-			|| (instr->len % priv->page_size) != 0
-			|| (instr->addr % priv->page_size) != 0)
+	if (instr->addr + instr->len > mtd->size)
+		return -EINVAL;
+	div_u64_rem(instr->len, priv->page_size, &rem);
+	if (rem)
+		return -EINVAL;
+	div_u64_rem(instr->addr, priv->page_size, &rem);
+	if (rem)
 		return -EINVAL;
 
 	spi_message_init(&msg);
@@ -178,7 +184,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
 	/* Calculate flash page address; use block erase (for speed) if
 	 * we're at a block boundary and need to erase the whole block.
 	 */
-	pageaddr = instr->addr / priv->page_size;
+	pageaddr = div_u64(instr->len, priv->page_size);
 	do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
 	pageaddr = pageaddr << priv->page_offset;
 
@@ -667,8 +673,8 @@ add_dataflash_otp(struct spi_device *spi, char *name,
 	if (revision >= 'c')
 		otp_tag = otp_setup(device, revision);
 
-	dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes%s\n",
-			name, DIV_ROUND_UP(device->size, 1024),
+	dev_info(&spi->dev, "%s (%lld KBytes) pagesize %d bytes%s\n",
+			name, (long long)((device->size + 1023) >> 10),
 			pagesize, otp_tag);
 	dev_set_drvdata(&spi->dev, priv);
 
diff --git a/drivers/mtd/devices/ps3vram.c b/drivers/mtd/devices/ps3vram.c
new file mode 100644
index 000000000000..d21e9beb7ed2
--- /dev/null
+++ b/drivers/mtd/devices/ps3vram.c
@@ -0,0 +1,768 @@
1/**
2 * ps3vram - Use extra PS3 video ram as MTD block device.
3 *
4 * Copyright (c) 2007-2008 Jim Paris <jim@jtan.com>
5 * RSX DMA support added by Vivien Chappelier <vivien.chappelier@free.fr>
6 */
7
8#include <linux/io.h>
9#include <linux/mm.h>
10#include <linux/init.h>
11#include <linux/kernel.h>
12#include <linux/list.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/slab.h>
16#include <linux/version.h>
17#include <linux/gfp.h>
18#include <linux/delay.h>
19#include <linux/mtd/mtd.h>
20
21#include <asm/lv1call.h>
22#include <asm/ps3.h>
23
24#define DEVICE_NAME "ps3vram"
25
26#define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */
27#define XDR_IOIF 0x0c000000
28
29#define FIFO_BASE XDR_IOIF
30#define FIFO_SIZE (64 * 1024)
31
32#define DMA_PAGE_SIZE (4 * 1024)
33
34#define CACHE_PAGE_SIZE (256 * 1024)
35#define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE)
36
37#define CACHE_OFFSET CACHE_PAGE_SIZE
38#define FIFO_OFFSET 0
39
40#define CTRL_PUT 0x10
41#define CTRL_GET 0x11
42#define CTRL_TOP 0x15
43
44#define UPLOAD_SUBCH 1
45#define DOWNLOAD_SUBCH 2
46
47#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
48#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
49
50#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601
51
52struct mtd_info ps3vram_mtd;
53
54#define CACHE_PAGE_PRESENT 1
55#define CACHE_PAGE_DIRTY 2
56
57struct ps3vram_tag {
58 unsigned int address;
59 unsigned int flags;
60};
61
62struct ps3vram_cache {
63 unsigned int page_count;
64 unsigned int page_size;
65 struct ps3vram_tag *tags;
66};
67
68struct ps3vram_priv {
69 u64 memory_handle;
70 u64 context_handle;
71 u32 *ctrl;
72 u32 *reports;
73 u8 __iomem *ddr_base;
74 u8 *xdr_buf;
75
76 u32 *fifo_base;
77 u32 *fifo_ptr;
78
79 struct device *dev;
80 struct ps3vram_cache cache;
81
82 /* Used to serialize cache/DMA operations */
83 struct mutex lock;
84};
85
86#define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */
87#define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */
88#define DMA_NOTIFIER_SIZE 0x40
89#define NOTIFIER 7 /* notifier used for completion report */
90
91/* A trailing '-' means to subtract off ps3fb_videomemory.size */
92char *size = "256M-";
93module_param(size, charp, 0);
94MODULE_PARM_DESC(size, "memory size");
95
96static u32 *ps3vram_get_notifier(u32 *reports, int notifier)
97{
98 return (void *) reports +
99 DMA_NOTIFIER_OFFSET_BASE +
100 DMA_NOTIFIER_SIZE * notifier;
101}
102
103static void ps3vram_notifier_reset(struct mtd_info *mtd)
104{
105 int i;
106
107 struct ps3vram_priv *priv = mtd->priv;
108 u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
109 for (i = 0; i < 4; i++)
110 notify[i] = 0xffffffff;
111}
112
113static int ps3vram_notifier_wait(struct mtd_info *mtd, unsigned int timeout_ms)
114{
115 struct ps3vram_priv *priv = mtd->priv;
116 u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
117 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
118
119 do {
120 if (!notify[3])
121 return 0;
122 msleep(1);
123 } while (time_before(jiffies, timeout));
124
125 return -ETIMEDOUT;
126}
127
128static void ps3vram_init_ring(struct mtd_info *mtd)
129{
130 struct ps3vram_priv *priv = mtd->priv;
131
132 priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
133 priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET;
134}
135
136static int ps3vram_wait_ring(struct mtd_info *mtd, unsigned int timeout_ms)
137{
138 struct ps3vram_priv *priv = mtd->priv;
139 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
140
141 do {
142 if (priv->ctrl[CTRL_PUT] == priv->ctrl[CTRL_GET])
143 return 0;
144 msleep(1);
145 } while (time_before(jiffies, timeout));
146
147 dev_dbg(priv->dev, "%s:%d: FIFO timeout (%08x/%08x/%08x)\n", __func__,
148 __LINE__, priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET],
149 priv->ctrl[CTRL_TOP]);
150
151 return -ETIMEDOUT;
152}
153
154static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data)
155{
156 *(priv->fifo_ptr)++ = data;
157}
158
159static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan,
160 u32 tag, u32 size)
161{
162 ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag);
163}
164
165static void ps3vram_rewind_ring(struct mtd_info *mtd)
166{
167 struct ps3vram_priv *priv = mtd->priv;
168 u64 status;
169
170 ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET));
171
172 priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
173
174 /* asking the HV for a blit will kick the fifo */
175 status = lv1_gpu_context_attribute(priv->context_handle,
176 L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
177 0, 0, 0, 0);
178 if (status)
179 dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n",
180 __func__, __LINE__);
181
182 priv->fifo_ptr = priv->fifo_base;
183}
184
185static void ps3vram_fire_ring(struct mtd_info *mtd)
186{
187 struct ps3vram_priv *priv = mtd->priv;
188 u64 status;
189
190 mutex_lock(&ps3_gpu_mutex);
191
192 priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET +
193 (priv->fifo_ptr - priv->fifo_base) * sizeof(u32);
194
195 /* asking the HV for a blit will kick the fifo */
196 status = lv1_gpu_context_attribute(priv->context_handle,
197 L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
198 0, 0, 0, 0);
199 if (status)
200 dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n",
201 __func__, __LINE__);
202
203 if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) >
204 FIFO_SIZE - 1024) {
205 dev_dbg(priv->dev, "%s:%d: fifo full, rewinding\n", __func__,
206 __LINE__);
207 ps3vram_wait_ring(mtd, 200);
208 ps3vram_rewind_ring(mtd);
209 }
210
211 mutex_unlock(&ps3_gpu_mutex);
212}
213
214static void ps3vram_bind(struct mtd_info *mtd)
215{
216 struct ps3vram_priv *priv = mtd->priv;
217
218 ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1);
219 ps3vram_out_ring(priv, 0x31337303);
220 ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3);
221 ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
222 ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */
223 ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */
224
225 ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1);
226 ps3vram_out_ring(priv, 0x3137c0de);
227 ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3);
228 ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
229 ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */
230 ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */
231
232 ps3vram_fire_ring(mtd);
233}
234
235static int ps3vram_upload(struct mtd_info *mtd, unsigned int src_offset,
236 unsigned int dst_offset, int len, int count)
237{
238 struct ps3vram_priv *priv = mtd->priv;
239
240 ps3vram_begin_ring(priv, UPLOAD_SUBCH,
241 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
242 ps3vram_out_ring(priv, XDR_IOIF + src_offset);
243 ps3vram_out_ring(priv, dst_offset);
244 ps3vram_out_ring(priv, len);
245 ps3vram_out_ring(priv, len);
246 ps3vram_out_ring(priv, len);
247 ps3vram_out_ring(priv, count);
248 ps3vram_out_ring(priv, (1 << 8) | 1);
249 ps3vram_out_ring(priv, 0);
250
251 ps3vram_notifier_reset(mtd);
252 ps3vram_begin_ring(priv, UPLOAD_SUBCH,
253 NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
254 ps3vram_out_ring(priv, 0);
255 ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1);
256 ps3vram_out_ring(priv, 0);
257 ps3vram_fire_ring(mtd);
258 if (ps3vram_notifier_wait(mtd, 200) < 0) {
259 dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__,
260 __LINE__);
261 return -1;
262 }
263
264 return 0;
265}
266
267static int ps3vram_download(struct mtd_info *mtd, unsigned int src_offset,
268 unsigned int dst_offset, int len, int count)
269{
270 struct ps3vram_priv *priv = mtd->priv;
271
272 ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
273 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
274 ps3vram_out_ring(priv, src_offset);
275 ps3vram_out_ring(priv, XDR_IOIF + dst_offset);
276 ps3vram_out_ring(priv, len);
277 ps3vram_out_ring(priv, len);
278 ps3vram_out_ring(priv, len);
279 ps3vram_out_ring(priv, count);
280 ps3vram_out_ring(priv, (1 << 8) | 1);
281 ps3vram_out_ring(priv, 0);
282
283 ps3vram_notifier_reset(mtd);
284 ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
285 NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
286 ps3vram_out_ring(priv, 0);
287 ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1);
288 ps3vram_out_ring(priv, 0);
289 ps3vram_fire_ring(mtd);
290 if (ps3vram_notifier_wait(mtd, 200) < 0) {
291 dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__,
292 __LINE__);
293 return -1;
294 }
295
296 return 0;
297}
298
299static void ps3vram_cache_evict(struct mtd_info *mtd, int entry)
300{
301 struct ps3vram_priv *priv = mtd->priv;
302 struct ps3vram_cache *cache = &priv->cache;
303
304 if (cache->tags[entry].flags & CACHE_PAGE_DIRTY) {
305 dev_dbg(priv->dev, "%s:%d: flushing %d : 0x%08x\n", __func__,
306 __LINE__, entry, cache->tags[entry].address);
307 if (ps3vram_upload(mtd,
308 CACHE_OFFSET + entry * cache->page_size,
309 cache->tags[entry].address,
310 DMA_PAGE_SIZE,
311 cache->page_size / DMA_PAGE_SIZE) < 0) {
312 dev_dbg(priv->dev, "%s:%d: failed to upload from "
313 "0x%x to 0x%x size 0x%x\n", __func__, __LINE__,
314 entry * cache->page_size,
315 cache->tags[entry].address, cache->page_size);
316 }
317 cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY;
318 }
319}
320
321static void ps3vram_cache_load(struct mtd_info *mtd, int entry,
322 unsigned int address)
323{
324 struct ps3vram_priv *priv = mtd->priv;
325 struct ps3vram_cache *cache = &priv->cache;
326
327 dev_dbg(priv->dev, "%s:%d: fetching %d : 0x%08x\n", __func__, __LINE__,
328 entry, address);
329 if (ps3vram_download(mtd,
330 address,
331 CACHE_OFFSET + entry * cache->page_size,
332 DMA_PAGE_SIZE,
333 cache->page_size / DMA_PAGE_SIZE) < 0) {
334 dev_err(priv->dev, "%s:%d: failed to download from "
335 "0x%x to 0x%x size 0x%x\n", __func__, __LINE__, address,
336 entry * cache->page_size, cache->page_size);
337 }
338
339 cache->tags[entry].address = address;
340 cache->tags[entry].flags |= CACHE_PAGE_PRESENT;
341}
342
343
344static void ps3vram_cache_flush(struct mtd_info *mtd)
345{
346 struct ps3vram_priv *priv = mtd->priv;
347 struct ps3vram_cache *cache = &priv->cache;
348 int i;
349
350 dev_dbg(priv->dev, "%s:%d: FLUSH\n", __func__, __LINE__);
351 for (i = 0; i < cache->page_count; i++) {
352 ps3vram_cache_evict(mtd, i);
353 cache->tags[i].flags = 0;
354 }
355}
356
357static unsigned int ps3vram_cache_match(struct mtd_info *mtd, loff_t address)
358{
359 struct ps3vram_priv *priv = mtd->priv;
360 struct ps3vram_cache *cache = &priv->cache;
361 unsigned int base;
362 unsigned int offset;
363 int i;
364 static int counter;
365
366 offset = (unsigned int) (address & (cache->page_size - 1));
367 base = (unsigned int) (address - offset);
368
369 /* fully associative check */
370 for (i = 0; i < cache->page_count; i++) {
371 if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) &&
372 cache->tags[i].address == base) {
373 dev_dbg(priv->dev, "%s:%d: found entry %d : 0x%08x\n",
374 __func__, __LINE__, i, cache->tags[i].address);
375 return i;
376 }
377 }
378
379 /* choose a random entry */
380 i = (jiffies + (counter++)) % cache->page_count;
381 dev_dbg(priv->dev, "%s:%d: using entry %d\n", __func__, __LINE__, i);
382
383 ps3vram_cache_evict(mtd, i);
384 ps3vram_cache_load(mtd, i, base);
385
386 return i;
387}
388
389static int ps3vram_cache_init(struct mtd_info *mtd)
390{
391 struct ps3vram_priv *priv = mtd->priv;
392
393 priv->cache.page_count = CACHE_PAGE_COUNT;
394 priv->cache.page_size = CACHE_PAGE_SIZE;
395 priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) *
396 CACHE_PAGE_COUNT, GFP_KERNEL);
397 if (priv->cache.tags == NULL) {
398 dev_err(priv->dev, "%s:%d: could not allocate cache tags\n",
399 __func__, __LINE__);
400 return -ENOMEM;
401 }
402
403 dev_info(priv->dev, "created ram cache: %d entries, %d KiB each\n",
404 CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024);
405
406 return 0;
407}
408
409static void ps3vram_cache_cleanup(struct mtd_info *mtd)
410{
411 struct ps3vram_priv *priv = mtd->priv;
412
413 ps3vram_cache_flush(mtd);
414 kfree(priv->cache.tags);
415}
416
417static int ps3vram_erase(struct mtd_info *mtd, struct erase_info *instr)
418{
419 struct ps3vram_priv *priv = mtd->priv;
420
421 if (instr->addr + instr->len > mtd->size)
422 return -EINVAL;
423
424 mutex_lock(&priv->lock);
425
426 ps3vram_cache_flush(mtd);
427
428 /* Set bytes to 0xFF */
429 memset_io(priv->ddr_base + instr->addr, 0xFF, instr->len);
430
431 mutex_unlock(&priv->lock);
432
433 instr->state = MTD_ERASE_DONE;
434 mtd_erase_callback(instr);
435
436 return 0;
437}
438
439static int ps3vram_read(struct mtd_info *mtd, loff_t from, size_t len,
440 size_t *retlen, u_char *buf)
441{
442 struct ps3vram_priv *priv = mtd->priv;
443 unsigned int cached, count;
444
445 dev_dbg(priv->dev, "%s:%d: from=0x%08x len=0x%zx\n", __func__, __LINE__,
446 (unsigned int)from, len);
447
448 if (from >= mtd->size)
449 return -EINVAL;
450
451 if (len > mtd->size - from)
452 len = mtd->size - from;
453
454 /* Copy from vram to buf */
455 count = len;
456 while (count) {
457 unsigned int offset, avail;
458 unsigned int entry;
459
460 offset = (unsigned int) (from & (priv->cache.page_size - 1));
461 avail = priv->cache.page_size - offset;
462
463 mutex_lock(&priv->lock);
464
465 entry = ps3vram_cache_match(mtd, from);
466 cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
467
468 dev_dbg(priv->dev, "%s:%d: from=%08x cached=%08x offset=%08x "
469 "avail=%08x count=%08x\n", __func__, __LINE__,
470 (unsigned int)from, cached, offset, avail, count);
471
472 if (avail > count)
473 avail = count;
474 memcpy(buf, priv->xdr_buf + cached, avail);
475
476 mutex_unlock(&priv->lock);
477
478 buf += avail;
479 count -= avail;
480 from += avail;
481 }
482
483 *retlen = len;
484 return 0;
485}
486
487static int ps3vram_write(struct mtd_info *mtd, loff_t to, size_t len,
488 size_t *retlen, const u_char *buf)
489{
490 struct ps3vram_priv *priv = mtd->priv;
491 unsigned int cached, count;
492
493 if (to >= mtd->size)
494 return -EINVAL;
495
496 if (len > mtd->size - to)
497 len = mtd->size - to;
498
499 /* Copy from buf to vram */
500 count = len;
501 while (count) {
502 unsigned int offset, avail;
503 unsigned int entry;
504
505 offset = (unsigned int) (to & (priv->cache.page_size - 1));
506 avail = priv->cache.page_size - offset;
507
508 mutex_lock(&priv->lock);
509
510 entry = ps3vram_cache_match(mtd, to);
511 cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
512
513 dev_dbg(priv->dev, "%s:%d: to=%08x cached=%08x offset=%08x "
514 "avail=%08x count=%08x\n", __func__, __LINE__,
515 (unsigned int)to, cached, offset, avail, count);
516
517 if (avail > count)
518 avail = count;
519 memcpy(priv->xdr_buf + cached, buf, avail);
520
521 priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY;
522
523 mutex_unlock(&priv->lock);
524
525 buf += avail;
526 count -= avail;
527 to += avail;
528 }
529
530 *retlen = len;
531 return 0;
532}
533
534static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
535{
536 struct ps3vram_priv *priv;
537 int status;
538 u64 ddr_lpar;
539 u64 ctrl_lpar;
540 u64 info_lpar;
541 u64 reports_lpar;
542 u64 ddr_size;
543 u64 reports_size;
544 int ret = -ENOMEM;
545 char *rest;
546
547 ret = -EIO;
548 ps3vram_mtd.priv = kzalloc(sizeof(struct ps3vram_priv), GFP_KERNEL);
549 if (!ps3vram_mtd.priv)
550 goto out;
551 priv = ps3vram_mtd.priv;
552
553 mutex_init(&priv->lock);
554 priv->dev = &dev->core;
555
556 /* Allocate XDR buffer (1MiB aligned) */
557 priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL,
558 get_order(XDR_BUF_SIZE));
559 if (priv->xdr_buf == NULL) {
560 dev_dbg(&dev->core, "%s:%d: could not allocate XDR buffer\n",
561 __func__, __LINE__);
562 ret = -ENOMEM;
563 goto out_free_priv;
564 }
565
566	/* Put FIFO at beginning of XDR buffer */
567 priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET);
568 priv->fifo_ptr = priv->fifo_base;
569
570 /* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */
571 if (ps3_open_hv_device(dev)) {
572 dev_err(&dev->core, "%s:%d: ps3_open_hv_device failed\n",
573 __func__, __LINE__);
574 ret = -EAGAIN;
575 goto out_close_gpu;
576 }
577
578 /* Request memory */
579 status = -1;
580 ddr_size = memparse(size, &rest);
581 if (*rest == '-')
582 ddr_size -= ps3fb_videomemory.size;
583 ddr_size = ALIGN(ddr_size, 1024*1024);
584 if (ddr_size <= 0) {
585 dev_err(&dev->core, "%s:%d: specified size is too small\n",
586 __func__, __LINE__);
587 ret = -EINVAL;
588 goto out_close_gpu;
589 }
590
591 while (ddr_size > 0) {
592 status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0,
593 &priv->memory_handle,
594 &ddr_lpar);
595 if (!status)
596 break;
597 ddr_size -= 1024*1024;
598 }
599 if (status || ddr_size <= 0) {
600 dev_err(&dev->core, "%s:%d: lv1_gpu_memory_allocate failed\n",
601 __func__, __LINE__);
602 ret = -ENOMEM;
603 goto out_free_xdr_buf;
604 }
605
606 /* Request context */
607 status = lv1_gpu_context_allocate(priv->memory_handle,
608 0,
609 &priv->context_handle,
610 &ctrl_lpar,
611 &info_lpar,
612 &reports_lpar,
613 &reports_size);
614 if (status) {
615 dev_err(&dev->core, "%s:%d: lv1_gpu_context_allocate failed\n",
616 __func__, __LINE__);
617 ret = -ENOMEM;
618 goto out_free_memory;
619 }
620
621 /* Map XDR buffer to RSX */
622 status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF,
623 ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)),
624 XDR_BUF_SIZE, 0);
625 if (status) {
626 dev_err(&dev->core, "%s:%d: lv1_gpu_context_iomap failed\n",
627 __func__, __LINE__);
628 ret = -ENOMEM;
629 goto out_free_context;
630 }
631
632 priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE);
633
634 if (!priv->ddr_base) {
635 dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
636 __LINE__);
637 ret = -ENOMEM;
638 goto out_free_context;
639 }
640
641 priv->ctrl = ioremap(ctrl_lpar, 64 * 1024);
642 if (!priv->ctrl) {
643 dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
644 __LINE__);
645 ret = -ENOMEM;
646 goto out_unmap_vram;
647 }
648
649 priv->reports = ioremap(reports_lpar, reports_size);
650 if (!priv->reports) {
651 dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
652 __LINE__);
653 ret = -ENOMEM;
654 goto out_unmap_ctrl;
655 }
656
657 mutex_lock(&ps3_gpu_mutex);
658 ps3vram_init_ring(&ps3vram_mtd);
659 mutex_unlock(&ps3_gpu_mutex);
660
661 ps3vram_mtd.name = "ps3vram";
662 ps3vram_mtd.size = ddr_size;
663 ps3vram_mtd.flags = MTD_CAP_RAM;
664 ps3vram_mtd.erase = ps3vram_erase;
665 ps3vram_mtd.point = NULL;
666 ps3vram_mtd.unpoint = NULL;
667 ps3vram_mtd.read = ps3vram_read;
668 ps3vram_mtd.write = ps3vram_write;
669 ps3vram_mtd.owner = THIS_MODULE;
670 ps3vram_mtd.type = MTD_RAM;
671 ps3vram_mtd.erasesize = CACHE_PAGE_SIZE;
672 ps3vram_mtd.writesize = 1;
673
674 ps3vram_bind(&ps3vram_mtd);
675
676 mutex_lock(&ps3_gpu_mutex);
677 ret = ps3vram_wait_ring(&ps3vram_mtd, 100);
678 mutex_unlock(&ps3_gpu_mutex);
679 if (ret < 0) {
680 dev_err(&dev->core, "%s:%d: failed to initialize channels\n",
681 __func__, __LINE__);
682 ret = -ETIMEDOUT;
683 goto out_unmap_reports;
684 }
685
686 ps3vram_cache_init(&ps3vram_mtd);
687
688 if (add_mtd_device(&ps3vram_mtd)) {
689 dev_err(&dev->core, "%s:%d: add_mtd_device failed\n",
690 __func__, __LINE__);
691 ret = -EAGAIN;
692 goto out_cache_cleanup;
693 }
694
695 dev_info(&dev->core, "reserved %u MiB of gpu memory\n",
696 (unsigned int)(ddr_size / 1024 / 1024));
697
698 return 0;
699
700out_cache_cleanup:
701 ps3vram_cache_cleanup(&ps3vram_mtd);
702out_unmap_reports:
703 iounmap(priv->reports);
704out_unmap_ctrl:
705 iounmap(priv->ctrl);
706out_unmap_vram:
707 iounmap(priv->ddr_base);
708out_free_context:
709 lv1_gpu_context_free(priv->context_handle);
710out_free_memory:
711 lv1_gpu_memory_free(priv->memory_handle);
712out_close_gpu:
713 ps3_close_hv_device(dev);
714out_free_xdr_buf:
715 free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
716out_free_priv:
717 kfree(ps3vram_mtd.priv);
718 ps3vram_mtd.priv = NULL;
719out:
720 return ret;
721}
722
723static int ps3vram_shutdown(struct ps3_system_bus_device *dev)
724{
725 struct ps3vram_priv *priv;
726
727 priv = ps3vram_mtd.priv;
728
729 del_mtd_device(&ps3vram_mtd);
730 ps3vram_cache_cleanup(&ps3vram_mtd);
731 iounmap(priv->reports);
732 iounmap(priv->ctrl);
733 iounmap(priv->ddr_base);
734 lv1_gpu_context_free(priv->context_handle);
735 lv1_gpu_memory_free(priv->memory_handle);
736 ps3_close_hv_device(dev);
737 free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
738 kfree(priv);
739 return 0;
740}
741
742static struct ps3_system_bus_driver ps3vram_driver = {
743 .match_id = PS3_MATCH_ID_GPU,
744 .match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK,
745 .core.name = DEVICE_NAME,
746 .core.owner = THIS_MODULE,
747 .probe = ps3vram_probe,
748 .remove = ps3vram_shutdown,
749 .shutdown = ps3vram_shutdown,
750};
751
752static int __init ps3vram_init(void)
753{
754 return ps3_system_bus_driver_register(&ps3vram_driver);
755}
756
757static void __exit ps3vram_exit(void)
758{
759 ps3_system_bus_driver_unregister(&ps3vram_driver);
760}
761
762module_init(ps3vram_init);
763module_exit(ps3vram_exit);
764
765MODULE_LICENSE("GPL");
766MODULE_AUTHOR("Jim Paris <jim@jtan.com>");
767MODULE_DESCRIPTION("MTD driver for PS3 video RAM");
768MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK);
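
The probe path above acquires its resources one at a time and unwinds them through a ladder of goto labels in strict reverse order: each failure jumps to the label that releases exactly what has been acquired so far. A minimal, self-contained sketch of the same idiom (the acquire_*/release_* helpers are hypothetical stand-ins for the lv1_ and ioremap calls, not part of this driver):

#include <linux/errno.h>
#include <linux/slab.h>

static void *acquire_a(void) { return kmalloc(16, GFP_KERNEL); }
static void *acquire_b(void) { return kmalloc(16, GFP_KERNEL); }
static void release_a(void *p) { kfree(p); }

static int example_probe(void)
{
	void *a, *b;
	int ret;

	a = acquire_a();
	if (!a) {
		ret = -ENOMEM;
		goto out;		/* nothing to undo yet */
	}

	b = acquire_b();
	if (!b) {
		ret = -ENOMEM;
		goto out_release_a;	/* undo only what is already held */
	}

	return 0;			/* success: keep both resources */

out_release_a:
	release_a(a);			/* labels run in reverse order of acquisition */
out:
	return ret;
}

Keeping the labels in strict reverse order of acquisition is what makes each goto target unambiguous.
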
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 9bf581c4f740..a790c062af1f 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -109,25 +109,25 @@ module_param(shuffle_freq, int, 0);
109/* Each memory region corresponds to a minor device */ 109/* Each memory region corresponds to a minor device */
110typedef struct partition_t { 110typedef struct partition_t {
111 struct mtd_blktrans_dev mbd; 111 struct mtd_blktrans_dev mbd;
112 u_int32_t state; 112 uint32_t state;
113 u_int32_t *VirtualBlockMap; 113 uint32_t *VirtualBlockMap;
114 u_int32_t *VirtualPageMap; 114 uint32_t *VirtualPageMap;
115 u_int32_t FreeTotal; 115 uint32_t FreeTotal;
116 struct eun_info_t { 116 struct eun_info_t {
117 u_int32_t Offset; 117 uint32_t Offset;
118 u_int32_t EraseCount; 118 uint32_t EraseCount;
119 u_int32_t Free; 119 uint32_t Free;
120 u_int32_t Deleted; 120 uint32_t Deleted;
121 } *EUNInfo; 121 } *EUNInfo;
122 struct xfer_info_t { 122 struct xfer_info_t {
123 u_int32_t Offset; 123 uint32_t Offset;
124 u_int32_t EraseCount; 124 uint32_t EraseCount;
125 u_int16_t state; 125 uint16_t state;
126 } *XferInfo; 126 } *XferInfo;
127 u_int16_t bam_index; 127 uint16_t bam_index;
128 u_int32_t *bam_cache; 128 uint32_t *bam_cache;
129 u_int16_t DataUnits; 129 uint16_t DataUnits;
130 u_int32_t BlocksPerUnit; 130 uint32_t BlocksPerUnit;
131 erase_unit_header_t header; 131 erase_unit_header_t header;
132} partition_t; 132} partition_t;
133 133
@@ -199,8 +199,8 @@ static int scan_header(partition_t *part)
199static int build_maps(partition_t *part) 199static int build_maps(partition_t *part)
200{ 200{
201 erase_unit_header_t header; 201 erase_unit_header_t header;
202 u_int16_t xvalid, xtrans, i; 202 uint16_t xvalid, xtrans, i;
203 u_int blocks, j; 203 unsigned blocks, j;
204 int hdr_ok, ret = -1; 204 int hdr_ok, ret = -1;
205 ssize_t retval; 205 ssize_t retval;
206 loff_t offset; 206 loff_t offset;
@@ -269,14 +269,14 @@ static int build_maps(partition_t *part)
269 269
270 /* Set up virtual page map */ 270 /* Set up virtual page map */
271 blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize; 271 blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
272 part->VirtualBlockMap = vmalloc(blocks * sizeof(u_int32_t)); 272 part->VirtualBlockMap = vmalloc(blocks * sizeof(uint32_t));
273 if (!part->VirtualBlockMap) 273 if (!part->VirtualBlockMap)
274 goto out_XferInfo; 274 goto out_XferInfo;
275 275
276 memset(part->VirtualBlockMap, 0xff, blocks * sizeof(u_int32_t)); 276 memset(part->VirtualBlockMap, 0xff, blocks * sizeof(uint32_t));
277 part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize; 277 part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;
278 278
279 part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(u_int32_t), 279 part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(uint32_t),
280 GFP_KERNEL); 280 GFP_KERNEL);
281 if (!part->bam_cache) 281 if (!part->bam_cache)
282 goto out_VirtualBlockMap; 282 goto out_VirtualBlockMap;
@@ -290,7 +290,7 @@ static int build_maps(partition_t *part)
290 offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset); 290 offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
291 291
292 ret = part->mbd.mtd->read(part->mbd.mtd, offset, 292 ret = part->mbd.mtd->read(part->mbd.mtd, offset,
293 part->BlocksPerUnit * sizeof(u_int32_t), &retval, 293 part->BlocksPerUnit * sizeof(uint32_t), &retval,
294 (unsigned char *)part->bam_cache); 294 (unsigned char *)part->bam_cache);
295 295
296 if (ret) 296 if (ret)
@@ -332,7 +332,7 @@ out:
332======================================================================*/ 332======================================================================*/
333 333
334static int erase_xfer(partition_t *part, 334static int erase_xfer(partition_t *part,
335 u_int16_t xfernum) 335 uint16_t xfernum)
336{ 336{
337 int ret; 337 int ret;
338 struct xfer_info_t *xfer; 338 struct xfer_info_t *xfer;
@@ -408,7 +408,7 @@ static int prepare_xfer(partition_t *part, int i)
408 erase_unit_header_t header; 408 erase_unit_header_t header;
409 struct xfer_info_t *xfer; 409 struct xfer_info_t *xfer;
410 int nbam, ret; 410 int nbam, ret;
411 u_int32_t ctl; 411 uint32_t ctl;
412 ssize_t retlen; 412 ssize_t retlen;
413 loff_t offset; 413 loff_t offset;
414 414
@@ -430,15 +430,15 @@ static int prepare_xfer(partition_t *part, int i)
430 } 430 }
431 431
432 /* Write the BAM stub */ 432 /* Write the BAM stub */
433 nbam = (part->BlocksPerUnit * sizeof(u_int32_t) + 433 nbam = (part->BlocksPerUnit * sizeof(uint32_t) +
434 le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE; 434 le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE;
435 435
436 offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset); 436 offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);
437 ctl = cpu_to_le32(BLOCK_CONTROL); 437 ctl = cpu_to_le32(BLOCK_CONTROL);
438 438
439 for (i = 0; i < nbam; i++, offset += sizeof(u_int32_t)) { 439 for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
440 440
441 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t), 441 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
442 &retlen, (u_char *)&ctl); 442 &retlen, (u_char *)&ctl);
443 443
444 if (ret) 444 if (ret)
@@ -461,18 +461,18 @@ static int prepare_xfer(partition_t *part, int i)
461 461
462======================================================================*/ 462======================================================================*/
463 463
464static int copy_erase_unit(partition_t *part, u_int16_t srcunit, 464static int copy_erase_unit(partition_t *part, uint16_t srcunit,
465 u_int16_t xferunit) 465 uint16_t xferunit)
466{ 466{
467 u_char buf[SECTOR_SIZE]; 467 u_char buf[SECTOR_SIZE];
468 struct eun_info_t *eun; 468 struct eun_info_t *eun;
469 struct xfer_info_t *xfer; 469 struct xfer_info_t *xfer;
470 u_int32_t src, dest, free, i; 470 uint32_t src, dest, free, i;
471 u_int16_t unit; 471 uint16_t unit;
472 int ret; 472 int ret;
473 ssize_t retlen; 473 ssize_t retlen;
474 loff_t offset; 474 loff_t offset;
475 u_int16_t srcunitswap = cpu_to_le16(srcunit); 475 uint16_t srcunitswap = cpu_to_le16(srcunit);
476 476
477 eun = &part->EUNInfo[srcunit]; 477 eun = &part->EUNInfo[srcunit];
478 xfer = &part->XferInfo[xferunit]; 478 xfer = &part->XferInfo[xferunit];
@@ -486,7 +486,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
486 offset = eun->Offset + le32_to_cpu(part->header.BAMOffset); 486 offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
487 487
488 ret = part->mbd.mtd->read(part->mbd.mtd, offset, 488 ret = part->mbd.mtd->read(part->mbd.mtd, offset,
489 part->BlocksPerUnit * sizeof(u_int32_t), 489 part->BlocksPerUnit * sizeof(uint32_t),
490 &retlen, (u_char *) (part->bam_cache)); 490 &retlen, (u_char *) (part->bam_cache));
491 491
492 /* mark the cache bad, in case we get an error later */ 492 /* mark the cache bad, in case we get an error later */
@@ -503,7 +503,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
503 offset = xfer->Offset + 20; /* Bad! */ 503 offset = xfer->Offset + 20; /* Bad! */
504 unit = cpu_to_le16(0x7fff); 504 unit = cpu_to_le16(0x7fff);
505 505
506 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int16_t), 506 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t),
507 &retlen, (u_char *) &unit); 507 &retlen, (u_char *) &unit);
508 508
509 if (ret) { 509 if (ret) {
@@ -560,7 +560,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
560 560
561 561
562 /* All clear? Then update the LogicalEUN again */ 562 /* All clear? Then update the LogicalEUN again */
563 ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(u_int16_t), 563 ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
564 &retlen, (u_char *)&srcunitswap); 564 &retlen, (u_char *)&srcunitswap);
565 565
566 if (ret) { 566 if (ret) {
@@ -605,8 +605,8 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
605 605
606static int reclaim_block(partition_t *part) 606static int reclaim_block(partition_t *part)
607{ 607{
608 u_int16_t i, eun, xfer; 608 uint16_t i, eun, xfer;
609 u_int32_t best; 609 uint32_t best;
610 int queued, ret; 610 int queued, ret;
611 611
612 DEBUG(0, "ftl_cs: reclaiming space...\n"); 612 DEBUG(0, "ftl_cs: reclaiming space...\n");
@@ -723,10 +723,10 @@ static void dump_lists(partition_t *part)
723} 723}
724#endif 724#endif
725 725
726static u_int32_t find_free(partition_t *part) 726static uint32_t find_free(partition_t *part)
727{ 727{
728 u_int16_t stop, eun; 728 uint16_t stop, eun;
729 u_int32_t blk; 729 uint32_t blk;
730 size_t retlen; 730 size_t retlen;
731 int ret; 731 int ret;
732 732
@@ -749,7 +749,7 @@ static u_int32_t find_free(partition_t *part)
749 749
750 ret = part->mbd.mtd->read(part->mbd.mtd, 750 ret = part->mbd.mtd->read(part->mbd.mtd,
751 part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset), 751 part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
752 part->BlocksPerUnit * sizeof(u_int32_t), 752 part->BlocksPerUnit * sizeof(uint32_t),
753 &retlen, (u_char *) (part->bam_cache)); 753 &retlen, (u_char *) (part->bam_cache));
754 754
755 if (ret) { 755 if (ret) {
@@ -786,7 +786,7 @@ static u_int32_t find_free(partition_t *part)
786static int ftl_read(partition_t *part, caddr_t buffer, 786static int ftl_read(partition_t *part, caddr_t buffer,
787 u_long sector, u_long nblocks) 787 u_long sector, u_long nblocks)
788{ 788{
789 u_int32_t log_addr, bsize; 789 uint32_t log_addr, bsize;
790 u_long i; 790 u_long i;
791 int ret; 791 int ret;
792 size_t offset, retlen; 792 size_t offset, retlen;
@@ -829,14 +829,14 @@ static int ftl_read(partition_t *part, caddr_t buffer,
829 829
830======================================================================*/ 830======================================================================*/
831 831
832static int set_bam_entry(partition_t *part, u_int32_t log_addr, 832static int set_bam_entry(partition_t *part, uint32_t log_addr,
833 u_int32_t virt_addr) 833 uint32_t virt_addr)
834{ 834{
835 u_int32_t bsize, blk, le_virt_addr; 835 uint32_t bsize, blk, le_virt_addr;
836#ifdef PSYCHO_DEBUG 836#ifdef PSYCHO_DEBUG
837 u_int32_t old_addr; 837 uint32_t old_addr;
838#endif 838#endif
839 u_int16_t eun; 839 uint16_t eun;
840 int ret; 840 int ret;
841 size_t retlen, offset; 841 size_t retlen, offset;
842 842
@@ -845,11 +845,11 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr,
845 bsize = 1 << part->header.EraseUnitSize; 845 bsize = 1 << part->header.EraseUnitSize;
846 eun = log_addr / bsize; 846 eun = log_addr / bsize;
847 blk = (log_addr % bsize) / SECTOR_SIZE; 847 blk = (log_addr % bsize) / SECTOR_SIZE;
848 offset = (part->EUNInfo[eun].Offset + blk * sizeof(u_int32_t) + 848 offset = (part->EUNInfo[eun].Offset + blk * sizeof(uint32_t) +
849 le32_to_cpu(part->header.BAMOffset)); 849 le32_to_cpu(part->header.BAMOffset));
850 850
851#ifdef PSYCHO_DEBUG 851#ifdef PSYCHO_DEBUG
852 ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(u_int32_t), 852 ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t),
853 &retlen, (u_char *)&old_addr); 853 &retlen, (u_char *)&old_addr);
854 if (ret) { 854 if (ret) {
855 printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret); 855 printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
@@ -886,7 +886,7 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr,
886#endif 886#endif
887 part->bam_cache[blk] = le_virt_addr; 887 part->bam_cache[blk] = le_virt_addr;
888 } 888 }
889 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t), 889 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
890 &retlen, (u_char *)&le_virt_addr); 890 &retlen, (u_char *)&le_virt_addr);
891 891
892 if (ret) { 892 if (ret) {
@@ -900,7 +900,7 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr,
900static int ftl_write(partition_t *part, caddr_t buffer, 900static int ftl_write(partition_t *part, caddr_t buffer,
901 u_long sector, u_long nblocks) 901 u_long sector, u_long nblocks)
902{ 902{
903 u_int32_t bsize, log_addr, virt_addr, old_addr, blk; 903 uint32_t bsize, log_addr, virt_addr, old_addr, blk;
904 u_long i; 904 u_long i;
905 int ret; 905 int ret;
906 size_t retlen, offset; 906 size_t retlen, offset;
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 50ce13887f63..73f05227dc8c 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -50,7 +50,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
50 struct INFTLrecord *inftl; 50 struct INFTLrecord *inftl;
51 unsigned long temp; 51 unsigned long temp;
52 52
53 if (mtd->type != MTD_NANDFLASH) 53 if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
54 return; 54 return;
55 /* OK, this is moderately ugly. But probably safe. Alternatives? */ 55 /* OK, this is moderately ugly. But probably safe. Alternatives? */
56 if (memcmp(mtd->name, "DiskOnChip", 10)) 56 if (memcmp(mtd->name, "DiskOnChip", 10))
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 9113628ed1ef..f751dd97c549 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -63,7 +63,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
63 * otherwise. 63 * otherwise.
64 */ 64 */
65 inftl->EraseSize = inftl->mbd.mtd->erasesize; 65 inftl->EraseSize = inftl->mbd.mtd->erasesize;
66 inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize; 66 inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
67 67
68 inftl->MediaUnit = BLOCK_NIL; 68 inftl->MediaUnit = BLOCK_NIL;
69 69
@@ -187,7 +187,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
187 mh->BlockMultiplierBits); 187 mh->BlockMultiplierBits);
188 inftl->EraseSize = inftl->mbd.mtd->erasesize << 188 inftl->EraseSize = inftl->mbd.mtd->erasesize <<
189 mh->BlockMultiplierBits; 189 mh->BlockMultiplierBits;
190 inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize; 190 inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
191 block >>= mh->BlockMultiplierBits; 191 block >>= mh->BlockMultiplierBits;
192 } 192 }
193 193
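
Both INFTL hunks above deal with mtd->size having grown to a 64-bit quantity while the INFTL format only addresses 32 bits: devices larger than 4 GiB are rejected up front, and the block-count divisions are done in 32-bit space. A minimal sketch of the pattern (my_add_mtd is a hypothetical callback name):

#include <linux/kernel.h>	/* UINT_MAX */
#include <linux/mtd/mtd.h>

static void my_add_mtd(struct mtd_info *mtd)
{
	u32 nb_blocks;

	if (mtd->size > UINT_MAX)	/* > 4 GiB cannot be represented */
		return;

	/* safe now: the value fits, so divide as a 32-bit quantity */
	nb_blocks = (u32)mtd->size / mtd->erasesize;
	(void)nb_blocks;
}
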
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig
new file mode 100644
index 000000000000..acd4ea9b2278
--- /dev/null
+++ b/drivers/mtd/lpddr/Kconfig
@@ -0,0 +1,22 @@
1# drivers/mtd/chips/Kconfig
2
3menu "LPDDR flash memory drivers"
4 depends on MTD!=n
5
6config MTD_LPDDR
7 tristate "Support for LPDDR flash chips"
8 select MTD_QINFO_PROBE
9 help
10 This option enables support for LPDDR (Low power double data rate)
11 flash chips. Synonymous with Mobile-DDR. It is a new standard for
12 DDR memories, intended for battery-operated systems.
13
14config MTD_QINFO_PROBE
15 tristate "Detect flash chips by QINFO probe"
16 help
17 Device Information for LPDDR chips is offered through the Overlay
18 Window QINFO interface, which permits the same software to drive
19 entire families of devices. This serves a similar purpose to CFI
20 on legacy flash products.
21endmenu
22
diff --git a/drivers/mtd/lpddr/Makefile b/drivers/mtd/lpddr/Makefile
new file mode 100644
index 000000000000..da48e46b5812
--- /dev/null
+++ b/drivers/mtd/lpddr/Makefile
@@ -0,0 +1,6 @@
1#
2# linux/drivers/mtd/lpddr/Makefile
3#
4
5obj-$(CONFIG_MTD_QINFO_PROBE) += qinfo_probe.o
6obj-$(CONFIG_MTD_LPDDR) += lpddr_cmds.o
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
new file mode 100644
index 000000000000..e22ca49583e7
--- /dev/null
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -0,0 +1,796 @@
1/*
2 * LPDDR flash memory device operations. This module provides read, write,
3 * erase, lock/unlock support for LPDDR flash memories
4 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
5 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
6 * Many thanks to Roman Borisov for initial enabling
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 * TODO:
23 * Implement VPP management
24 * Implement XIP support
25 * Implement OTP support
26 */
27#include <linux/mtd/pfow.h>
28#include <linux/mtd/qinfo.h>
29
30static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
31 size_t *retlen, u_char *buf);
32static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
33 size_t len, size_t *retlen, const u_char *buf);
34static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
35 unsigned long count, loff_t to, size_t *retlen);
36static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
37static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
38static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
39static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
40 size_t *retlen, void **mtdbuf, resource_size_t *phys);
41static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
42static int get_chip(struct map_info *map, struct flchip *chip, int mode);
43static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
44static void put_chip(struct map_info *map, struct flchip *chip);
45
46struct mtd_info *lpddr_cmdset(struct map_info *map)
47{
48 struct lpddr_private *lpddr = map->fldrv_priv;
49 struct flchip_shared *shared;
50 struct flchip *chip;
51 struct mtd_info *mtd;
52 int numchips;
53 int i, j;
54
55 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
56 if (!mtd) {
57 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
58 return NULL;
59 }
60 mtd->priv = map;
61 mtd->type = MTD_NORFLASH;
62
63 /* Fill in the default mtd operations */
64 mtd->read = lpddr_read;
65 mtd->type = MTD_NORFLASH;
66 mtd->flags = MTD_CAP_NORFLASH;
67 mtd->flags &= ~MTD_BIT_WRITEABLE;
68 mtd->erase = lpddr_erase;
69 mtd->write = lpddr_write_buffers;
70 mtd->writev = lpddr_writev;
71 mtd->read_oob = NULL;
72 mtd->write_oob = NULL;
73 mtd->sync = NULL;
74 mtd->lock = lpddr_lock;
75 mtd->unlock = lpddr_unlock;
76 mtd->suspend = NULL;
77 mtd->resume = NULL;
78 if (map_is_linear(map)) {
79 mtd->point = lpddr_point;
80 mtd->unpoint = lpddr_unpoint;
81 }
82 mtd->block_isbad = NULL;
83 mtd->block_markbad = NULL;
84 mtd->size = 1 << lpddr->qinfo->DevSizeShift;
85 mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
86 mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
87
88 shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
89 GFP_KERNEL);
90 if (!shared) {
91 kfree(lpddr);
92 kfree(mtd);
93 return NULL;
94 }
95
96 chip = &lpddr->chips[0];
97 numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
98 for (i = 0; i < numchips; i++) {
99 shared[i].writing = shared[i].erasing = NULL;
100 spin_lock_init(&shared[i].lock);
101 for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
102 *chip = lpddr->chips[i];
103 chip->start += j << lpddr->chipshift;
104 chip->oldstate = chip->state = FL_READY;
105 chip->priv = &shared[i];
106 /* those should be reset too since
107 they create memory references. */
108 init_waitqueue_head(&chip->wq);
109 spin_lock_init(&chip->_spinlock);
110 chip->mutex = &chip->_spinlock;
111 chip++;
112 }
113 }
114
115 return mtd;
116}
117EXPORT_SYMBOL(lpddr_cmdset);
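
A mapping driver normally reaches lpddr_cmdset() indirectly, by asking the chip-driver layer to probe its map; the qinfo_probe module added later in this series registers under the name "qinfo_probe" and calls lpddr_probe(), which in turn calls lpddr_cmdset(). A hedged sketch of that call path (the map fields are made up, and the phys/virt ioremap setup a real mapping driver performs is omitted):

#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>

static struct map_info example_map = {
	.name      = "example-lpddr",	/* hypothetical board mapping */
	.size      = 0x4000000,		/* 64 MiB window */
	.bankwidth = 2,
	.pfow_base = 0,			/* PFOW window offset, board-specific */
};

static struct mtd_info *attach_lpddr(void)
{
	/* do_map_probe() walks registered chip drivers by name */
	return do_map_probe("qinfo_probe", &example_map);
}
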
118
119static int wait_for_ready(struct map_info *map, struct flchip *chip,
120 unsigned int chip_op_time)
121{
122 unsigned int timeo, reset_timeo, sleep_time;
123 unsigned int dsr;
124 flstate_t chip_state = chip->state;
125 int ret = 0;
126
127 /* set our timeout to 8 times the expected delay */
128 timeo = chip_op_time * 8;
129 if (!timeo)
130 timeo = 500000;
131 reset_timeo = timeo;
132 sleep_time = chip_op_time / 2;
133
134 for (;;) {
135 dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
136 if (dsr & DSR_READY_STATUS)
137 break;
138 if (!timeo) {
139 printk(KERN_ERR "%s: Flash timeout error state %d \n",
140 map->name, chip_state);
141 ret = -ETIME;
142 break;
143 }
144
145 /* OK Still waiting. Drop the lock, wait a while and retry. */
146 spin_unlock(chip->mutex);
147 if (sleep_time >= 1000000/HZ) {
148 /*
149 * Half of the normal delay still remaining
150 * can be performed with a sleeping delay instead
151 * of busy waiting.
152 */
153 msleep(sleep_time/1000);
154 timeo -= sleep_time;
155 sleep_time = 1000000/HZ;
156 } else {
157 udelay(1);
158 cond_resched();
159 timeo--;
160 }
161 spin_lock(chip->mutex);
162
163 while (chip->state != chip_state) {
164 /* Someone's suspended the operation: sleep */
165 DECLARE_WAITQUEUE(wait, current);
166 set_current_state(TASK_UNINTERRUPTIBLE);
167 add_wait_queue(&chip->wq, &wait);
168 spin_unlock(chip->mutex);
169 schedule();
170 remove_wait_queue(&chip->wq, &wait);
171 spin_lock(chip->mutex);
172 }
173 if (chip->erase_suspended || chip->write_suspended) {
174 /* Suspend has occurred while sleeping: reset timeout */
175 timeo = reset_timeo;
176 chip->erase_suspended = chip->write_suspended = 0;
177 }
178 }
179 /* check status for errors */
180 if (dsr & DSR_ERR) {
181 /* Clear DSR*/
182 map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
183 printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
184 map->name, dsr);
185 print_drs_error(dsr);
186 ret = -EIO;
187 }
188 chip->state = FL_READY;
189 return ret;
190}
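
A worked example of the timeout policy above, with assumed values:

/*
 * A chip reporting ProgBufferTime = 9 makes do_write_buffer() pass
 * chip_op_time = 1 << 9 = 512, so timeo = 8 * 512 = 4096. sleep_time
 * starts at 512 / 2 = 256 usec, which at HZ = 100 is well below one
 * jiffy (1000000/HZ = 10000 usec), so the loop busy-waits with
 * udelay(1) and decrements timeo once per poll; msleep() is reserved
 * for waits of at least a jiffy, where oversleeping does not matter.
 */
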
191
192static int get_chip(struct map_info *map, struct flchip *chip, int mode)
193{
194 int ret;
195 DECLARE_WAITQUEUE(wait, current);
196
197 retry:
198 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
199 && chip->state != FL_SYNCING) {
200 /*
201 * OK. We have a possibility of contention on the write/erase
202 * operations which are global to the real chip and not per
203 * partition. So let's fight it over in the partition which
204 * currently has authority on the operation.
205 *
206 * The rules are as follows:
207 *
208 * - any write operation must own shared->writing.
209 *
210 * - any erase operation must own _both_ shared->writing and
211 * shared->erasing.
212 *
213 * - contention arbitration is handled in the owner's context.
214 *
215 * The 'shared' struct can be read and/or written only when
216 * its lock is taken.
217 */
218 struct flchip_shared *shared = chip->priv;
219 struct flchip *contender;
220 spin_lock(&shared->lock);
221 contender = shared->writing;
222 if (contender && contender != chip) {
223 /*
224 * The engine to perform desired operation on this
225 * partition is already in use by someone else.
226 * Let's fight over it in the context of the chip
227 * currently using it. If it is possible to suspend,
228 * that other partition will do just that, otherwise
229 * it'll happily send us to sleep. In any case, when
230 * get_chip returns success we're clear to go ahead.
231 */
232 ret = spin_trylock(contender->mutex);
233 spin_unlock(&shared->lock);
234 if (!ret)
235 goto retry;
236 spin_unlock(chip->mutex);
237 ret = chip_ready(map, contender, mode);
238 spin_lock(chip->mutex);
239
240 if (ret == -EAGAIN) {
241 spin_unlock(contender->mutex);
242 goto retry;
243 }
244 if (ret) {
245 spin_unlock(contender->mutex);
246 return ret;
247 }
248 spin_lock(&shared->lock);
249
250 /* We should not own chip if it is already in FL_SYNCING
251 * state. Put contender and retry. */
252 if (chip->state == FL_SYNCING) {
253 put_chip(map, contender);
254 spin_unlock(contender->mutex);
255 goto retry;
256 }
257 spin_unlock(contender->mutex);
258 }
259
260 /* Check if we have suspended erase on this chip.
261 Must sleep in such a case. */
262 if (mode == FL_ERASING && shared->erasing
263 && shared->erasing->oldstate == FL_ERASING) {
264 spin_unlock(&shared->lock);
265 set_current_state(TASK_UNINTERRUPTIBLE);
266 add_wait_queue(&chip->wq, &wait);
267 spin_unlock(chip->mutex);
268 schedule();
269 remove_wait_queue(&chip->wq, &wait);
270 spin_lock(chip->mutex);
271 goto retry;
272 }
273
274 /* We now own it */
275 shared->writing = chip;
276 if (mode == FL_ERASING)
277 shared->erasing = chip;
278 spin_unlock(&shared->lock);
279 }
280
281 ret = chip_ready(map, chip, mode);
282 if (ret == -EAGAIN)
283 goto retry;
284
285 return ret;
286}
287
288static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
289{
290 struct lpddr_private *lpddr = map->fldrv_priv;
291 int ret = 0;
292 DECLARE_WAITQUEUE(wait, current);
293
294 /* Prevent setting state FL_SYNCING for chip in suspended state. */
295 if (FL_SYNCING == mode && FL_READY != chip->oldstate)
296 goto sleep;
297
298 switch (chip->state) {
299 case FL_READY:
300 case FL_JEDEC_QUERY:
301 return 0;
302
303 case FL_ERASING:
304 if (!lpddr->qinfo->SuspEraseSupp ||
305 !(mode == FL_READY || mode == FL_POINT))
306 goto sleep;
307
308 map_write(map, CMD(LPDDR_SUSPEND),
309 map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
310 chip->oldstate = FL_ERASING;
311 chip->state = FL_ERASE_SUSPENDING;
312 ret = wait_for_ready(map, chip, 0);
313 if (ret) {
314 /* Oops. Something went wrong. */
315 /* Resume and pretend we weren't here. */
316 map_write(map, CMD(LPDDR_RESUME),
317 map->pfow_base + PFOW_COMMAND_CODE);
318 map_write(map, CMD(LPDDR_START_EXECUTION),
319 map->pfow_base + PFOW_COMMAND_EXECUTE);
320 chip->state = FL_ERASING;
321 chip->oldstate = FL_READY;
322 printk(KERN_ERR "%s: suspend operation failed. "
323 "State may be wrong \n", map->name);
324 return -EIO;
325 }
326 chip->erase_suspended = 1;
327 chip->state = FL_READY;
328 return 0;
329 /* Erase suspend */
330 case FL_POINT:
331 /* Only if there's no operation suspended... */
332 if (mode == FL_READY && chip->oldstate == FL_READY)
333 return 0;
334
335 default:
336sleep:
337 set_current_state(TASK_UNINTERRUPTIBLE);
338 add_wait_queue(&chip->wq, &wait);
339 spin_unlock(chip->mutex);
340 schedule();
341 remove_wait_queue(&chip->wq, &wait);
342 spin_lock(chip->mutex);
343 return -EAGAIN;
344 }
345}
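
Tracing the erase-suspend handshake above for one concrete caller:

/*
 * A read arriving while an erase runs (chip->state == FL_ERASING,
 * mode == FL_READY) writes LPDDR_SUSPEND, moves the chip to
 * FL_ERASE_SUSPENDING and waits for DSR_READY_STATUS. On success the
 * chip parks in FL_READY with erase_suspended = 1 and oldstate still
 * FL_ERASING, so put_chip() later resumes the erase via LPDDR_RESUME
 * followed by LPDDR_START_EXECUTION.
 */
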
346
347static void put_chip(struct map_info *map, struct flchip *chip)
348{
349 if (chip->priv) {
350 struct flchip_shared *shared = chip->priv;
351 spin_lock(&shared->lock);
352 if (shared->writing == chip && chip->oldstate == FL_READY) {
353 /* We own the ability to write, but we're done */
354 shared->writing = shared->erasing;
355 if (shared->writing && shared->writing != chip) {
356 /* give back the ownership */
357 struct flchip *loaner = shared->writing;
358 spin_lock(loaner->mutex);
359 spin_unlock(&shared->lock);
360 spin_unlock(chip->mutex);
361 put_chip(map, loaner);
362 spin_lock(chip->mutex);
363 spin_unlock(loaner->mutex);
364 wake_up(&chip->wq);
365 return;
366 }
367 shared->erasing = NULL;
368 shared->writing = NULL;
369 } else if (shared->erasing == chip && shared->writing != chip) {
370 /*
371 * We own the ability to erase without the ability
372 * to write, which means the erase was suspended
373 * and some other partition is currently writing.
374 * Don't let the switch below mess things up since
375 * we don't have ownership to resume anything.
376 */
377 spin_unlock(&shared->lock);
378 wake_up(&chip->wq);
379 return;
380 }
381 spin_unlock(&shared->lock);
382 }
383
384 switch (chip->oldstate) {
385 case FL_ERASING:
386 chip->state = chip->oldstate;
387 map_write(map, CMD(LPDDR_RESUME),
388 map->pfow_base + PFOW_COMMAND_CODE);
389 map_write(map, CMD(LPDDR_START_EXECUTION),
390 map->pfow_base + PFOW_COMMAND_EXECUTE);
391 chip->oldstate = FL_READY;
392 chip->state = FL_ERASING;
393 break;
394 case FL_READY:
395 break;
396 default:
397 printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
398 map->name, chip->oldstate);
399 }
400 wake_up(&chip->wq);
401}
402
403int do_write_buffer(struct map_info *map, struct flchip *chip,
404 unsigned long adr, const struct kvec **pvec,
405 unsigned long *pvec_seek, int len)
406{
407 struct lpddr_private *lpddr = map->fldrv_priv;
408 map_word datum;
409 int ret, wbufsize, word_gap, words;
410 const struct kvec *vec;
411 unsigned long vec_seek;
412 unsigned long prog_buf_ofs;
413
414 wbufsize = 1 << lpddr->qinfo->BufSizeShift;
415
416 spin_lock(chip->mutex);
417 ret = get_chip(map, chip, FL_WRITING);
418 if (ret) {
419 spin_unlock(chip->mutex);
420 return ret;
421 }
422 /* Figure out the number of words to write */
423 word_gap = (-adr & (map_bankwidth(map)-1));
424 words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
425 if (!word_gap) {
426 words--;
427 } else {
428 word_gap = map_bankwidth(map) - word_gap;
429 adr -= word_gap;
430 datum = map_word_ff(map);
431 }
432 /* Write data */
433 /* Get the program buffer offset from PFOW register data first*/
434 prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
435 map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
436 vec = *pvec;
437 vec_seek = *pvec_seek;
438 do {
439 int n = map_bankwidth(map) - word_gap;
440
441 if (n > vec->iov_len - vec_seek)
442 n = vec->iov_len - vec_seek;
443 if (n > len)
444 n = len;
445
446 if (!word_gap && (len < map_bankwidth(map)))
447 datum = map_word_ff(map);
448
449 datum = map_word_load_partial(map, datum,
450 vec->iov_base + vec_seek, word_gap, n);
451
452 len -= n;
453 word_gap += n;
454 if (!len || word_gap == map_bankwidth(map)) {
455 map_write(map, datum, prog_buf_ofs);
456 prog_buf_ofs += map_bankwidth(map);
457 word_gap = 0;
458 }
459
460 vec_seek += n;
461 if (vec_seek == vec->iov_len) {
462 vec++;
463 vec_seek = 0;
464 }
465 } while (len);
466 *pvec = vec;
467 *pvec_seek = vec_seek;
468
469 /* GO GO GO */
470 send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
471 chip->state = FL_WRITING;
472 ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
473 if (ret) {
474 printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n",
475 map->name, ret, adr);
476 goto out;
477 }
478
479 out: put_chip(map, chip);
480 spin_unlock(chip->mutex);
481 return ret;
482}
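
A worked example of the alignment arithmetic in do_write_buffer(), with assumed values:

/*
 * With a 32-bit map (map_bankwidth(map) == 4) and a write of len = 10
 * at adr = 6:
 *
 *	word_gap = (-6) & 3 = 2		write is not word-aligned
 *	words    = (10 - 2 + 3) / 4 = 2
 *	word_gap = 4 - 2 = 2, adr = 4	back up to the word boundary
 *
 * The first map_word is preloaded with all-0xff (programming 0xff
 * leaves flash contents untouched) and map_word_load_partial() fills
 * only bytes 2..3 of it, so the two bytes below the original adr are
 * preserved.
 */
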
483
484int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
485{
486 struct map_info *map = mtd->priv;
487 struct lpddr_private *lpddr = map->fldrv_priv;
488 int chipnum = adr >> lpddr->chipshift;
489 struct flchip *chip = &lpddr->chips[chipnum];
490 int ret;
491
492 spin_lock(chip->mutex);
493 ret = get_chip(map, chip, FL_ERASING);
494 if (ret) {
495 spin_unlock(chip->mutex);
496 return ret;
497 }
498 send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
499 chip->state = FL_ERASING;
500 ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
501 if (ret) {
502 printk(KERN_WARNING"%s Erase block error %d at: %llx\n",
503 map->name, ret, adr);
504 goto out;
505 }
506 out: put_chip(map, chip);
507 spin_unlock(chip->mutex);
508 return ret;
509}
510
511static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
512 size_t *retlen, u_char *buf)
513{
514 struct map_info *map = mtd->priv;
515 struct lpddr_private *lpddr = map->fldrv_priv;
516 int chipnum = adr >> lpddr->chipshift;
517 struct flchip *chip = &lpddr->chips[chipnum];
518 int ret = 0;
519
520 spin_lock(chip->mutex);
521 ret = get_chip(map, chip, FL_READY);
522 if (ret) {
523 spin_unlock(chip->mutex);
524 return ret;
525 }
526
527 map_copy_from(map, buf, adr, len);
528 *retlen = len;
529
530 put_chip(map, chip);
531 spin_unlock(chip->mutex);
532 return ret;
533}
534
535static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
536 size_t *retlen, void **mtdbuf, resource_size_t *phys)
537{
538 struct map_info *map = mtd->priv;
539 struct lpddr_private *lpddr = map->fldrv_priv;
540 int chipnum = adr >> lpddr->chipshift;
541 unsigned long ofs, last_end = 0;
542 struct flchip *chip = &lpddr->chips[chipnum];
543 int ret = 0;
544
545 if (!map->virt || (adr + len > mtd->size))
546 return -EINVAL;
547
548 /* ofs: offset within the first chip at which the first read starts */
549 ofs = adr - (chipnum << lpddr->chipshift);
550
551 *mtdbuf = (void *)map->virt + chip->start + ofs;
552 *retlen = 0;
553
554 while (len) {
555 unsigned long thislen;
556
557 if (chipnum >= lpddr->numchips)
558 break;
559
560 /* We cannot point across chips that are virtually disjoint */
561 if (!last_end)
562 last_end = chip->start;
563 else if (chip->start != last_end)
564 break;
565
566 if ((len + ofs - 1) >> lpddr->chipshift)
567 thislen = (1<<lpddr->chipshift) - ofs;
568 else
569 thislen = len;
570 /* get the chip */
571 spin_lock(chip->mutex);
572 ret = get_chip(map, chip, FL_POINT);
573 spin_unlock(chip->mutex);
574 if (ret)
575 break;
576
577 chip->state = FL_POINT;
578 chip->ref_point_counter++;
579 *retlen += thislen;
580 len -= thislen;
581
582 ofs = 0;
583 last_end += 1 << lpddr->chipshift;
584 chipnum++;
585 chip = &lpddr->chips[chipnum];
586 }
587 return 0;
588}
589
590static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
591{
592 struct map_info *map = mtd->priv;
593 struct lpddr_private *lpddr = map->fldrv_priv;
594 int chipnum = adr >> lpddr->chipshift;
595 unsigned long ofs;
596
597 /* ofs: offset within the first chip at which the first read starts */
598 ofs = adr - (chipnum << lpddr->chipshift);
599
600 while (len) {
601 unsigned long thislen;
602 struct flchip *chip;
603
604 chip = &lpddr->chips[chipnum];
605 if (chipnum >= lpddr->numchips)
606 break;
607
608 if ((len + ofs - 1) >> lpddr->chipshift)
609 thislen = (1<<lpddr->chipshift) - ofs;
610 else
611 thislen = len;
612
613 spin_lock(chip->mutex);
614 if (chip->state == FL_POINT) {
615 chip->ref_point_counter--;
616 if (chip->ref_point_counter == 0)
617 chip->state = FL_READY;
618 } else
619 printk(KERN_WARNING "%s: Warning: unpoint called on non-"
620 "pointed region\n", map->name);
621
622 put_chip(map, chip);
623 spin_unlock(chip->mutex);
624
625 len -= thislen;
626 ofs = 0;
627 chipnum++;
628 }
629}
630
631static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
632 size_t *retlen, const u_char *buf)
633{
634 struct kvec vec;
635
636 vec.iov_base = (void *) buf;
637 vec.iov_len = len;
638
639 return lpddr_writev(mtd, &vec, 1, to, retlen);
640}
641
642
643static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
644 unsigned long count, loff_t to, size_t *retlen)
645{
646 struct map_info *map = mtd->priv;
647 struct lpddr_private *lpddr = map->fldrv_priv;
648 int ret = 0;
649 int chipnum;
650 unsigned long ofs, vec_seek, i;
651 int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
652
653 size_t len = 0;
654
655 for (i = 0; i < count; i++)
656 len += vecs[i].iov_len;
657
658 *retlen = 0;
659 if (!len)
660 return 0;
661
662 chipnum = to >> lpddr->chipshift;
663
664 ofs = to;
665 vec_seek = 0;
666
667 do {
668 /* We must not cross write block boundaries */
669 int size = wbufsize - (ofs & (wbufsize-1));
670
671 if (size > len)
672 size = len;
673
674 ret = do_write_buffer(map, &lpddr->chips[chipnum],
675 ofs, &vecs, &vec_seek, size);
676 if (ret)
677 return ret;
678
679 ofs += size;
680 (*retlen) += size;
681 len -= size;
682
683 /* Be nice and reschedule with the chip in a usable
684 * state for other processes */
685 cond_resched();
686
687 } while (len);
688
689 return 0;
690}
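
A worked example of the chunking loop above, with assumed values:

/*
 * With a program buffer of wbufsize = 64 bytes and a 100-byte write
 * starting at to = 0x30:
 *
 *	1st call: size = 64 - (0x30 & 0x3f) = 16	up to the boundary
 *	2nd call: size = 64				one full buffer
 *	3rd call: size = 100 - 16 - 64 = 20		the remainder
 *
 * so no single do_write_buffer() call crosses a write-buffer boundary.
 */
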
691
692static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
693{
694 unsigned long ofs, len;
695 int ret;
696 struct map_info *map = mtd->priv;
697 struct lpddr_private *lpddr = map->fldrv_priv;
698 int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
699
700 ofs = instr->addr;
701 len = instr->len;
702
703 if (ofs > mtd->size || (len + ofs) > mtd->size)
704 return -EINVAL;
705
706 while (len > 0) {
707 ret = do_erase_oneblock(mtd, ofs);
708 if (ret)
709 return ret;
710 ofs += size;
711 len -= size;
712 }
713 instr->state = MTD_ERASE_DONE;
714 mtd_erase_callback(instr);
715
716 return 0;
717}
718
719#define DO_XXLOCK_LOCK 1
720#define DO_XXLOCK_UNLOCK 2
721int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
722{
723 int ret = 0;
724 struct map_info *map = mtd->priv;
725 struct lpddr_private *lpddr = map->fldrv_priv;
726 int chipnum = adr >> lpddr->chipshift;
727 struct flchip *chip = &lpddr->chips[chipnum];
728
729 spin_lock(chip->mutex);
730 ret = get_chip(map, chip, FL_LOCKING);
731 if (ret) {
732 spin_unlock(chip->mutex);
733 return ret;
734 }
735
736 if (thunk == DO_XXLOCK_LOCK) {
737 send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
738 chip->state = FL_LOCKING;
739 } else if (thunk == DO_XXLOCK_UNLOCK) {
740 send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
741 chip->state = FL_UNLOCKING;
742 } else
743 BUG();
744
745 ret = wait_for_ready(map, chip, 1);
746 if (ret) {
747 printk(KERN_ERR "%s: block lock/unlock error status %d\n",
748 map->name, ret);
749 goto out;
750 }
751out: put_chip(map, chip);
752 spin_unlock(chip->mutex);
753 return ret;
754}
755
756static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
757{
758 return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
759}
760
761static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
762{
763 return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
764}
765
766int word_program(struct map_info *map, loff_t adr, uint32_t curval)
767{
768 int ret;
769 struct lpddr_private *lpddr = map->fldrv_priv;
770 int chipnum = adr >> lpddr->chipshift;
771 struct flchip *chip = &lpddr->chips[chipnum];
772
773 spin_lock(chip->mutex);
774 ret = get_chip(map, chip, FL_WRITING);
775 if (ret) {
776 spin_unlock(chip->mutex);
777 return ret;
778 }
779
780 send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval);
781
782 ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime));
783 if (ret) {
784 printk(KERN_WARNING"%s word_program error at: %llx; val: %x\n",
785 map->name, adr, curval);
786 goto out;
787 }
788
789out: put_chip(map, chip);
790 spin_unlock(chip->mutex);
791 return ret;
792}
793
794MODULE_LICENSE("GPL");
795MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
796MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
new file mode 100644
index 000000000000..79bf40f48b75
--- /dev/null
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -0,0 +1,255 @@
1/*
2 * Probing flash chips with QINFO records.
3 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
4 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 */
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/errno.h>
26#include <linux/slab.h>
27#include <linux/interrupt.h>
28
29#include <linux/mtd/xip.h>
30#include <linux/mtd/map.h>
31#include <linux/mtd/pfow.h>
32#include <linux/mtd/qinfo.h>
33
34static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr);
35struct mtd_info *lpddr_probe(struct map_info *map);
36static struct lpddr_private *lpddr_probe_chip(struct map_info *map);
37static int lpddr_pfow_present(struct map_info *map,
38 struct lpddr_private *lpddr);
39
40static struct qinfo_query_info qinfo_array[] = {
41 /* General device info */
42 {0, 0, "DevSizeShift", "Device size 2^n bytes"},
43 {0, 3, "BufSizeShift", "Program buffer size 2^n bytes"},
44 /* Erase block information */
45 {1, 1, "TotalBlocksNum", "Total number of blocks"},
46 {1, 2, "UniformBlockSizeShift", "Uniform block size 2^n bytes"},
47 /* Partition information */
48 {2, 1, "HWPartsNum", "Number of hardware partitions"},
49 /* Optional features */
50 {5, 1, "SuspEraseSupp", "Suspend erase supported"},
51 /* Operation typical time */
52 {10, 0, "SingleWordProgTime", "Single word program 2^n u-sec"},
53 {10, 1, "ProgBufferTime", "Program buffer write 2^n u-sec"},
54 {10, 2, "BlockEraseTime", "Block erase 2^n m-sec"},
55 {10, 3, "FullChipEraseTime", "Full chip erase 2^n m-sec"},
56};
57
58static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
59{
60 int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info);
61 int i;
62 int bankwidth = map_bankwidth(map) * 8;
63 int major, minor;
64
65 for (i = 0; i < qinfo_lines; i++) {
66 if (strcmp(id_str, qinfo_array[i].id_str) == 0) {
67 major = qinfo_array[i].major & ((1 << bankwidth) - 1);
68 minor = qinfo_array[i].minor & ((1 << bankwidth) - 1);
69 return minor | (major << bankwidth);
70 }
71 }
72 printk(KERN_ERR"%s qinfo id string is wrong! \n", map->name);
73 BUG();
74 return -1;
75}
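
A worked example of the record-position packing, assuming a 16-bit bank:

/*
 * With map_bankwidth(map) == 2, bankwidth = 16. For "BlockEraseTime"
 * (major 10, minor 2) this returns 2 | (10 << 16) = 0x000a0002.
 * lpddr_info_query() below splits that straight back up: the low half
 * (2) is written to PFOW_COMMAND_ADDRESS_L and the high half (10) to
 * PFOW_COMMAND_ADDRESS_H.
 */
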
76
77static uint16_t lpddr_info_query(struct map_info *map, char *id_str)
78{
79 unsigned int dsr, val;
80 int bits_per_chip = map_bankwidth(map) * 8;
81 unsigned long adr = lpddr_get_qinforec_pos(map, id_str);
82 int attempts = 20;
83
84 /* Write a request for the PFOW record */
85 map_write(map, CMD(LPDDR_INFO_QUERY),
86 map->pfow_base + PFOW_COMMAND_CODE);
87 map_write(map, CMD(adr & ((1 << bits_per_chip) - 1)),
88 map->pfow_base + PFOW_COMMAND_ADDRESS_L);
89 map_write(map, CMD(adr >> bits_per_chip),
90 map->pfow_base + PFOW_COMMAND_ADDRESS_H);
91 map_write(map, CMD(LPDDR_START_EXECUTION),
92 map->pfow_base + PFOW_COMMAND_EXECUTE);
93
94 while ((attempts--) > 0) {
95 dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
96 if (dsr & DSR_READY_STATUS)
97 break;
98 udelay(10);
99 }
100
101 val = CMDVAL(map_read(map, map->pfow_base + PFOW_COMMAND_DATA));
102 return val;
103}
104
105static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
106{
107 map_word pfow_val[4];
108
109 /* Check identification string */
110 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
111 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
112 pfow_val[2] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_O);
113 pfow_val[3] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_W);
114
115 if (!map_word_equal(map, CMD('P'), pfow_val[0]))
116 goto out;
117
118 if (!map_word_equal(map, CMD('F'), pfow_val[1]))
119 goto out;
120
121 if (!map_word_equal(map, CMD('O'), pfow_val[2]))
122 goto out;
123
124 if (!map_word_equal(map, CMD('W'), pfow_val[3]))
125 goto out;
126
127 return 1; /* "PFOW" is found */
128out:
129 printk(KERN_WARNING"%s: PFOW string at 0x%lx not found\n",
130 map->name, map->pfow_base);
131 return 0;
132}
133
134static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
135{
136
137 lpddr->qinfo = kmalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
138 if (!lpddr->qinfo) {
139 printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n",
140 map->name);
141 return 0;
142 }
143 memset(lpddr->qinfo, 0, sizeof(struct qinfo_chip));
144
145 /* Get the ManuID */
146 lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
147 /* Get the DeviceID */
148 lpddr->DevId = CMDVAL(map_read(map, map->pfow_base + PFOW_DEVICE_ID));
149 /* read parameters from chip qinfo table */
150 lpddr->qinfo->DevSizeShift = lpddr_info_query(map, "DevSizeShift");
151 lpddr->qinfo->TotalBlocksNum = lpddr_info_query(map, "TotalBlocksNum");
152 lpddr->qinfo->BufSizeShift = lpddr_info_query(map, "BufSizeShift");
153 lpddr->qinfo->HWPartsNum = lpddr_info_query(map, "HWPartsNum");
154 lpddr->qinfo->UniformBlockSizeShift =
155 lpddr_info_query(map, "UniformBlockSizeShift");
156 lpddr->qinfo->SuspEraseSupp = lpddr_info_query(map, "SuspEraseSupp");
157 lpddr->qinfo->SingleWordProgTime =
158 lpddr_info_query(map, "SingleWordProgTime");
159 lpddr->qinfo->ProgBufferTime = lpddr_info_query(map, "ProgBufferTime");
160 lpddr->qinfo->BlockEraseTime = lpddr_info_query(map, "BlockEraseTime");
161 return 1;
162}
163static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
164{
165 struct lpddr_private lpddr;
166 struct lpddr_private *retlpddr;
167 int numvirtchips;
168
169
170 if ((map->pfow_base + 0x1000) >= map->size) {
171 printk(KERN_NOTICE"%s Probe at base (0x%08lx) past the end of "
172 "the map (0x%08lx)\n", map->name,
173 (unsigned long)map->pfow_base, map->size - 1);
174 return NULL;
175 }
176 memset(&lpddr, 0, sizeof(struct lpddr_private));
177 if (!lpddr_pfow_present(map, &lpddr))
178 return NULL;
179
180 if (!lpddr_chip_setup(map, &lpddr))
181 return NULL;
182
183 /* Ok so we found a chip */
184 lpddr.chipshift = lpddr.qinfo->DevSizeShift;
185 lpddr.numchips = 1;
186
187 numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
188 retlpddr = kmalloc(sizeof(struct lpddr_private) +
189 numvirtchips * sizeof(struct flchip), GFP_KERNEL);
190 if (!retlpddr)
191 return NULL;
192
193 memset(retlpddr, 0, sizeof(struct lpddr_private) +
194 numvirtchips * sizeof(struct flchip));
195 memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private));
196
197 retlpddr->numchips = numvirtchips;
198 retlpddr->chipshift = retlpddr->qinfo->DevSizeShift -
199 __ffs(retlpddr->qinfo->HWPartsNum);
200
201 return retlpddr;
202}
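
A worked example of the chipshift computation, with assumed qinfo values:

/*
 * A 64 MiB chip reports DevSizeShift = 26; with HWPartsNum = 4
 * hardware partitions, numvirtchips = 1 * 4 = 4 and
 *
 *	chipshift = 26 - __ffs(4) = 26 - 2 = 24
 *
 * so each virtual chip spans 2^24 bytes (16 MiB) and the address bits
 * above bit 23 select the partition.
 */
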
203
204struct mtd_info *lpddr_probe(struct map_info *map)
205{
206 struct mtd_info *mtd = NULL;
207 struct lpddr_private *lpddr;
208
209 /* First probe the map to see if we can open PFOW here */
210 lpddr = lpddr_probe_chip(map);
211 if (!lpddr)
212 return NULL;
213
214 map->fldrv_priv = lpddr;
215 mtd = lpddr_cmdset(map);
216 if (mtd) {
217 if (mtd->size > map->size) {
218 printk(KERN_WARNING "Reducing visibility of %ldKiB chip "
219 "to %ldKiB\n", (unsigned long)mtd->size >> 10,
220 (unsigned long)map->size >> 10);
221 mtd->size = map->size;
222 }
223 return mtd;
224 }
225
226 kfree(lpddr->qinfo);
227 kfree(lpddr);
228 map->fldrv_priv = NULL;
229 return NULL;
230}
231
232static struct mtd_chip_driver lpddr_chipdrv = {
233 .probe = lpddr_probe,
234 .name = "qinfo_probe",
235 .module = THIS_MODULE
236};
237
238static int __init lpddr_probe_init(void)
239{
240 register_mtd_chip_driver(&lpddr_chipdrv);
241 return 0;
242}
243
244static void __exit lpddr_probe_exit(void)
245{
246 unregister_mtd_chip_driver(&lpddr_chipdrv);
247}
248
249module_init(lpddr_probe_init);
250module_exit(lpddr_probe_exit);
251
252MODULE_LICENSE("GPL");
253MODULE_AUTHOR("Vasiliy Leonenko <vasiliy.leonenko@gmail.com>");
254MODULE_DESCRIPTION("Driver to probe qinfo flash chips");
255
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5ea169362164..0225cbbf22de 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -10,8 +10,8 @@ config MTD_COMPLEX_MAPPINGS
10 paged mappings of flash chips. 10 paged mappings of flash chips.
11 11
12config MTD_PHYSMAP 12config MTD_PHYSMAP
13 tristate "CFI Flash device in physical memory map" 13 tristate "Flash device in physical memory map"
14 depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM 14 depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_LPDDR
15 help 15 help
16 This provides a 'mapping' driver which allows the NOR Flash and 16 This provides a 'mapping' driver which allows the NOR Flash and
17 ROM driver code to communicate with chips which are mapped 17 ROM driver code to communicate with chips which are mapped
@@ -23,9 +23,20 @@ config MTD_PHYSMAP
23 To compile this driver as a module, choose M here: the 23 To compile this driver as a module, choose M here: the
24 module will be called physmap. 24 module will be called physmap.
25 25
26config MTD_PHYSMAP_COMPAT
27 bool "Physmap compat support"
28 depends on MTD_PHYSMAP
29 default n
30 help
31 Setup a simple mapping via the Kconfig options. Normally the
32 physmap configuration options are done via your board's
33 resource file.
34
35 If unsure, say N here.
36
26config MTD_PHYSMAP_START 37config MTD_PHYSMAP_START
27 hex "Physical start address of flash mapping" 38 hex "Physical start address of flash mapping"
28 depends on MTD_PHYSMAP 39 depends on MTD_PHYSMAP_COMPAT
29 default "0x8000000" 40 default "0x8000000"
30 help 41 help
31 This is the physical memory location at which the flash chips 42 This is the physical memory location at which the flash chips
@@ -37,7 +48,7 @@ config MTD_PHYSMAP_START
37 48
38config MTD_PHYSMAP_LEN 49config MTD_PHYSMAP_LEN
39 hex "Physical length of flash mapping" 50 hex "Physical length of flash mapping"
40 depends on MTD_PHYSMAP 51 depends on MTD_PHYSMAP_COMPAT
41 default "0" 52 default "0"
42 help 53 help
43 This is the total length of the mapping of the flash chips on 54 This is the total length of the mapping of the flash chips on
@@ -51,7 +62,7 @@ config MTD_PHYSMAP_LEN
51 62
52config MTD_PHYSMAP_BANKWIDTH 63config MTD_PHYSMAP_BANKWIDTH
53 int "Bank width in octets" 64 int "Bank width in octets"
54 depends on MTD_PHYSMAP 65 depends on MTD_PHYSMAP_COMPAT
55 default "2" 66 default "2"
56 help 67 help
57 This is the total width of the data bus of the flash devices 68 This is the total width of the data bus of the flash devices
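
The MTD_PHYSMAP_COMPAT help text above points boards at resource files instead of the fixed Kconfig values; for reference, a sketch of that board-file route using the physmap platform driver ("physmap-flash" is the driver's device name; the address and width here are made up):

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>

static struct physmap_flash_data board_flash_data = {
	.width = 2,			/* bank width in octets */
};

static struct resource board_flash_resource = {
	.start = 0x08000000,		/* hypothetical window */
	.end   = 0x08000000 + 0x800000 - 1,
	.flags = IORESOURCE_MEM,
};

static struct platform_device board_flash = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= { .platform_data = &board_flash_data },
	.num_resources	= 1,
	.resource	= &board_flash_resource,
};

The board's init code would then call platform_device_register(&board_flash); the physmap driver probes the window with whichever chip drivers (CFI, JEDEC, or now LPDDR) are enabled.
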
diff --git a/drivers/mtd/maps/alchemy-flash.c b/drivers/mtd/maps/alchemy-flash.c
index 82811bcb0436..845ad4f2a542 100644
--- a/drivers/mtd/maps/alchemy-flash.c
+++ b/drivers/mtd/maps/alchemy-flash.c
@@ -111,7 +111,7 @@ static struct mtd_partition alchemy_partitions[] = {
111 111
112static struct mtd_info *mymtd; 112static struct mtd_info *mymtd;
113 113
114int __init alchemy_mtd_init(void) 114static int __init alchemy_mtd_init(void)
115{ 115{
116 struct mtd_partition *parts; 116 struct mtd_partition *parts;
117 int nb_parts = 0; 117 int nb_parts = 0;
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index d1eec7d3243f..237733d094c4 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -232,8 +232,8 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
232 /* Trim the size if we are larger than the map */ 232 /* Trim the size if we are larger than the map */
233 if (map->mtd->size > map->map.size) { 233 if (map->mtd->size > map->map.size) {
234 printk(KERN_WARNING MOD_NAME 234 printk(KERN_WARNING MOD_NAME
235 " rom(%u) larger than window(%lu). fixing...\n", 235 " rom(%llu) larger than window(%lu). fixing...\n",
236 map->mtd->size, map->map.size); 236 (unsigned long long)map->mtd->size, map->map.size);
237 map->mtd->size = map->map.size; 237 map->mtd->size = map->map.size;
238 } 238 }
239 if (window->rsrc.parent) { 239 if (window->rsrc.parent) {
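
This hunk, like the matching ones in ck804xrom.c, esb2rom.c and ichxrom.c below, follows from mtd->size widening to 64 bits: printk() has no dedicated specifier for uint64_t, so the portable idiom is %llu with an explicit cast. In isolation:

#include <linux/types.h>
#include <linux/kernel.h>

static void print_rom_size(uint64_t size)
{
	/* correct on both 32- and 64-bit builds; %u would truncate */
	printk(KERN_INFO "rom size %llu bytes\n",
	       (unsigned long long)size);
}
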
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index 0ecc3f6d735b..b4ed81611918 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -88,7 +88,7 @@ struct mtd_partition flagadm_parts[] = {
88 88
89static struct mtd_info *mymtd; 89static struct mtd_info *mymtd;
90 90
91int __init init_flagadm(void) 91static int __init init_flagadm(void)
92{ 92{
93 printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n", 93 printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n",
94 FLASH_SIZE, FLASH_PHYS_ADDR); 94 FLASH_SIZE, FLASH_PHYS_ADDR);
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 1a6feb4474de..5f7a245ed132 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -263,8 +263,8 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
263 /* Trim the size if we are larger than the map */ 263 /* Trim the size if we are larger than the map */
264 if (map->mtd->size > map->map.size) { 264 if (map->mtd->size > map->map.size) {
265 printk(KERN_WARNING MOD_NAME 265 printk(KERN_WARNING MOD_NAME
266 " rom(%u) larger than window(%lu). fixing...\n", 266 " rom(%llu) larger than window(%lu). fixing...\n",
267 map->mtd->size, map->map.size); 267 (unsigned long long)map->mtd->size, map->map.size);
268 map->mtd->size = map->map.size; 268 map->mtd->size = map->map.size;
269 } 269 }
270 if (window->rsrc.parent) { 270 if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index e115667bf1d0..cfacfa6f45dd 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -69,7 +69,7 @@ struct map_info dbox2_flash_map = {
69 .phys = WINDOW_ADDR, 69 .phys = WINDOW_ADDR,
70}; 70};
71 71
72int __init init_dbox2_flash(void) 72static int __init init_dbox2_flash(void)
73{ 73{
74 printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR); 74 printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR);
75 dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); 75 dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
index 9433738c1664..be9e90b44587 100644
--- a/drivers/mtd/maps/edb7312.c
+++ b/drivers/mtd/maps/edb7312.c
@@ -71,7 +71,7 @@ static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
71static int mtd_parts_nb = 0; 71static int mtd_parts_nb = 0;
72static struct mtd_partition *mtd_parts = 0; 72static struct mtd_partition *mtd_parts = 0;
73 73
74int __init init_edb7312nor(void) 74static int __init init_edb7312nor(void)
75{ 75{
76 static const char *rom_probe_types[] = PROBETYPES; 76 static const char *rom_probe_types[] = PROBETYPES;
77 const char **type; 77 const char **type;
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index bbbcdd4c8d13..11a2f57df9cf 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -324,8 +324,8 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
324 /* Trim the size if we are larger than the map */ 324 /* Trim the size if we are larger than the map */
325 if (map->mtd->size > map->map.size) { 325 if (map->mtd->size > map->map.size) {
326 printk(KERN_WARNING MOD_NAME 326 printk(KERN_WARNING MOD_NAME
327 " rom(%u) larger than window(%lu). fixing...\n", 327 " rom(%llu) larger than window(%lu). fixing...\n",
328 map->mtd->size, map->map.size); 328 (unsigned long long)map->mtd->size, map->map.size);
329 map->mtd->size = map->map.size; 329 map->mtd->size = map->map.size;
330 } 330 }
331 if (window->rsrc.parent) { 331 if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index a8e3fde4cbd5..1e43124d498b 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -181,7 +181,7 @@ __setup("MTD_Partition=", MTD_New_Partition);
 /* Backwards-spelling-compatibility */
 __setup("MTD_Partion=", MTD_New_Partition);
 
-int __init init_fortunet(void)
+static int __init init_fortunet(void)
 {
 	int ix,iy;
 	for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 3b959fad1c4e..72c724fa8c27 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -65,7 +65,7 @@ static const char *probes[] = { "cmdlinepart", NULL };
 /*
  * Initialize FLASH support
  */
-int __init h720x_mtd_init(void)
+static int __init h720x_mtd_init(void)
 {
 
 	char *part_type = NULL;
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index aeb6c916e23f..c32bc28920b3 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -258,8 +258,8 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
 	/* Trim the size if we are larger than the map */
 	if (map->mtd->size > map->map.size) {
 		printk(KERN_WARNING MOD_NAME
-		       " rom(%u) larger than window(%lu). fixing...\n",
-		       map->mtd->size, map->map.size);
+		       " rom(%llu) larger than window(%lu). fixing...\n",
+		       (unsigned long long)map->mtd->size, map->map.size);
 		map->mtd->size = map->map.size;
 	}
 	if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 2682ab51a367..998a27da97f3 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -70,7 +70,7 @@ static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
 
 static const char *probes[] = { "cmdlinepart", NULL };
 
-int __init init_impa7(void)
+static int __init init_impa7(void)
 {
 	static const char *rom_probe_types[] = PROBETYPES;
 	const char **type;
diff --git a/drivers/mtd/maps/ipaq-flash.c b/drivers/mtd/maps/ipaq-flash.c
index ed58f6a77bd9..748c85f635f1 100644
--- a/drivers/mtd/maps/ipaq-flash.c
+++ b/drivers/mtd/maps/ipaq-flash.c
@@ -202,7 +202,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
 
 static int __init h1900_special_case(void);
 
-int __init ipaq_mtd_init(void)
+static int __init ipaq_mtd_init(void)
 {
 	struct mtd_partition *parts = NULL;
 	int nb_parts = 0;
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
index 706f67394b07..0eb5a7c85380 100644
--- a/drivers/mtd/maps/mbx860.c
+++ b/drivers/mtd/maps/mbx860.c
@@ -55,7 +55,7 @@ struct map_info mbx_map = {
 	.bankwidth = 4,
 };
 
-int __init init_mbx(void)
+static int __init init_mbx(void)
 {
 	printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR);
 	mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 965e6c6d6ab0..a97133eb9d70 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -226,7 +226,7 @@ static int __init nettel_init(void)
 
 	if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) {
 		printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n",
-			amd_mtd->size>>10);
+			(int)(amd_mtd->size>>10));
 
 		amd_mtd->owner = THIS_MODULE;
 
@@ -357,13 +357,12 @@ static int __init nettel_init(void)
 		*intel1par = 0;
 	}
 
-	printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %dK\n",
-		(intel_mtd->size >> 10));
+	printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %lldKiB\n",
+		(unsigned long long)(intel_mtd->size >> 10));
 
 	intel_mtd->owner = THIS_MODULE;
 
-	num_intel_partitions = sizeof(nettel_intel_partitions) /
-		sizeof(nettel_intel_partitions[0]);
+	num_intel_partitions = ARRAY_SIZE(nettel_intel_partitions);
 
 	if (intelboot) {
 		/*
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
index 43e04c1d22a9..2b2e45093218 100644
--- a/drivers/mtd/maps/octagon-5066.c
+++ b/drivers/mtd/maps/octagon-5066.c
@@ -184,7 +184,7 @@ void cleanup_oct5066(void)
 	release_region(PAGE_IO, 1);
 }
 
-int __init init_oct5066(void)
+static int __init init_oct5066(void)
 {
 	int i;
 	int ret = 0;
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 1db16e549e38..87743661d48e 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -29,7 +29,6 @@ struct physmap_flash_info {
 	struct map_info map[MAX_RESOURCES];
 #ifdef CONFIG_MTD_PARTITIONS
 	int nr_parts;
-	struct mtd_partition *parts;
 #endif
 };
 
@@ -56,14 +55,10 @@ static int physmap_flash_remove(struct platform_device *dev)
 	for (i = 0; i < MAX_RESOURCES; i++) {
 		if (info->mtd[i] != NULL) {
 #ifdef CONFIG_MTD_PARTITIONS
-			if (info->nr_parts) {
+			if (info->nr_parts || physmap_data->nr_parts)
 				del_mtd_partitions(info->mtd[i]);
-				kfree(info->parts);
-			} else if (physmap_data->nr_parts) {
-				del_mtd_partitions(info->mtd[i]);
-			} else {
+			else
 				del_mtd_device(info->mtd[i]);
-			}
 #else
 			del_mtd_device(info->mtd[i]);
 #endif
@@ -73,7 +68,12 @@ static int physmap_flash_remove(struct platform_device *dev)
 	return 0;
 }
 
-static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
+static const char *rom_probe_types[] = {
+					"cfi_probe",
+					"jedec_probe",
+					"qinfo_probe",
+					"map_rom",
+					NULL };
 #ifdef CONFIG_MTD_PARTITIONS
 static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
 #endif
@@ -86,6 +86,9 @@ static int physmap_flash_probe(struct platform_device *dev)
 	int err = 0;
 	int i;
 	int devices_found = 0;
+#ifdef CONFIG_MTD_PARTITIONS
+	struct mtd_partition *parts;
+#endif
 
 	physmap_data = dev->dev.platform_data;
 	if (physmap_data == NULL)
@@ -119,6 +122,7 @@ static int physmap_flash_probe(struct platform_device *dev)
 		info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1;
 		info->map[i].bankwidth = physmap_data->width;
 		info->map[i].set_vpp = physmap_data->set_vpp;
+		info->map[i].pfow_base = physmap_data->pfow_base;
 
 		info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
 						 info->map[i].size);
@@ -163,9 +167,10 @@ static int physmap_flash_probe(struct platform_device *dev)
 			goto err_out;
 
 #ifdef CONFIG_MTD_PARTITIONS
-	err = parse_mtd_partitions(info->cmtd, part_probe_types, &info->parts, 0);
+	err = parse_mtd_partitions(info->cmtd, part_probe_types, &parts, 0);
 	if (err > 0) {
-		add_mtd_partitions(info->cmtd, info->parts, err);
+		add_mtd_partitions(info->cmtd, parts, err);
+		kfree(parts);
 		return 0;
 	}
 
@@ -251,14 +256,7 @@ static struct platform_driver physmap_flash_driver = {
 };
 
 
-#ifdef CONFIG_MTD_PHYSMAP_LEN
-#if CONFIG_MTD_PHYSMAP_LEN != 0
-#warning using PHYSMAP compat code
-#define PHYSMAP_COMPAT
-#endif
-#endif
-
-#ifdef PHYSMAP_COMPAT
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
 static struct physmap_flash_data physmap_flash_data = {
 	.width = CONFIG_MTD_PHYSMAP_BANKWIDTH,
 };
@@ -302,7 +300,7 @@ static int __init physmap_init(void)
 	int err;
 
 	err = platform_driver_register(&physmap_flash_driver);
-#ifdef PHYSMAP_COMPAT
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
 	if (err == 0)
 		platform_device_register(&physmap_flash);
 #endif
@@ -312,7 +310,7 @@ static int __init physmap_init(void)
 
 static void __exit physmap_exit(void)
 {
-#ifdef PHYSMAP_COMPAT
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
 	platform_device_unregister(&physmap_flash);
 #endif
 	platform_driver_unregister(&physmap_flash_driver);
@@ -326,8 +324,7 @@ MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
 MODULE_DESCRIPTION("Generic configurable MTD map driver");
 
 /* legacy platform drivers can't hotplug or coldplg */
-#ifndef PHYSMAP_COMPAT
+#ifndef CONFIG_MTD_PHYSMAP_COMPAT
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:physmap-flash");
 #endif
-
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index f43ba2815cbb..4768bd5459d6 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -48,7 +48,7 @@ static int fcnt;
 
 #define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__)
 
-int __init init_msp_flash(void)
+static int __init init_msp_flash(void)
 {
 	int i, j;
 	int offset, coff;
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c
index de002eb1a7fe..933c0b63b016 100644
--- a/drivers/mtd/maps/redwood.c
+++ b/drivers/mtd/maps/redwood.c
@@ -122,7 +122,7 @@ struct map_info redwood_flash_map = {
 
 static struct mtd_info *redwood_mtd;
 
-int __init init_redwood_flash(void)
+static int __init init_redwood_flash(void)
 {
 	int err;
 
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
index 14d90edb4430..3e3ef53d4fd4 100644
--- a/drivers/mtd/maps/rpxlite.c
+++ b/drivers/mtd/maps/rpxlite.c
@@ -23,7 +23,7 @@ static struct map_info rpxlite_map = {
 	.phys = WINDOW_ADDR,
 };
 
-int __init init_rpxlite(void)
+static int __init init_rpxlite(void)
 {
 	printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
 	rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
index 6e1e99cd2b59..d5374cdcb163 100644
--- a/drivers/mtd/maps/sbc8240.c
+++ b/drivers/mtd/maps/sbc8240.c
@@ -136,7 +136,7 @@ static struct mtd_part_def sbc8240_part_banks[NUM_FLASH_BANKS];
 #endif /* CONFIG_MTD_PARTITIONS */
 
 
-int __init init_sbc8240_mtd (void)
+static int __init init_sbc8240_mtd (void)
 {
 	static struct _cjs {
 		u_long addr;
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 21169e6d646c..7e329f09a548 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -118,7 +118,8 @@ scb2_fixup_mtd(struct mtd_info *mtd)
 		struct mtd_erase_region_info *region = &mtd->eraseregions[i];
 
 		if (region->numblocks * region->erasesize > mtd->size) {
-			region->numblocks = (mtd->size / region->erasesize);
+			region->numblocks = ((unsigned long)mtd->size /
+						region->erasesize);
 			done = 1;
 		} else {
 			region->numblocks = 0;
@@ -187,8 +188,9 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 		return -ENODEV;
 	}
 
-	printk(KERN_NOTICE MODNAME ": chip size 0x%x at offset 0x%x\n",
-	       scb2_mtd->size, SCB2_WINDOW - scb2_mtd->size);
+	printk(KERN_NOTICE MODNAME ": chip size 0x%llx at offset 0x%llx\n",
+	       (unsigned long long)scb2_mtd->size,
+	       (unsigned long long)(SCB2_WINDOW - scb2_mtd->size));
 
 	add_mtd_device(scb2_mtd);
 
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
index 026eab028189..b392f096c706 100644
--- a/drivers/mtd/maps/sharpsl-flash.c
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -47,7 +47,7 @@ static struct mtd_partition sharpsl_partitions[1] = {
 	}
 };
 
-int __init init_sharpsl(void)
+static int __init init_sharpsl(void)
 {
 	struct mtd_partition *parts;
 	int nb_parts = 0;
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index a5d3d8531faa..60146984f4be 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -109,7 +109,7 @@ static struct mtd_partition tqm8xxl_fs_partitions[] = {
 };
 #endif
 
-int __init init_tqm_mtd(void)
+static int __init init_tqm_mtd(void)
 {
 	int idx = 0, ret = 0;
 	unsigned long flash_addr, flash_size, mtd_size = 0;
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 0dc645f8152f..81756e397711 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -51,7 +51,7 @@ int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
 
 /****************************************************************************/
 
-int __init uclinux_mtd_init(void)
+static int __init uclinux_mtd_init(void)
 {
 	struct mtd_info *mtd;
 	struct map_info *mapp;
@@ -94,7 +94,7 @@ int __init uclinux_mtd_init(void)
 
 /****************************************************************************/
 
-void __exit uclinux_mtd_cleanup(void)
+static void __exit uclinux_mtd_cleanup(void)
 {
 	if (uclinux_ram_mtdinfo) {
 		del_mtd_partitions(uclinux_ram_mtdinfo);
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index 5a0c9a353b0f..6d452dcdfe34 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -146,7 +146,7 @@ static void __exit cleanup_vmax301(void)
 	iounmap((void *)vmax_map[0].map_priv_1 - WINDOW_START);
 }
 
-int __init init_vmax301(void)
+static int __init init_vmax301(void)
 {
 	int i;
 	unsigned long iomapadr;
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 413b0cf9bbd2..933a2b6598b4 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -74,7 +74,7 @@ do { \
 	} \
 } while (0);
 
-int __init init_sbc82xx_flash(void)
+static int __init init_sbc82xx_flash(void)
 {
 	volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
 	int bigflash;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index bcffeda2df3d..e9ec59e9a566 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -450,16 +450,20 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
 		if (!erase)
 			ret = -ENOMEM;
 		else {
+			struct erase_info_user einfo;
+
 			wait_queue_head_t waitq;
 			DECLARE_WAITQUEUE(wait, current);
 
 			init_waitqueue_head(&waitq);
 
-			if (copy_from_user(&erase->addr, argp,
+			if (copy_from_user(&einfo, argp,
 				    sizeof(struct erase_info_user))) {
 				kfree(erase);
 				return -EFAULT;
 			}
+			erase->addr = einfo.start;
+			erase->len = einfo.length;
 			erase->mtd = mtd;
 			erase->callback = mtdchar_erase_callback;
 			erase->priv = (unsigned long)&waitq;
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 1a05cf37851e..3dbb1b38db66 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -197,7 +197,7 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
 			continue;
 		}
 
-		size = min(total_len, (size_t)(subdev->size - to));
+		size = min_t(uint64_t, total_len, subdev->size - to);
 		wsize = size; /* store for future use */
 
 		entry_high = entry_low;
@@ -385,7 +385,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
 	struct mtd_concat *concat = CONCAT(mtd);
 	struct mtd_info *subdev;
 	int i, err;
-	u_int32_t length, offset = 0;
+	uint64_t length, offset = 0;
 	struct erase_info *erase;
 
 	if (!(mtd->flags & MTD_WRITEABLE))
@@ -518,7 +518,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
 	return 0;
 }
 
-static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, err = -EINVAL;
@@ -528,7 +528,7 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
-		size_t size;
+		uint64_t size;
 
 		if (ofs >= subdev->size) {
 			size = 0;
@@ -556,7 +556,7 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
 	return err;
 }
 
-static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, err = 0;
@@ -566,7 +566,7 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
-		size_t size;
+		uint64_t size;
 
 		if (ofs >= subdev->size) {
 			size = 0;
@@ -696,7 +696,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
 	int i;
 	size_t size;
 	struct mtd_concat *concat;
-	u_int32_t max_erasesize, curr_erasesize;
+	uint32_t max_erasesize, curr_erasesize;
 	int num_erase_region;
 
 	printk(KERN_NOTICE "Concatenating MTD devices:\n");
@@ -842,12 +842,14 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
 		concat->mtd.erasesize = curr_erasesize;
 		concat->mtd.numeraseregions = 0;
 	} else {
+		uint64_t tmp64;
+
 		/*
 		 * erase block size varies across the subdevices: allocate
 		 * space to store the data describing the variable erase regions
 		 */
 		struct mtd_erase_region_info *erase_region_p;
-		u_int32_t begin, position;
+		uint64_t begin, position;
 
 		concat->mtd.erasesize = max_erasesize;
 		concat->mtd.numeraseregions = num_erase_region;
@@ -879,8 +881,9 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
 					erase_region_p->offset = begin;
 					erase_region_p->erasesize =
 					    curr_erasesize;
-					erase_region_p->numblocks =
-					    (position - begin) / curr_erasesize;
+					tmp64 = position - begin;
+					do_div(tmp64, curr_erasesize);
+					erase_region_p->numblocks = tmp64;
 					begin = position;
 
 					curr_erasesize = subdev[i]->erasesize;
@@ -897,9 +900,9 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
 					erase_region_p->offset = begin;
 					erase_region_p->erasesize =
 					    curr_erasesize;
-					erase_region_p->numblocks =
-					    (position -
-					     begin) / curr_erasesize;
+					tmp64 = position - begin;
+					do_div(tmp64, curr_erasesize);
+					erase_region_p->numblocks = tmp64;
 					begin = position;
 
 					curr_erasesize =
@@ -909,14 +912,16 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
 				}
 				position +=
 				    subdev[i]->eraseregions[j].
-				    numblocks * curr_erasesize;
+				    numblocks * (uint64_t)curr_erasesize;
 			}
 		}
 	}
 	/* Now write the final entry */
 	erase_region_p->offset = begin;
 	erase_region_p->erasesize = curr_erasesize;
-	erase_region_p->numblocks = (position - begin) / curr_erasesize;
+	tmp64 = position - begin;
+	do_div(tmp64, curr_erasesize);
+	erase_region_p->numblocks = tmp64;
 	}
 
 	return &concat->mtd;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index a9d246949820..76fe0a1e7a5e 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -57,6 +57,19 @@ int add_mtd_device(struct mtd_info *mtd)
 	mtd->index = i;
 	mtd->usecount = 0;
 
+	if (is_power_of_2(mtd->erasesize))
+		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+	else
+		mtd->erasesize_shift = 0;
+
+	if (is_power_of_2(mtd->writesize))
+		mtd->writesize_shift = ffs(mtd->writesize) - 1;
+	else
+		mtd->writesize_shift = 0;
+
+	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
+	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
+
 	/* Some chips always power up locked. Unlock them now */
 	if ((mtd->flags & MTD_WRITEABLE)
 	    && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
@@ -344,7 +357,8 @@ static inline int mtd_proc_info (char *buf, int i)
 	if (!this)
 		return 0;
 
-	return sprintf(buf, "mtd%d: %8.8x %8.8x \"%s\"\n", i, this->size,
+	return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i,
+		       (unsigned long long)this->size,
 		       this->erasesize, this->name);
 }
 
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index aebb3b27edbd..1a6b3beabe8d 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -80,9 +80,9 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
 	if (ret) {
 		set_current_state(TASK_RUNNING);
 		remove_wait_queue(&wait_q, &wait);
-		printk (KERN_WARNING "mtdoops: erase of region [0x%x, 0x%x] "
+		printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] "
 				"on \"%s\" failed\n",
-			erase.addr, erase.len, mtd->name);
+			(unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name);
 		return ret;
 	}
 
@@ -289,7 +289,10 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
 	}
 
 	cxt->mtd = mtd;
-	cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
+	if (mtd->size > INT_MAX)
+		cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE;
+	else
+		cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;
 
 	find_next_position(cxt);
 
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 3728913fa5fa..144e6b613a77 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -26,7 +26,7 @@ static LIST_HEAD(mtd_partitions);
 struct mtd_part {
 	struct mtd_info mtd;
 	struct mtd_info *master;
-	u_int32_t offset;
+	uint64_t offset;
 	int index;
 	struct list_head list;
 	int registered;
@@ -235,7 +235,7 @@ void mtd_erase_callback(struct erase_info *instr)
 }
 EXPORT_SYMBOL_GPL(mtd_erase_callback);
 
-static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
 	if ((len + ofs) > mtd->size)
@@ -243,7 +243,7 @@ static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
 	return part->master->lock(part->master, ofs + part->offset, len);
 }
 
-static int part_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
 	if ((len + ofs) > mtd->size)
@@ -317,7 +317,7 @@ EXPORT_SYMBOL(del_mtd_partitions);
 
 static struct mtd_part *add_one_partition(struct mtd_info *master,
 		const struct mtd_partition *part, int partno,
-		u_int32_t cur_offset)
+		uint64_t cur_offset)
 {
 	struct mtd_part *slave;
 
@@ -395,19 +395,19 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
 		slave->offset = cur_offset;
 	if (slave->offset == MTDPART_OFS_NXTBLK) {
 		slave->offset = cur_offset;
-		if ((cur_offset % master->erasesize) != 0) {
+		if (mtd_mod_by_eb(cur_offset, master) != 0) {
 			/* Round up to next erasesize */
-			slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
+			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
 			printk(KERN_NOTICE "Moving partition %d: "
-			       "0x%08x -> 0x%08x\n", partno,
-			       cur_offset, slave->offset);
+			       "0x%012llx -> 0x%012llx\n", partno,
+			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
 		}
 	}
 	if (slave->mtd.size == MTDPART_SIZ_FULL)
 		slave->mtd.size = master->size - slave->offset;
 
-	printk(KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
-		slave->offset + slave->mtd.size, slave->mtd.name);
+	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
+		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
 
 	/* let's do some sanity checks */
 	if (slave->offset >= master->size) {
@@ -420,13 +420,13 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
 	}
 	if (slave->offset + slave->mtd.size > master->size) {
 		slave->mtd.size = master->size - slave->offset;
-		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
-			part->name, master->name, slave->mtd.size);
+		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
+			part->name, master->name, (unsigned long long)slave->mtd.size);
 	}
 	if (master->numeraseregions > 1) {
 		/* Deal with variable erase size stuff */
 		int i, max = master->numeraseregions;
-		u32 end = slave->offset + slave->mtd.size;
+		u64 end = slave->offset + slave->mtd.size;
 		struct mtd_erase_region_info *regions = master->eraseregions;
 
 		/* Find the first erase regions which is part of this
@@ -449,7 +449,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
 	}
 
 	if ((slave->mtd.flags & MTD_WRITEABLE) &&
-	    (slave->offset % slave->mtd.erasesize)) {
+	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
 		/* Doesn't start on a boundary of major erase size */
 		/* FIXME: Let it be writable if it is on a boundary of
 		 * _minor_ erase size though */
@@ -458,7 +458,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
 			part->name);
 	}
 	if ((slave->mtd.flags & MTD_WRITEABLE) &&
-	    (slave->mtd.size % slave->mtd.erasesize)) {
+	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
 		slave->mtd.flags &= ~MTD_WRITEABLE;
 		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
 			part->name);
@@ -466,7 +466,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
 
 	slave->mtd.ecclayout = master->ecclayout;
 	if (master->block_isbad) {
-		uint32_t offs = 0;
+		uint64_t offs = 0;
 
 		while (offs < slave->mtd.size) {
 			if (master->block_isbad(master,
@@ -501,7 +501,7 @@ int add_mtd_partitions(struct mtd_info *master,
 		       int nbparts)
 {
 	struct mtd_part *slave;
-	u_int32_t cur_offset = 0;
+	uint64_t cur_offset = 0;
 	int i;
 
 	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f8ae0400c49c..8b12e6e109d3 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -163,6 +163,13 @@ config MTD_NAND_S3C2410_HWECC
 	  incorrect ECC generation, and if using these, the default of
 	  software ECC is preferable.
 
+config MTD_NAND_NDFC
+	tristate "NDFC NanD Flash Controller"
+	depends on 4xx
+	select MTD_NAND_ECC_SMC
+	help
+	 NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
+
 config MTD_NAND_S3C2410_CLKSTOP
 	bool "S3C2410 NAND IDLE clock stop"
 	depends on MTD_NAND_S3C2410
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 962380394855..6d9649159a18 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -676,11 +676,11 @@ static int alauda_probe(struct usb_interface *interface,
 		goto error;
 
 	al->write_out = usb_sndbulkpipe(al->dev,
-			ep_wr->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+			usb_endpoint_num(ep_wr));
 	al->bulk_in = usb_rcvbulkpipe(al->dev,
-			ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+			usb_endpoint_num(ep_in));
 	al->bulk_out = usb_sndbulkpipe(al->dev,
-			ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+			usb_endpoint_num(ep_out));
 
 	/* second device is identical up to now */
 	memcpy(al+1, al, sizeof(*al));
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index b8064bf3aee4..22a6b2e50e91 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -90,7 +90,7 @@ static int timing[3];
 module_param_array(timing, int, &numtimings, 0644);
 
 #ifdef CONFIG_MTD_PARTITIONS
-static const char *part_probes[] = { "RedBoot", NULL };
+static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
 #endif
 
 /* Hrm. Why isn't this already conditional on something in the struct device? */
@@ -805,10 +805,13 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
 	add_mtd_device(mtd);
 
 #ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+	mtd->name = "cafe_nand";
+#endif
 	nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
 	if (nr_parts > 0) {
 		cafe->parts = parts;
-		dev_info(&cafe->pdev->dev, "%d RedBoot partitions found\n", nr_parts);
+		dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
 		add_mtd_partitions(mtd, parts, nr_parts);
 	}
 #endif
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 4aa5bd6158da..65929db29446 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -777,7 +777,9 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
 	/* Fill in fsl_elbc_mtd structure */
 	priv->mtd.priv = chip;
 	priv->mtd.owner = THIS_MODULE;
-	priv->fmr = 0; /* rest filled in later */
+
+	/* Set the ECCM according to the settings in bootloader.*/
+	priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM;
 
 	/* fill in nand_chip structure */
 	/* set up function call table */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 0a9c9cd33f96..0c3afccde8a2 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2014,13 +2014,14 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
 		    int allowbbt)
 {
-	int page, len, status, pages_per_block, ret, chipnr;
+	int page, status, pages_per_block, ret, chipnr;
 	struct nand_chip *chip = mtd->priv;
-	int rewrite_bbt[NAND_MAX_CHIPS]={0};
+	loff_t rewrite_bbt[NAND_MAX_CHIPS]={0};
 	unsigned int bbt_masked_page = 0xffffffff;
+	loff_t len;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%08x, len = %i\n",
-	      (unsigned int)instr->addr, (unsigned int)instr->len);
+	DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%012llx, len = %llu\n",
+	      (unsigned long long)instr->addr, (unsigned long long)instr->len);
 
 	/* Start address must align on block boundary */
 	if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
@@ -2116,7 +2117,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
 			DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
 			      "Failed erase, page 0x%08x\n", page);
 			instr->state = MTD_ERASE_FAILED;
-			instr->fail_addr = (page << chip->page_shift);
+			instr->fail_addr =
+				((loff_t)page << chip->page_shift);
 			goto erase_exit;
 		}
 
@@ -2126,7 +2128,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
 		 */
 		if (bbt_masked_page != 0xffffffff &&
 		    (page & BBT_PAGE_MASK) == bbt_masked_page)
-			rewrite_bbt[chipnr] = (page << chip->page_shift);
+			rewrite_bbt[chipnr] =
+				((loff_t)page << chip->page_shift);
 
 		/* Increment page address and decrement length */
 		len -= (1 << chip->phys_erase_shift);
@@ -2173,7 +2176,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
 			continue;
 		/* update the BBT for chip */
 		DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt "
-		      "(%d:0x%0x 0x%0x)\n", chipnr, rewrite_bbt[chipnr],
+		      "(%d:0x%0llx 0x%0x)\n", chipnr, rewrite_bbt[chipnr],
 		      chip->bbt_td->pages[chipnr]);
 		nand_update_bbt(mtd, rewrite_bbt[chipnr]);
 	}
@@ -2365,7 +2368,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
 		if (!mtd->name)
 			mtd->name = type->name;
 
-		chip->chipsize = type->chipsize << 20;
+		chip->chipsize = (uint64_t)type->chipsize << 20;
 
 		/* Newer devices have all the information in additional id bytes */
 		if (!type->pagesize) {
@@ -2423,7 +2426,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
 
 	chip->bbt_erase_shift = chip->phys_erase_shift =
 		ffs(mtd->erasesize) - 1;
-	chip->chip_shift = ffs(chip->chipsize) - 1;
+	if (chip->chipsize & 0xffffffff)
+		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
+	else
+		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
 
 	/* Set the bad block position */
 	chip->badblockpos = mtd->writesize > 512 ?
@@ -2517,7 +2523,6 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
 /**
  * nand_scan_tail - [NAND Interface] Scan for the NAND device
  * @mtd: MTD device structure
- * @maxchips: Number of chips to scan for
  *
  * This is the second phase of the normal nand_scan() function. It
  * fills out all the uninitialized function pointers with the defaults
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 0b1c48595f12..55c23e5cd210 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -171,16 +171,16 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
 			if (tmp == msk)
 				continue;
 			if (reserved_block_code && (tmp == reserved_block_code)) {
-				printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n",
-				       ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+				printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n",
+				       (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
 				this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
 				mtd->ecc_stats.bbtblocks++;
 				continue;
 			}
 			/* Leave it for now, if its matured we can move this
 			 * message to MTD_DEBUG_LEVEL0 */
-			printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n",
-			       ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+			printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n",
+			       (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
 			/* Factory marked bad or worn out ? */
 			if (tmp == 0)
 				this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
@@ -284,7 +284,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
 
 	/* Read the primary version, if available */
 	if (td->options & NAND_BBT_VERSION) {
-		scan_read_raw(mtd, buf, td->pages[0] << this->page_shift,
+		scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
 			      mtd->writesize);
 		td->version[0] = buf[mtd->writesize + td->veroffs];
 		printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
@@ -293,7 +293,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
 
 	/* Read the mirror version, if available */
 	if (md && (md->options & NAND_BBT_VERSION)) {
-		scan_read_raw(mtd, buf, md->pages[0] << this->page_shift,
+		scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
 			      mtd->writesize);
 		md->version[0] = buf[mtd->writesize + md->veroffs];
 		printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
@@ -411,7 +411,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
 		numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
 		startblock = chip * numblocks;
 		numblocks += startblock;
-		from = startblock << (this->bbt_erase_shift - 1);
+		from = (loff_t)startblock << (this->bbt_erase_shift - 1);
 	}
 
 	for (i = startblock; i < numblocks;) {
@@ -428,8 +428,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
 
 		if (ret) {
 			this->bbt[i >> 3] |= 0x03 << (i & 0x6);
-			printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
-			       i >> 1, (unsigned int)from);
+			printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n",
+			       i >> 1, (unsigned long long)from);
 			mtd->ecc_stats.badblocks++;
 		}
 
@@ -495,7 +495,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
 		for (block = 0; block < td->maxblocks; block++) {
 
 			int actblock = startblock + dir * block;
-			loff_t offs = actblock << this->bbt_erase_shift;
+			loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
 
 			/* Read first page */
 			scan_read_raw(mtd, buf, offs, mtd->writesize);
@@ -719,7 +719,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
 
 		memset(&einfo, 0, sizeof(einfo));
 		einfo.mtd = mtd;
-		einfo.addr = (unsigned long)to;
+		einfo.addr = to;
 		einfo.len = 1 << this->bbt_erase_shift;
 		res = nand_erase_nand(mtd, &einfo, 1);
 		if (res < 0)
@@ -729,8 +729,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
 		if (res < 0)
 			goto outerr;
 
-		printk(KERN_DEBUG "Bad block table written to 0x%08x, version "
-		       "0x%02X\n", (unsigned int)to, td->version[chip]);
+		printk(KERN_DEBUG "Bad block table written to 0x%012llx, version "
+		       "0x%02X\n", (unsigned long long)to, td->version[chip]);
 
 		/* Mark it as used */
 		td->pages[chip] = page;
@@ -910,7 +910,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
 			newval = oldval | (0x2 << (block & 0x06));
 			this->bbt[(block >> 3)] = newval;
 			if ((oldval != newval) && td->reserved_block_code)
-				nand_update_bbt(mtd, block << (this->bbt_erase_shift - 1));
+				nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
 			continue;
 		}
 		update = 0;
@@ -931,7 +931,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
 	   new ones have been marked, then we need to update the stored
 	   bbts.  This should only happen once. */
 	if (update && td->reserved_block_code)
-		nand_update_bbt(mtd, (block - 2) << (this->bbt_erase_shift - 1));
+		nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
 	}
 }
 
@@ -1027,7 +1027,6 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
 	if (!this->bbt || !td)
 		return -EINVAL;
 
-	len = mtd->size >> (this->bbt_erase_shift + 2);
 	/* Allocate a temporary buffer for one eraseblock incl. oob */
 	len = (1 << this->bbt_erase_shift);
 	len += (len >> this->page_shift) * mtd->oobsize;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index ae7c57781a68..cd0711b83ac4 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -38,6 +38,9 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
 
 /* Default simulator parameters values */
 #if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -100,6 +103,7 @@ static unsigned int bitflips = 0;
 static char *gravepages = NULL;
 static unsigned int rptwear = 0;
 static unsigned int overridesize = 0;
+static char *cache_file = NULL;
 
 module_param(first_id_byte, uint, 0400);
 module_param(second_id_byte, uint, 0400);
@@ -122,12 +126,13 @@ module_param(bitflips, uint, 0400);
 module_param(gravepages, charp, 0400);
 module_param(rptwear, uint, 0400);
 module_param(overridesize, uint, 0400);
+module_param(cache_file, charp, 0400);
 
 MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
 MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
 MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
 MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
-MODULE_PARM_DESC(access_delay, "Initial page access delay (microiseconds)");
+MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
 MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds");
 MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
 MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)");
@@ -153,6 +158,7 @@ MODULE_PARM_DESC(rptwear, "Number of erases inbetween reporting wear, if
 MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
 				 "The size is specified in erase blocks and as the exponent of a power of two"
 				 " e.g. 5 means a size of 32 erase blocks");
+MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
 
 /* The largest possible page size */
 #define NS_LARGEST_PAGE_SIZE 2048
@@ -266,6 +272,9 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
  */
 #define NS_MAX_PREVSTATES 1
 
+/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
+#define NS_MAX_HELD_PAGES 16
+
 /*
  * A union to represent flash memory contents and flash buffer.
  */
@@ -295,6 +304,9 @@ struct nandsim {
 	/* The simulated NAND flash pages array */
 	union ns_mem *pages;
 
+	/* Slab allocator for nand pages */
+	struct kmem_cache *nand_pages_slab;
+
 	/* Internal buffer of page + OOB size bytes */
 	union ns_mem buf;
 
@@ -335,6 +347,13 @@ struct nandsim {
 		int ale;  /* address Latch Enable */
 		int wp;   /* write Protect */
 	} lines;
+
+	/* Fields needed when using a cache file */
+	struct file *cfile; /* Open file */
+	unsigned char *pages_written; /* Which pages have been written */
+	void *file_buf;
+	struct page *held_pages[NS_MAX_HELD_PAGES];
+	int held_cnt;
 };
 
 /*
@@ -420,25 +439,69 @@ static struct mtd_info *nsmtd;
420static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE]; 439static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
421 440
422/* 441/*
423 * Allocate array of page pointers and initialize the array to NULL 442 * Allocate array of page pointers, create slab allocation for an array
424 * pointers. 443 * and initialize the array by NULL pointers.
425 * 444 *
426 * RETURNS: 0 if success, -ENOMEM if memory alloc fails. 445 * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
427 */ 446 */
428static int alloc_device(struct nandsim *ns) 447static int alloc_device(struct nandsim *ns)
429{ 448{
430 int i; 449 struct file *cfile;
450 int i, err;
451
452 if (cache_file) {
453 cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
454 if (IS_ERR(cfile))
455 return PTR_ERR(cfile);
456 if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
457 NS_ERR("alloc_device: cache file not readable\n");
458 err = -EINVAL;
459 goto err_close;
460 }
461 if (!cfile->f_op->write && !cfile->f_op->aio_write) {
462 NS_ERR("alloc_device: cache file not writeable\n");
463 err = -EINVAL;
464 goto err_close;
465 }
466 ns->pages_written = vmalloc(ns->geom.pgnum);
467 if (!ns->pages_written) {
468 NS_ERR("alloc_device: unable to allocate pages written array\n");
469 err = -ENOMEM;
470 goto err_close;
471 }
472 ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
473 if (!ns->file_buf) {
474 NS_ERR("alloc_device: unable to allocate file buf\n");
475 err = -ENOMEM;
476 goto err_free;
477 }
478 ns->cfile = cfile;
479 memset(ns->pages_written, 0, ns->geom.pgnum);
480 return 0;
481 }
431 482
432 ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem)); 483 ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
433 if (!ns->pages) { 484 if (!ns->pages) {
434 NS_ERR("alloc_map: unable to allocate page array\n"); 485 NS_ERR("alloc_device: unable to allocate page array\n");
435 return -ENOMEM; 486 return -ENOMEM;
436 } 487 }
437 for (i = 0; i < ns->geom.pgnum; i++) { 488 for (i = 0; i < ns->geom.pgnum; i++) {
438 ns->pages[i].byte = NULL; 489 ns->pages[i].byte = NULL;
439 } 490 }
491 ns->nand_pages_slab = kmem_cache_create("nandsim",
492 ns->geom.pgszoob, 0, 0, NULL);
493 if (!ns->nand_pages_slab) {
494 NS_ERR("cache_create: unable to create kmem_cache\n");
495 return -ENOMEM;
496 }
440 497
441 return 0; 498 return 0;
499
500err_free:
501 vfree(ns->pages_written);
502err_close:
503 filp_close(cfile, NULL);
504 return err;
442} 505}
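
A note on the allocator change above: simulated pages move from individual kmalloc() calls to a dedicated slab cache created with the exact object size pgszoob, which packs better than the general kmalloc buckets for odd sizes such as 2048 + 64 bytes. A minimal sketch of the pattern, with illustrative names, not part of the patch:

    struct kmem_cache *cache;
    void *obj;

    /* name, object size, align, flags, ctor (five-argument form of this era) */
    cache = kmem_cache_create("example_cache", 2112, 0, 0, NULL);
    if (!cache)
        return -ENOMEM;

    obj = kmem_cache_alloc(cache, GFP_NOFS);    /* one fixed-size object */
    if (obj)
        kmem_cache_free(cache, obj);

    kmem_cache_destroy(cache);                  /* only after all objects are freed */
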
443 506
444/* 507/*
@@ -448,11 +511,20 @@ static void free_device(struct nandsim *ns)
448{ 511{
449 int i; 512 int i;
450 513
514 if (ns->cfile) {
515 kfree(ns->file_buf);
516 vfree(ns->pages_written);
517 filp_close(ns->cfile, NULL);
518 return;
519 }
520
451 if (ns->pages) { 521 if (ns->pages) {
452 for (i = 0; i < ns->geom.pgnum; i++) { 522 for (i = 0; i < ns->geom.pgnum; i++) {
453 if (ns->pages[i].byte) 523 if (ns->pages[i].byte)
454 kfree(ns->pages[i].byte); 524 kmem_cache_free(ns->nand_pages_slab,
525 ns->pages[i].byte);
455 } 526 }
527 kmem_cache_destroy(ns->nand_pages_slab);
456 vfree(ns->pages); 528 vfree(ns->pages);
457 } 529 }
458} 530}
@@ -464,7 +536,7 @@ static char *get_partition_name(int i)
464 return kstrdup(buf, GFP_KERNEL); 536 return kstrdup(buf, GFP_KERNEL);
465} 537}
466 538
467static u_int64_t divide(u_int64_t n, u_int32_t d) 539static uint64_t divide(uint64_t n, uint32_t d)
468{ 540{
469 do_div(n, d); 541 do_div(n, d);
470 return n; 542 return n;
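
The divide() helper above exists because do_div() is a macro with side effects: it overwrites its first argument with the quotient and evaluates to the remainder, performing the 64-by-32 division without the libgcc helpers that a plain '/' on a 64-bit value would pull in on 32-bit builds. A worked example, not part of the patch:

    uint64_t n = 1000003;
    uint32_t rem;

    rem = do_div(n, 1000);    /* afterwards: n == 1000 (quotient), rem == 3 */
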
@@ -480,8 +552,8 @@ static int init_nandsim(struct mtd_info *mtd)
480 struct nand_chip *chip = (struct nand_chip *)mtd->priv; 552 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
481 struct nandsim *ns = (struct nandsim *)(chip->priv); 553 struct nandsim *ns = (struct nandsim *)(chip->priv);
482 int i, ret = 0; 554 int i, ret = 0;
483 u_int64_t remains; 555 uint64_t remains;
484 u_int64_t next_offset; 556 uint64_t next_offset;
485 557
486 if (NS_IS_INITIALIZED(ns)) { 558 if (NS_IS_INITIALIZED(ns)) {
487 NS_ERR("init_nandsim: nandsim is already initialized\n"); 559 NS_ERR("init_nandsim: nandsim is already initialized\n");
@@ -548,7 +620,7 @@ static int init_nandsim(struct mtd_info *mtd)
548 remains = ns->geom.totsz; 620 remains = ns->geom.totsz;
549 next_offset = 0; 621 next_offset = 0;
550 for (i = 0; i < parts_num; ++i) { 622 for (i = 0; i < parts_num; ++i) {
551 u_int64_t part_sz = (u_int64_t)parts[i] * ns->geom.secsz; 623 uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
552 624
553 if (!part_sz || part_sz > remains) { 625 if (!part_sz || part_sz > remains) {
554 NS_ERR("bad partition size.\n"); 626 NS_ERR("bad partition size.\n");
@@ -1211,6 +1283,97 @@ static int find_operation(struct nandsim *ns, uint32_t flag)
1211 return -1; 1283 return -1;
1212} 1284}
1213 1285
1286static void put_pages(struct nandsim *ns)
1287{
1288 int i;
1289
1290 for (i = 0; i < ns->held_cnt; i++)
1291 page_cache_release(ns->held_pages[i]);
1292}
1293
1294/* Pin the page cache pages in advance, with GFP_NOFS, so the vfs I/O below need not allocate them itself */
1295static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
1296{
1297 pgoff_t index, start_index, end_index;
1298 struct page *page;
1299 struct address_space *mapping = file->f_mapping;
1300
1301 start_index = pos >> PAGE_CACHE_SHIFT;
1302 end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
1303 if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
1304 return -EINVAL;
1305 ns->held_cnt = 0;
1306 for (index = start_index; index <= end_index; index++) {
1307 page = find_get_page(mapping, index);
1308 if (page == NULL) {
1309 page = find_or_create_page(mapping, index, GFP_NOFS);
1310 if (page == NULL) {
1311 write_inode_now(mapping->host, 1);
1312 page = find_or_create_page(mapping, index, GFP_NOFS);
1313 }
1314 if (page == NULL) {
1315 put_pages(ns);
1316 return -ENOMEM;
1317 }
1318 unlock_page(page);
1319 }
1320 ns->held_pages[ns->held_cnt++] = page;
1321 }
1322 return 0;
1323}
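
A quick sanity check on NS_MAX_HELD_PAGES, assuming 4 KiB page-cache pages: a transfer of count bytes at offset pos pins

    ((pos + count - 1) >> PAGE_CACHE_SHIFT) - (pos >> PAGE_CACHE_SHIFT) + 1

pages. The largest transfer here is one NAND page plus OOB, 2048 + 64 = 2112 bytes, which can straddle at most two 4 KiB pages, so the limit of 16 leaves generous headroom.
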
1324
1325static int set_memalloc(void)
1326{
1327 if (current->flags & PF_MEMALLOC)
1328 return 0;
1329 current->flags |= PF_MEMALLOC;
1330 return 1;
1331}
1332
1333static void clear_memalloc(int memalloc)
1334{
1335 if (memalloc)
1336 current->flags &= ~PF_MEMALLOC;
1337}
1338
1339static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
1340{
1341 mm_segment_t old_fs;
1342 ssize_t tx;
1343 int err, memalloc;
1344
1345 err = get_pages(ns, file, count, *pos);
1346 if (err)
1347 return err;
1348 old_fs = get_fs();
1349 set_fs(get_ds());
1350 memalloc = set_memalloc();
1351 tx = vfs_read(file, (char __user *)buf, count, pos);
1352 clear_memalloc(memalloc);
1353 set_fs(old_fs);
1354 put_pages(ns);
1355 return tx;
1356}
1357
1358static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
1359{
1360 mm_segment_t old_fs;
1361 ssize_t tx;
1362 int err, memalloc;
1363
1364 err = get_pages(ns, file, count, *pos);
1365 if (err)
1366 return err;
1367 old_fs = get_fs();
1368 set_fs(get_ds());
1369 memalloc = set_memalloc();
1370 tx = vfs_write(file, (char __user *)buf, count, pos);
1371 clear_memalloc(memalloc);
1372 set_fs(old_fs);
1373 put_pages(ns);
1374 return tx;
1375}
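
read_file() and write_file() combine two idioms of this kernel era. set_fs(get_ds()) widens the address-limit check so vfs_read()/vfs_write() accept a kernel buffer cast to __user, and PF_MEMALLOC exempts allocations made during the I/O from direct reclaim (and lets them tap emergency reserves), so writing the cache file cannot recurse into a filesystem that is itself mid-write to the simulated flash. The bare kernel-buffer I/O pattern, shown outside the driver as a sketch:

    mm_segment_t old_fs = get_fs();
    ssize_t ret;

    set_fs(get_ds());                            /* kernel pointers pass the __user check */
    ret = vfs_read(file, (char __user *)kbuf, len, &pos);
    set_fs(old_fs);                              /* always restore the old limit */
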
1376
1214/* 1377/*
1215 * Returns a pointer to the current page. 1378 * Returns a pointer to the current page.
1216 */ 1379 */
@@ -1227,6 +1390,38 @@ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
1227 return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off; 1390 return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
1228} 1391}
1229 1392
1393int do_read_error(struct nandsim *ns, int num)
1394{
1395 unsigned int page_no = ns->regs.row;
1396
1397 if (read_error(page_no)) {
1398 int i;
1399 memset(ns->buf.byte, 0xFF, num);
1400 for (i = 0; i < num; ++i)
1401 ns->buf.byte[i] = random32();
1402 NS_WARN("simulating read error in page %u\n", page_no);
1403 return 1;
1404 }
1405 return 0;
1406}
1407
1408void do_bit_flips(struct nandsim *ns, int num)
1409{
1410 if (bitflips && random32() < (1 << 22)) {
1411 int flips = 1;
1412 if (bitflips > 1)
1413 flips = (random32() % (int) bitflips) + 1;
1414 while (flips--) {
1415 int pos = random32() % (num * 8);
1416 ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
1417 NS_WARN("read_page: flipping bit %d in page %d "
1418 "reading from %d ecc: corrected=%u failed=%u\n",
1419 pos, ns->regs.row, ns->regs.column + ns->regs.off,
1420 nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
1421 }
1422 }
1423}
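
On the flip rate: random32() is uniform over 32 bits, so the guard random32() < (1 << 22) fires with probability

    2^22 / 2^32 = 2^-10 ~= 0.1%

i.e. roughly one read in 1024 gets between 1 and bitflips corrupted bits.
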
1424
1230/* 1425/*
1231 * Fill the NAND buffer with data read from the specified page. 1426 * Fill the NAND buffer with data read from the specified page.
1232 */ 1427 */
@@ -1234,36 +1429,40 @@ static void read_page(struct nandsim *ns, int num)
1234{ 1429{
1235 union ns_mem *mypage; 1430 union ns_mem *mypage;
1236 1431
1432 if (ns->cfile) {
1433 if (!ns->pages_written[ns->regs.row]) {
1434 NS_DBG("read_page: page %d not written\n", ns->regs.row);
1435 memset(ns->buf.byte, 0xFF, num);
1436 } else {
1437 loff_t pos;
1438 ssize_t tx;
1439
1440 NS_DBG("read_page: page %d written, reading from %d\n",
1441 ns->regs.row, ns->regs.column + ns->regs.off);
1442 if (do_read_error(ns, num))
1443 return;
1444 pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
1445 tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos);
1446 if (tx != num) {
1447 NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
1448 return;
1449 }
1450 do_bit_flips(ns, num);
1451 }
1452 return;
1453 }
1454
1237 mypage = NS_GET_PAGE(ns); 1455 mypage = NS_GET_PAGE(ns);
1238 if (mypage->byte == NULL) { 1456 if (mypage->byte == NULL) {
1239 NS_DBG("read_page: page %d not allocated\n", ns->regs.row); 1457 NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
1240 memset(ns->buf.byte, 0xFF, num); 1458 memset(ns->buf.byte, 0xFF, num);
1241 } else { 1459 } else {
1242 unsigned int page_no = ns->regs.row;
1243 NS_DBG("read_page: page %d allocated, reading from %d\n", 1460 NS_DBG("read_page: page %d allocated, reading from %d\n",
1244 ns->regs.row, ns->regs.column + ns->regs.off); 1461 ns->regs.row, ns->regs.column + ns->regs.off);
1245 if (read_error(page_no)) { 1462 if (do_read_error(ns, num))
1246 int i;
1247 memset(ns->buf.byte, 0xFF, num);
1248 for (i = 0; i < num; ++i)
1249 ns->buf.byte[i] = random32();
1250 NS_WARN("simulating read error in page %u\n", page_no);
1251 return; 1463 return;
1252 }
1253 memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num); 1464 memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
1254 if (bitflips && random32() < (1 << 22)) { 1465 do_bit_flips(ns, num);
1255 int flips = 1;
1256 if (bitflips > 1)
1257 flips = (random32() % (int) bitflips) + 1;
1258 while (flips--) {
1259 int pos = random32() % (num * 8);
1260 ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
1261 NS_WARN("read_page: flipping bit %d in page %d "
1262 "reading from %d ecc: corrected=%u failed=%u\n",
1263 pos, ns->regs.row, ns->regs.column + ns->regs.off,
1264 nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
1265 }
1266 }
1267 } 1466 }
1268} 1467}
1269 1468
@@ -1275,11 +1474,20 @@ static void erase_sector(struct nandsim *ns)
1275 union ns_mem *mypage; 1474 union ns_mem *mypage;
1276 int i; 1475 int i;
1277 1476
1477 if (ns->cfile) {
1478 for (i = 0; i < ns->geom.pgsec; i++)
1479 if (ns->pages_written[ns->regs.row + i]) {
1480 NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
1481 ns->pages_written[ns->regs.row + i] = 0;
1482 }
1483 return;
1484 }
1485
1278 mypage = NS_GET_PAGE(ns); 1486 mypage = NS_GET_PAGE(ns);
1279 for (i = 0; i < ns->geom.pgsec; i++) { 1487 for (i = 0; i < ns->geom.pgsec; i++) {
1280 if (mypage->byte != NULL) { 1488 if (mypage->byte != NULL) {
1281 NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i); 1489 NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
1282 kfree(mypage->byte); 1490 kmem_cache_free(ns->nand_pages_slab, mypage->byte);
1283 mypage->byte = NULL; 1491 mypage->byte = NULL;
1284 } 1492 }
1285 mypage++; 1493 mypage++;
@@ -1295,16 +1503,57 @@ static int prog_page(struct nandsim *ns, int num)
1295 union ns_mem *mypage; 1503 union ns_mem *mypage;
1296 u_char *pg_off; 1504 u_char *pg_off;
1297 1505
1506 if (ns->cfile) {
1507 loff_t off, pos;
1508 ssize_t tx;
1509 int all;
1510
1511 NS_DBG("prog_page: writing page %d\n", ns->regs.row);
1512 pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
1513 off = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
1514 if (!ns->pages_written[ns->regs.row]) {
1515 all = 1;
1516 memset(ns->file_buf, 0xff, ns->geom.pgszoob);
1517 } else {
1518 all = 0;
1519 pos = off;
1520 tx = read_file(ns, ns->cfile, pg_off, num, &pos);
1521 if (tx != num) {
1522 NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
1523 return -1;
1524 }
1525 }
1526 for (i = 0; i < num; i++)
1527 pg_off[i] &= ns->buf.byte[i];
1528 if (all) {
1529 pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
1530 tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos);
1531 if (tx != ns->geom.pgszoob) {
1532 NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
1533 return -1;
1534 }
1535 ns->pages_written[ns->regs.row] = 1;
1536 } else {
1537 pos = off;
1538 tx = write_file(ns, ns->cfile, pg_off, num, &pos);
1539 if (tx != num) {
1540 NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
1541 return -1;
1542 }
1543 }
1544 return 0;
1545 }
1546
1298 mypage = NS_GET_PAGE(ns); 1547 mypage = NS_GET_PAGE(ns);
1299 if (mypage->byte == NULL) { 1548 if (mypage->byte == NULL) {
1300 NS_DBG("prog_page: allocating page %d\n", ns->regs.row); 1549 NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
1301 /* 1550 /*
1302 * We allocate memory with GFP_NOFS because a flash FS may 1551 * We allocate memory with GFP_NOFS because a flash FS may
1303 * utilize this. If it is holding an FS lock, then gets here, 1552 * utilize this. If it is holding an FS lock, then gets here,
1304 * then kmalloc runs writeback which goes to the FS again 1553 * then kernel memory alloc runs writeback which goes to the FS
1305 * and deadlocks. This was seen in practice. 1554 * again and deadlocks. This was seen in practice.
1306 */ 1555 */
1307 mypage->byte = kmalloc(ns->geom.pgszoob, GFP_NOFS); 1556 mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
1308 if (mypage->byte == NULL) { 1557 if (mypage->byte == NULL) {
1309 NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row); 1558 NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
1310 return -1; 1559 return -1;
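
The pg_off[i] &= ns->buf.byte[i] loop in the cache-file branch above models real NAND programming, which can only clear bits (1 -> 0), never set them; new data is therefore ANDed into the existing page image. A worked byte, not from the patch:

    existing page byte : 1111 0110
    byte being written : 1010 1111
    stored result      : 1010 0110   (only 1 -> 0 transitions occur)
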
@@ -1736,13 +1985,17 @@ static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
1736 1985
1737 /* Check if chip is expecting command */ 1986 /* Check if chip is expecting command */
1738 if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) { 1987 if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
1739 /* 1988 /* Do not warn if only 2 id bytes are read */
1740 * We are in situation when something else (not command) 1989 if (!(ns->regs.command == NAND_CMD_READID &&
1741 * was expected but command was input. In this case ignore 1990 NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
1742 * previous command(s)/state(s) and accept the last one. 1991 /*
1743 */ 1992 * We are in situation when something else (not command)
1744 NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, " 1993 * was expected but command was input. In this case ignore
1745 "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate)); 1994 * previous command(s)/state(s) and accept the last one.
1995 */
1996 NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
1997 "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
1998 }
1746 switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); 1999 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1747 } 2000 }
1748 2001
@@ -2044,7 +2297,7 @@ static int __init ns_init_module(void)
2044 } 2297 }
2045 2298
2046 if (overridesize) { 2299 if (overridesize) {
2047 u_int64_t new_size = (u_int64_t)nsmtd->erasesize << overridesize; 2300 uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
2048 if (new_size >> overridesize != nsmtd->erasesize) { 2301 if (new_size >> overridesize != nsmtd->erasesize) {
2049 NS_ERR("overridesize is too big\n"); 2302 NS_ERR("overridesize is too big\n");
2050 goto err_exit; 2303 goto err_exit;
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 955959eb02d4..582cf80f555a 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -2,12 +2,20 @@
2 * drivers/mtd/ndfc.c 2 * drivers/mtd/ndfc.c
3 * 3 *
4 * Overview: 4 * Overview:
5 * Platform independend driver for NDFC (NanD Flash Controller) 5 * Platform independent driver for NDFC (NanD Flash Controller)
6 * integrated into EP440 cores 6 * integrated into EP440 cores
7 * 7 *
8 * Ported to an OF platform driver by Sean MacLennan
9 *
10 * The NDFC supports multiple chips, but this driver only supports a
11 * single chip since I do not have access to any boards with
12 * multiple chips.
13 *
8 * Author: Thomas Gleixner 14 * Author: Thomas Gleixner
9 * 15 *
10 * Copyright 2006 IBM 16 * Copyright 2006 IBM
17 * Copyright 2008 PIKA Technologies
18 * Sean MacLennan <smaclennan@pikatech.com>
11 * 19 *
12 * This program is free software; you can redistribute it and/or modify it 20 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the 21 * under the terms of the GNU General Public License as published by the
@@ -21,27 +29,20 @@
21#include <linux/mtd/partitions.h> 29#include <linux/mtd/partitions.h>
22#include <linux/mtd/ndfc.h> 30#include <linux/mtd/ndfc.h>
23#include <linux/mtd/mtd.h> 31#include <linux/mtd/mtd.h>
24#include <linux/platform_device.h> 32#include <linux/of_platform.h>
25
26#include <asm/io.h> 33#include <asm/io.h>
27#ifdef CONFIG_40x
28#include <asm/ibm405.h>
29#else
30#include <asm/ibm44x.h>
31#endif
32
33struct ndfc_nand_mtd {
34 struct mtd_info mtd;
35 struct nand_chip chip;
36 struct platform_nand_chip *pl_chip;
37};
38 34
39static struct ndfc_nand_mtd ndfc_mtd[NDFC_MAX_BANKS];
40 35
41struct ndfc_controller { 36struct ndfc_controller {
42 void __iomem *ndfcbase; 37 struct of_device *ofdev;
43 struct nand_hw_control ndfc_control; 38 void __iomem *ndfcbase;
44 atomic_t childs_active; 39 struct mtd_info mtd;
40 struct nand_chip chip;
41 int chip_select;
42 struct nand_hw_control ndfc_control;
43#ifdef CONFIG_MTD_PARTITIONS
44 struct mtd_partition *parts;
45#endif
45}; 46};
46 47
47static struct ndfc_controller ndfc_ctrl; 48static struct ndfc_controller ndfc_ctrl;
@@ -50,17 +51,14 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip)
50{ 51{
51 uint32_t ccr; 52 uint32_t ccr;
52 struct ndfc_controller *ndfc = &ndfc_ctrl; 53 struct ndfc_controller *ndfc = &ndfc_ctrl;
53 struct nand_chip *nandchip = mtd->priv;
54 struct ndfc_nand_mtd *nandmtd = nandchip->priv;
55 struct platform_nand_chip *pchip = nandmtd->pl_chip;
56 54
57 ccr = __raw_readl(ndfc->ndfcbase + NDFC_CCR); 55 ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
58 if (chip >= 0) { 56 if (chip >= 0) {
59 ccr &= ~NDFC_CCR_BS_MASK; 57 ccr &= ~NDFC_CCR_BS_MASK;
60 ccr |= NDFC_CCR_BS(chip + pchip->chip_offset); 58 ccr |= NDFC_CCR_BS(chip + ndfc->chip_select);
61 } else 59 } else
62 ccr |= NDFC_CCR_RESET_CE; 60 ccr |= NDFC_CCR_RESET_CE;
63 __raw_writel(ccr, ndfc->ndfcbase + NDFC_CCR); 61 out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
64} 62}
65 63
66static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) 64static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
@@ -80,7 +78,7 @@ static int ndfc_ready(struct mtd_info *mtd)
80{ 78{
81 struct ndfc_controller *ndfc = &ndfc_ctrl; 79 struct ndfc_controller *ndfc = &ndfc_ctrl;
82 80
83 return __raw_readl(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY; 81 return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
84} 82}
85 83
86static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode) 84static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
@@ -88,9 +86,9 @@ static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
88 uint32_t ccr; 86 uint32_t ccr;
89 struct ndfc_controller *ndfc = &ndfc_ctrl; 87 struct ndfc_controller *ndfc = &ndfc_ctrl;
90 88
91 ccr = __raw_readl(ndfc->ndfcbase + NDFC_CCR); 89 ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
92 ccr |= NDFC_CCR_RESET_ECC; 90 ccr |= NDFC_CCR_RESET_ECC;
93 __raw_writel(ccr, ndfc->ndfcbase + NDFC_CCR); 91 out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
94 wmb(); 92 wmb();
95} 93}
96 94
@@ -102,9 +100,10 @@ static int ndfc_calculate_ecc(struct mtd_info *mtd,
102 uint8_t *p = (uint8_t *)&ecc; 100 uint8_t *p = (uint8_t *)&ecc;
103 101
104 wmb(); 102 wmb();
105 ecc = __raw_readl(ndfc->ndfcbase + NDFC_ECC); 103 ecc = in_be32(ndfc->ndfcbase + NDFC_ECC);
106 ecc_code[0] = p[1]; 104 /* The NDFC uses Smart Media (SMC) byte order */
107 ecc_code[1] = p[2]; 105 ecc_code[0] = p[2];
106 ecc_code[1] = p[1];
108 ecc_code[2] = p[3]; 107 ecc_code[2] = p[3];
109 108
110 return 0; 109 return 0;
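
An illustration of the ECC reorder, assuming the big-endian byte layout of the PowerPC 4xx parts this driver targets: if the register read yields ecc = 0xAABBCCDD, then p[0..3] = AA BB CC DD, and the fix changes the emitted triple so the bytes land in the Smart Media order that the generic nand_correct_data() expects:

    before : ecc_code[] = { p[1], p[2], p[3] } = { BB, CC, DD }
    after  : ecc_code[] = { p[2], p[1], p[3] } = { CC, BB, DD }
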
@@ -123,7 +122,7 @@ static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
123 uint32_t *p = (uint32_t *) buf; 122 uint32_t *p = (uint32_t *) buf;
124 123
125 for(;len > 0; len -= 4) 124 for(;len > 0; len -= 4)
126 *p++ = __raw_readl(ndfc->ndfcbase + NDFC_DATA); 125 *p++ = in_be32(ndfc->ndfcbase + NDFC_DATA);
127} 126}
128 127
129static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 128static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
@@ -132,7 +131,7 @@ static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
132 uint32_t *p = (uint32_t *) buf; 131 uint32_t *p = (uint32_t *) buf;
133 132
134 for(;len > 0; len -= 4) 133 for(;len > 0; len -= 4)
135 __raw_writel(*p++, ndfc->ndfcbase + NDFC_DATA); 134 out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
136} 135}
137 136
138static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 137static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
@@ -141,7 +140,7 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
141 uint32_t *p = (uint32_t *) buf; 140 uint32_t *p = (uint32_t *) buf;
142 141
143 for(;len > 0; len -= 4) 142 for(;len > 0; len -= 4)
144 if (*p++ != __raw_readl(ndfc->ndfcbase + NDFC_DATA)) 143 if (*p++ != in_be32(ndfc->ndfcbase + NDFC_DATA))
145 return -EFAULT; 144 return -EFAULT;
146 return 0; 145 return 0;
147} 146}
@@ -149,10 +148,19 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
149/* 148/*
150 * Initialize chip structure 149 * Initialize chip structure
151 */ 150 */
152static void ndfc_chip_init(struct ndfc_nand_mtd *mtd) 151static int ndfc_chip_init(struct ndfc_controller *ndfc,
152 struct device_node *node)
153{ 153{
154 struct ndfc_controller *ndfc = &ndfc_ctrl; 154#ifdef CONFIG_MTD_PARTITIONS
155 struct nand_chip *chip = &mtd->chip; 155#ifdef CONFIG_MTD_CMDLINE_PARTS
156 static const char *part_types[] = { "cmdlinepart", NULL };
157#else
158 static const char *part_types[] = { NULL };
159#endif
160#endif
161 struct device_node *flash_np;
162 struct nand_chip *chip = &ndfc->chip;
163 int ret;
156 164
157 chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA; 165 chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
158 chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA; 166 chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
@@ -160,8 +168,6 @@ static void ndfc_chip_init(struct ndfc_nand_mtd *mtd)
160 chip->dev_ready = ndfc_ready; 168 chip->dev_ready = ndfc_ready;
161 chip->select_chip = ndfc_select_chip; 169 chip->select_chip = ndfc_select_chip;
162 chip->chip_delay = 50; 170 chip->chip_delay = 50;
163 chip->priv = mtd;
164 chip->options = mtd->pl_chip->options;
165 chip->controller = &ndfc->ndfc_control; 171 chip->controller = &ndfc->ndfc_control;
166 chip->read_buf = ndfc_read_buf; 172 chip->read_buf = ndfc_read_buf;
167 chip->write_buf = ndfc_write_buf; 173 chip->write_buf = ndfc_write_buf;
@@ -172,143 +178,136 @@ static void ndfc_chip_init(struct ndfc_nand_mtd *mtd)
172 chip->ecc.mode = NAND_ECC_HW; 178 chip->ecc.mode = NAND_ECC_HW;
173 chip->ecc.size = 256; 179 chip->ecc.size = 256;
174 chip->ecc.bytes = 3; 180 chip->ecc.bytes = 3;
175 chip->ecclayout = chip->ecc.layout = mtd->pl_chip->ecclayout;
176 mtd->mtd.priv = chip;
177 mtd->mtd.owner = THIS_MODULE;
178}
179
180static int ndfc_chip_probe(struct platform_device *pdev)
181{
182 struct platform_nand_chip *nc = pdev->dev.platform_data;
183 struct ndfc_chip_settings *settings = nc->priv;
184 struct ndfc_controller *ndfc = &ndfc_ctrl;
185 struct ndfc_nand_mtd *nandmtd;
186
187 if (nc->chip_offset >= NDFC_MAX_BANKS || nc->nr_chips > NDFC_MAX_BANKS)
188 return -EINVAL;
189
190 /* Set the bank settings */
191 __raw_writel(settings->bank_settings,
192 ndfc->ndfcbase + NDFC_BCFG0 + (nc->chip_offset << 2));
193 181
194 nandmtd = &ndfc_mtd[pdev->id]; 182 ndfc->mtd.priv = chip;
195 if (nandmtd->pl_chip) 183 ndfc->mtd.owner = THIS_MODULE;
196 return -EBUSY;
197 184
198 nandmtd->pl_chip = nc; 185 flash_np = of_get_next_child(node, NULL);
199 ndfc_chip_init(nandmtd); 186 if (!flash_np)
200
201 /* Scan for chips */
202 if (nand_scan(&nandmtd->mtd, nc->nr_chips)) {
203 nandmtd->pl_chip = NULL;
204 return -ENODEV; 187 return -ENODEV;
188
189 ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
190 ndfc->ofdev->dev.bus_id, flash_np->name);
191 if (!ndfc->mtd.name) {
192 ret = -ENOMEM;
193 goto err;
205 } 194 }
206 195
207#ifdef CONFIG_MTD_PARTITIONS 196 ret = nand_scan(&ndfc->mtd, 1);
208 printk("Number of partitions %d\n", nc->nr_partitions); 197 if (ret)
209 if (nc->nr_partitions) { 198 goto err;
210 /* Add the full device, so complete dumps can be made */
211 add_mtd_device(&nandmtd->mtd);
212 add_mtd_partitions(&nandmtd->mtd, nc->partitions,
213 nc->nr_partitions);
214 199
215 } else 200#ifdef CONFIG_MTD_PARTITIONS
216#else 201 ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0);
217 add_mtd_device(&nandmtd->mtd); 202 if (ret < 0)
203 goto err;
204
205#ifdef CONFIG_MTD_OF_PARTS
206 if (ret == 0) {
207 ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
208 &ndfc->parts);
209 if (ret < 0)
210 goto err;
211 }
218#endif 212#endif
219 213
220 atomic_inc(&ndfc->childs_active); 214 if (ret > 0)
221 return 0; 215 ret = add_mtd_partitions(&ndfc->mtd, ndfc->parts, ret);
222} 216 else
217#endif
218 ret = add_mtd_device(&ndfc->mtd);
223 219
224static int ndfc_chip_remove(struct platform_device *pdev) 220err:
225{ 221 of_node_put(flash_np);
226 return 0; 222 if (ret)
223 kfree(ndfc->mtd.name);
224 return ret;
227} 225}
228 226
229static int ndfc_nand_probe(struct platform_device *pdev) 227static int __devinit ndfc_probe(struct of_device *ofdev,
228 const struct of_device_id *match)
230{ 229{
231 struct platform_nand_ctrl *nc = pdev->dev.platform_data;
232 struct ndfc_controller_settings *settings = nc->priv;
233 struct resource *res = pdev->resource;
234 struct ndfc_controller *ndfc = &ndfc_ctrl; 230 struct ndfc_controller *ndfc = &ndfc_ctrl;
235 unsigned long long phys = settings->ndfc_erpn | res->start; 231 const u32 *reg;
232 u32 ccr;
233 int err, len;
236 234
237#ifndef CONFIG_PHYS_64BIT 235 spin_lock_init(&ndfc->ndfc_control.lock);
238 ndfc->ndfcbase = ioremap((phys_addr_t)phys, res->end - res->start + 1); 236 init_waitqueue_head(&ndfc->ndfc_control.wq);
239#else 237 ndfc->ofdev = ofdev;
240 ndfc->ndfcbase = ioremap64(phys, res->end - res->start + 1); 238 dev_set_drvdata(&ofdev->dev, ndfc);
241#endif 239
240 /* Read the reg property to get the chip select */
241 reg = of_get_property(ofdev->node, "reg", &len);
242 if (reg == NULL || len != 12) {
243 dev_err(&ofdev->dev, "unable to read reg property (%d)\n", len);
244 return -ENOENT;
245 }
246 ndfc->chip_select = reg[0];
247
248 ndfc->ndfcbase = of_iomap(ofdev->node, 0);
242 if (!ndfc->ndfcbase) { 249 if (!ndfc->ndfcbase) {
243 printk(KERN_ERR "NDFC: ioremap failed\n"); 250 dev_err(&ofdev->dev, "failed to get memory\n");
244 return -EIO; 251 return -EIO;
245 } 252 }
246 253
247 __raw_writel(settings->ccr_settings, ndfc->ndfcbase + NDFC_CCR); 254 ccr = NDFC_CCR_BS(ndfc->chip_select);
248 255
249 spin_lock_init(&ndfc->ndfc_control.lock); 256 /* It is ok if ccr does not exist - just default to 0 */
250 init_waitqueue_head(&ndfc->ndfc_control.wq); 257 reg = of_get_property(ofdev->node, "ccr", NULL);
258 if (reg)
259 ccr |= *reg;
251 260
252 platform_set_drvdata(pdev, ndfc); 261 out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
253 262
254 printk("NDFC NAND Driver initialized. Chip-Rev: 0x%08x\n", 263 /* Set the bank settings if given */
255 __raw_readl(ndfc->ndfcbase + NDFC_REVID)); 264 reg = of_get_property(ofdev->node, "bank-settings", NULL);
265 if (reg) {
266 int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
267 out_be32(ndfc->ndfcbase + offset, *reg);
268 }
269
270 err = ndfc_chip_init(ndfc, ofdev->node);
271 if (err) {
272 iounmap(ndfc->ndfcbase);
273 return err;
274 }
256 275
257 return 0; 276 return 0;
258} 277}
259 278
260static int ndfc_nand_remove(struct platform_device *pdev) 279static int __devexit ndfc_remove(struct of_device *ofdev)
261{ 280{
262 struct ndfc_controller *ndfc = platform_get_drvdata(pdev); 281 struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
263 282
264 if (atomic_read(&ndfc->childs_active)) 283 nand_release(&ndfc->mtd);
265 return -EBUSY;
266 284
267 if (ndfc) {
268 platform_set_drvdata(pdev, NULL);
269 iounmap(ndfc_ctrl.ndfcbase);
270 ndfc_ctrl.ndfcbase = NULL;
271 }
272 return 0; 285 return 0;
273} 286}
274 287
275/* driver device registration */ 288static const struct of_device_id ndfc_match[] = {
276 289 { .compatible = "ibm,ndfc", },
277static struct platform_driver ndfc_chip_driver = { 290 {}
278 .probe = ndfc_chip_probe,
279 .remove = ndfc_chip_remove,
280 .driver = {
281 .name = "ndfc-chip",
282 .owner = THIS_MODULE,
283 },
284}; 291};
292MODULE_DEVICE_TABLE(of, ndfc_match);
285 293
286static struct platform_driver ndfc_nand_driver = { 294static struct of_platform_driver ndfc_driver = {
287 .probe = ndfc_nand_probe, 295 .driver = {
288 .remove = ndfc_nand_remove, 296 .name = "ndfc",
289 .driver = {
290 .name = "ndfc-nand",
291 .owner = THIS_MODULE,
292 }, 297 },
298 .match_table = ndfc_match,
299 .probe = ndfc_probe,
300 .remove = __devexit_p(ndfc_remove),
293}; 301};
294 302
295static int __init ndfc_nand_init(void) 303static int __init ndfc_nand_init(void)
296{ 304{
297 int ret; 305 return of_register_platform_driver(&ndfc_driver);
298
299 spin_lock_init(&ndfc_ctrl.ndfc_control.lock);
300 init_waitqueue_head(&ndfc_ctrl.ndfc_control.wq);
301
302 ret = platform_driver_register(&ndfc_nand_driver);
303 if (!ret)
304 ret = platform_driver_register(&ndfc_chip_driver);
305 return ret;
306} 306}
307 307
308static void __exit ndfc_nand_exit(void) 308static void __exit ndfc_nand_exit(void)
309{ 309{
310 platform_driver_unregister(&ndfc_chip_driver); 310 of_unregister_platform_driver(&ndfc_driver);
311 platform_driver_unregister(&ndfc_nand_driver);
312} 311}
313 312
314module_init(ndfc_nand_init); 313module_init(ndfc_nand_init);
@@ -316,6 +315,4 @@ module_exit(ndfc_nand_exit);
316 315
317MODULE_LICENSE("GPL"); 316MODULE_LICENSE("GPL");
318MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); 317MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
319MODULE_DESCRIPTION("Platform driver for NDFC"); 318MODULE_DESCRIPTION("OF Platform driver for NDFC");
320MODULE_ALIAS("platform:ndfc-chip");
321MODULE_ALIAS("platform:ndfc-nand");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index fc4144495610..cc55cbc2b308 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -298,7 +298,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
298#define NDTR1_tAR(c) (min((c), 15) << 0) 298#define NDTR1_tAR(c) (min((c), 15) << 0)
299 299
300/* convert nano-seconds to nand flash controller clock cycles */ 300/* convert nano-seconds to nand flash controller clock cycles */
301#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) + 1) 301#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
302 302
303static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, 303static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
304 const struct pxa3xx_nand_timing *t) 304 const struct pxa3xx_nand_timing *t)
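
A worked example of the rounding change above: with clk in Hz, clk / 1000000 is the clock in MHz and ns * MHz / 1000 the exact cycle count; moving from '+ 1' to '- 1' is consistent with the NDTR timing fields encoding cycles minus one (an inference from the change itself, not stated in this hunk). For a 156 MHz clock and a 20 ns constraint:

    ns2cycle(20, 156000000) = (20 * (156000000 / 1000000) / 1000) - 1
                            = (20 * 156 / 1000) - 1
                            = 3 - 1
                            = 2
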
@@ -368,14 +368,14 @@ static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
368 /* large block, 2 cycles for column address 368 /* large block, 2 cycles for column address
369 * row address starts from 3rd cycle 369 * row address starts from 3rd cycle
370 */ 370 */
371 info->ndcb1 |= (page_addr << 16) | (column & 0xffff); 371 info->ndcb1 |= page_addr << 16;
372 if (info->row_addr_cycles == 3) 372 if (info->row_addr_cycles == 3)
373 info->ndcb2 = (page_addr >> 16) & 0xff; 373 info->ndcb2 = (page_addr >> 16) & 0xff;
374 } else 374 } else
375 /* small block, 1 cycles for column address 375 /* small block, 1 cycles for column address
376 * row address starts from 2nd cycle 376 * row address starts from 2nd cycle
377 */ 377 */
378 info->ndcb1 = (page_addr << 8) | (column & 0xff); 378 info->ndcb1 = page_addr << 8;
379 379
380 if (cmd == cmdset->program) 380 if (cmd == cmdset->program)
381 info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS; 381 info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 30a518e211bd..54ec7542a7b7 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -2,6 +2,7 @@
2 * drivers/mtd/nand/sharpsl.c 2 * drivers/mtd/nand/sharpsl.c
3 * 3 *
4 * Copyright (C) 2004 Richard Purdie 4 * Copyright (C) 2004 Richard Purdie
5 * Copyright (C) 2008 Dmitry Baryshkov
5 * 6 *
6 * Based on Sharp's NAND driver sharp_sl.c 7 * Based on Sharp's NAND driver sharp_sl.c
7 * 8 *
@@ -19,22 +20,31 @@
19#include <linux/mtd/nand.h> 20#include <linux/mtd/nand.h>
20#include <linux/mtd/nand_ecc.h> 21#include <linux/mtd/nand_ecc.h>
21#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
23#include <linux/mtd/sharpsl.h>
22#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/platform_device.h>
26
23#include <asm/io.h> 27#include <asm/io.h>
24#include <mach/hardware.h> 28#include <mach/hardware.h>
25#include <asm/mach-types.h> 29#include <asm/mach-types.h>
26 30
27static void __iomem *sharpsl_io_base; 31struct sharpsl_nand {
28static int sharpsl_phys_base = 0x0C000000; 32 struct mtd_info mtd;
33 struct nand_chip chip;
34
35 void __iomem *io;
36};
37
38#define mtd_to_sharpsl(_mtd) container_of(_mtd, struct sharpsl_nand, mtd)
29 39
30/* register offset */ 40/* register offset */
31#define ECCLPLB sharpsl_io_base+0x00 /* line parity 7 - 0 bit */ 41#define ECCLPLB 0x00 /* line parity 7 - 0 bit */
32#define ECCLPUB sharpsl_io_base+0x04 /* line parity 15 - 8 bit */ 42#define ECCLPUB 0x04 /* line parity 15 - 8 bit */
33#define ECCCP sharpsl_io_base+0x08 /* column parity 5 - 0 bit */ 43#define ECCCP 0x08 /* column parity 5 - 0 bit */
34#define ECCCNTR sharpsl_io_base+0x0C /* ECC byte counter */ 44#define ECCCNTR 0x0C /* ECC byte counter */
35#define ECCCLRR sharpsl_io_base+0x10 /* clear ECC */ 45#define ECCCLRR 0x10 /* clear ECC */
36#define FLASHIO sharpsl_io_base+0x14 /* Flash I/O */ 46#define FLASHIO 0x14 /* Flash I/O */
37#define FLASHCTL sharpsl_io_base+0x18 /* Flash Control */ 47#define FLASHCTL 0x18 /* Flash Control */
38 48
39/* Flash control bit */ 49/* Flash control bit */
40#define FLRYBY (1 << 5) 50#define FLRYBY (1 << 5)
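
mtd_to_sharpsl() above is the standard container_of() idiom: given a pointer to the embedded mtd_info, it recovers the enclosing sharpsl_nand by subtracting the member offset, which is what lets the rewrite below drop the file-scope sharpsl_io_base and sharpsl_mtd globals. The general shape, as a sketch with made-up types:

    struct inner { int b; };
    struct outer { int a; struct inner member; };

    struct outer obj;
    struct inner *p = &obj.member;          /* the pointer handed to callbacks */
    struct outer *o = container_of(p, struct outer, member);
    /* equivalent to: (struct outer *)((char *)p - offsetof(struct outer, member)) */
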
@@ -45,35 +55,6 @@ static int sharpsl_phys_base = 0x0C000000;
45#define FLCE0 (1 << 0) 55#define FLCE0 (1 << 0)
46 56
47/* 57/*
48 * MTD structure for SharpSL
49 */
50static struct mtd_info *sharpsl_mtd = NULL;
51
52/*
53 * Define partitions for flash device
54 */
55#define DEFAULT_NUM_PARTITIONS 3
56
57static int nr_partitions;
58static struct mtd_partition sharpsl_nand_default_partition_info[] = {
59 {
60 .name = "System Area",
61 .offset = 0,
62 .size = 7 * 1024 * 1024,
63 },
64 {
65 .name = "Root Filesystem",
66 .offset = 7 * 1024 * 1024,
67 .size = 30 * 1024 * 1024,
68 },
69 {
70 .name = "Home Filesystem",
71 .offset = MTDPART_OFS_APPEND,
72 .size = MTDPART_SIZ_FULL,
73 },
74};
75
76/*
77 * hardware specific access to control-lines 58 * hardware specific access to control-lines
78 * ctrl: 59 * ctrl:
79 * NAND_CNE: bit 0 -> ! bit 0 & 4 60 * NAND_CNE: bit 0 -> ! bit 0 & 4
@@ -84,6 +65,7 @@ static struct mtd_partition sharpsl_nand_default_partition_info[] = {
84static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd, 65static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
85 unsigned int ctrl) 66 unsigned int ctrl)
86{ 67{
68 struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
87 struct nand_chip *chip = mtd->priv; 69 struct nand_chip *chip = mtd->priv;
88 70
89 if (ctrl & NAND_CTRL_CHANGE) { 71 if (ctrl & NAND_CTRL_CHANGE) {
@@ -93,103 +75,97 @@ static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
93 75
94 bits ^= 0x11; 76 bits ^= 0x11;
95 77
96 writeb((readb(FLASHCTL) & ~0x17) | bits, FLASHCTL); 78 writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL);
97 } 79 }
98 80
99 if (cmd != NAND_CMD_NONE) 81 if (cmd != NAND_CMD_NONE)
100 writeb(cmd, chip->IO_ADDR_W); 82 writeb(cmd, chip->IO_ADDR_W);
101} 83}
102 84
103static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
104
105static struct nand_bbt_descr sharpsl_bbt = {
106 .options = 0,
107 .offs = 4,
108 .len = 2,
109 .pattern = scan_ff_pattern
110};
111
112static struct nand_bbt_descr sharpsl_akita_bbt = {
113 .options = 0,
114 .offs = 4,
115 .len = 1,
116 .pattern = scan_ff_pattern
117};
118
119static struct nand_ecclayout akita_oobinfo = {
120 .eccbytes = 24,
121 .eccpos = {
122 0x5, 0x1, 0x2, 0x3, 0x6, 0x7, 0x15, 0x11,
123 0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23,
124 0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37},
125 .oobfree = {{0x08, 0x09}}
126};
127
128static int sharpsl_nand_dev_ready(struct mtd_info *mtd) 85static int sharpsl_nand_dev_ready(struct mtd_info *mtd)
129{ 86{
130 return !((readb(FLASHCTL) & FLRYBY) == 0); 87 struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
88 return !((readb(sharpsl->io + FLASHCTL) & FLRYBY) == 0);
131} 89}
132 90
133static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode) 91static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode)
134{ 92{
135 writeb(0, ECCCLRR); 93 struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
94 writeb(0, sharpsl->io + ECCCLRR);
136} 95}
137 96
138static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code) 97static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code)
139{ 98{
140 ecc_code[0] = ~readb(ECCLPUB); 99 struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
141 ecc_code[1] = ~readb(ECCLPLB); 100 ecc_code[0] = ~readb(sharpsl->io + ECCLPUB);
142 ecc_code[2] = (~readb(ECCCP) << 2) | 0x03; 101 ecc_code[1] = ~readb(sharpsl->io + ECCLPLB);
143 return readb(ECCCNTR) != 0; 102 ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03;
103 return readb(sharpsl->io + ECCCNTR) != 0;
144} 104}
145 105
146#ifdef CONFIG_MTD_PARTITIONS 106#ifdef CONFIG_MTD_PARTITIONS
147const char *part_probes[] = { "cmdlinepart", NULL }; 107static const char *part_probes[] = { "cmdlinepart", NULL };
148#endif 108#endif
149 109
150/* 110/*
151 * Main initialization routine 111 * Main initialization routine
152 */ 112 */
153static int __init sharpsl_nand_init(void) 113static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
154{ 114{
155 struct nand_chip *this; 115 struct nand_chip *this;
116#ifdef CONFIG_MTD_PARTITIONS
156 struct mtd_partition *sharpsl_partition_info; 117 struct mtd_partition *sharpsl_partition_info;
118 int nr_partitions;
119#endif
120 struct resource *r;
157 int err = 0; 121 int err = 0;
122 struct sharpsl_nand *sharpsl;
123 struct sharpsl_nand_platform_data *data = pdev->dev.platform_data;
124
125 if (!data) {
126 dev_err(&pdev->dev, "no platform data!\n");
127 return -EINVAL;
128 }
158 129
159 /* Allocate memory for MTD device structure and private data */ 130 /* Allocate memory for MTD device structure and private data */
160 sharpsl_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); 131 sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
161 if (!sharpsl_mtd) { 132 if (!sharpsl) {
162 printk("Unable to allocate SharpSL NAND MTD device structure.\n"); 133 printk("Unable to allocate SharpSL NAND MTD device structure.\n");
163 return -ENOMEM; 134 return -ENOMEM;
164 } 135 }
165 136
137 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
138 if (!r) {
139 dev_err(&pdev->dev, "no io memory resource defined!\n");
140 err = -ENODEV;
141 goto err_get_res;
142 }
143
166 /* map physical address */ 144 /* map physical address */
167 sharpsl_io_base = ioremap(sharpsl_phys_base, 0x1000); 145 sharpsl->io = ioremap(r->start, resource_size(r));
168 if (!sharpsl_io_base) { 146 if (!sharpsl->io) {
169 printk("ioremap to access Sharp SL NAND chip failed\n"); 147 printk("ioremap to access Sharp SL NAND chip failed\n");
170 kfree(sharpsl_mtd); 148 err = -EIO;
171 return -EIO; 149 goto err_ioremap;
172 } 150 }
173 151
174 /* Get pointer to private data */ 152 /* Get pointer to private data */
175 this = (struct nand_chip *)(&sharpsl_mtd[1]); 153 this = (struct nand_chip *)(&sharpsl->chip);
176
177 /* Initialize structures */
178 memset(sharpsl_mtd, 0, sizeof(struct mtd_info));
179 memset(this, 0, sizeof(struct nand_chip));
180 154
181 /* Link the private data with the MTD structure */ 155 /* Link the private data with the MTD structure */
182 sharpsl_mtd->priv = this; 156 sharpsl->mtd.priv = this;
183 sharpsl_mtd->owner = THIS_MODULE; 157 sharpsl->mtd.owner = THIS_MODULE;
158
159 platform_set_drvdata(pdev, sharpsl);
184 160
185 /* 161 /*
186 * PXA initialize 162 * PXA initialize
187 */ 163 */
188 writeb(readb(FLASHCTL) | FLWP, FLASHCTL); 164 writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL);
189 165
190 /* Set address of NAND IO lines */ 166 /* Set address of NAND IO lines */
191 this->IO_ADDR_R = FLASHIO; 167 this->IO_ADDR_R = sharpsl->io + FLASHIO;
192 this->IO_ADDR_W = FLASHIO; 168 this->IO_ADDR_W = sharpsl->io + FLASHIO;
193 /* Set address of hardware control function */ 169 /* Set address of hardware control function */
194 this->cmd_ctrl = sharpsl_nand_hwcontrol; 170 this->cmd_ctrl = sharpsl_nand_hwcontrol;
195 this->dev_ready = sharpsl_nand_dev_ready; 171 this->dev_ready = sharpsl_nand_dev_ready;
@@ -199,68 +175,89 @@ static int __init sharpsl_nand_init(void)
199 this->ecc.mode = NAND_ECC_HW; 175 this->ecc.mode = NAND_ECC_HW;
200 this->ecc.size = 256; 176 this->ecc.size = 256;
201 this->ecc.bytes = 3; 177 this->ecc.bytes = 3;
202 this->badblock_pattern = &sharpsl_bbt; 178 this->badblock_pattern = data->badblock_pattern;
203 if (machine_is_akita() || machine_is_borzoi()) { 179 this->ecc.layout = data->ecc_layout;
204 this->badblock_pattern = &sharpsl_akita_bbt;
205 this->ecc.layout = &akita_oobinfo;
206 }
207 this->ecc.hwctl = sharpsl_nand_enable_hwecc; 180 this->ecc.hwctl = sharpsl_nand_enable_hwecc;
208 this->ecc.calculate = sharpsl_nand_calculate_ecc; 181 this->ecc.calculate = sharpsl_nand_calculate_ecc;
209 this->ecc.correct = nand_correct_data; 182 this->ecc.correct = nand_correct_data;
210 183
211 /* Scan to find existence of the device */ 184 /* Scan to find existence of the device */
212 err = nand_scan(sharpsl_mtd, 1); 185 err = nand_scan(&sharpsl->mtd, 1);
213 if (err) { 186 if (err)
214 iounmap(sharpsl_io_base); 187 goto err_scan;
215 kfree(sharpsl_mtd);
216 return err;
217 }
218 188
219 /* Register the partitions */ 189 /* Register the partitions */
220 sharpsl_mtd->name = "sharpsl-nand"; 190 sharpsl->mtd.name = "sharpsl-nand";
221 nr_partitions = parse_mtd_partitions(sharpsl_mtd, part_probes, &sharpsl_partition_info, 0); 191#ifdef CONFIG_MTD_PARTITIONS
222 192 nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
223 if (nr_partitions <= 0) { 193 if (nr_partitions <= 0) {
224 nr_partitions = DEFAULT_NUM_PARTITIONS; 194 nr_partitions = data->nr_partitions;
225 sharpsl_partition_info = sharpsl_nand_default_partition_info; 195 sharpsl_partition_info = data->partitions;
226 if (machine_is_poodle()) {
227 sharpsl_partition_info[1].size = 22 * 1024 * 1024;
228 } else if (machine_is_corgi() || machine_is_shepherd()) {
229 sharpsl_partition_info[1].size = 25 * 1024 * 1024;
230 } else if (machine_is_husky()) {
231 sharpsl_partition_info[1].size = 53 * 1024 * 1024;
232 } else if (machine_is_spitz()) {
233 sharpsl_partition_info[1].size = 5 * 1024 * 1024;
234 } else if (machine_is_akita()) {
235 sharpsl_partition_info[1].size = 58 * 1024 * 1024;
236 } else if (machine_is_borzoi()) {
237 sharpsl_partition_info[1].size = 32 * 1024 * 1024;
238 }
239 } 196 }
240 197
241 add_mtd_partitions(sharpsl_mtd, sharpsl_partition_info, nr_partitions); 198 if (nr_partitions > 0)
199 err = add_mtd_partitions(&sharpsl->mtd, sharpsl_partition_info, nr_partitions);
200 else
201#endif
202 err = add_mtd_device(&sharpsl->mtd);
203 if (err)
204 goto err_add;
242 205
243 /* Return happy */ 206 /* Return happy */
244 return 0; 207 return 0;
245}
246 208
247module_init(sharpsl_nand_init); 209err_add:
210 nand_release(&sharpsl->mtd);
211
212err_scan:
213 platform_set_drvdata(pdev, NULL);
214 iounmap(sharpsl->io);
215err_ioremap:
216err_get_res:
217 kfree(sharpsl);
218 return err;
219}
248 220
249/* 221/*
250 * Clean up routine 222 * Clean up routine
251 */ 223 */
252static void __exit sharpsl_nand_cleanup(void) 224static int __devexit sharpsl_nand_remove(struct platform_device *pdev)
253{ 225{
226 struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
227
254 /* Release resources, unregister device */ 228 /* Release resources, unregister device */
255 nand_release(sharpsl_mtd); 229 nand_release(&sharpsl->mtd);
256 230
257 iounmap(sharpsl_io_base); 231 platform_set_drvdata(pdev, NULL);
232
233 iounmap(sharpsl->io);
258 234
259 /* Free the MTD device structure */ 235 /* Free the MTD device structure */
260 kfree(sharpsl_mtd); 236 kfree(sharpsl);
237
238 return 0;
239}
240
241static struct platform_driver sharpsl_nand_driver = {
242 .driver = {
243 .name = "sharpsl-nand",
244 .owner = THIS_MODULE,
245 },
246 .probe = sharpsl_nand_probe,
247 .remove = __devexit_p(sharpsl_nand_remove),
248};
249
250static int __init sharpsl_nand_init(void)
251{
252 return platform_driver_register(&sharpsl_nand_driver);
261} 253}
254module_init(sharpsl_nand_init);
262 255
263module_exit(sharpsl_nand_cleanup); 256static void __exit sharpsl_nand_exit(void)
257{
258 platform_driver_unregister(&sharpsl_nand_driver);
259}
260module_exit(sharpsl_nand_exit);
264 261
265MODULE_LICENSE("GPL"); 262MODULE_LICENSE("GPL");
266MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>"); 263MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 320b929abe79..d1c4546513f7 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -39,7 +39,7 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
39 struct NFTLrecord *nftl; 39 struct NFTLrecord *nftl;
40 unsigned long temp; 40 unsigned long temp;
41 41
42 if (mtd->type != MTD_NANDFLASH) 42 if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
43 return; 43 return;
44 /* OK, this is moderately ugly. But probably safe. Alternatives? */ 44 /* OK, this is moderately ugly. But probably safe. Alternatives? */
45 if (memcmp(mtd->name, "DiskOnChip", 10)) 45 if (memcmp(mtd->name, "DiskOnChip", 10))
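
This guard and the nftlmount, rfd_ftl and ssfdc changes below are all fallout from mtd->size having been widened to 64 bits earlier in this series: translation layers that index with 32-bit arithmetic now refuse devices over 4 GiB up front, after which their (u32) casts are provably lossless. The recurring pattern, sketched with a placeholder type check:

    if (mtd->type != WANTED_TYPE || mtd->size > UINT_MAX)
        return;                                  /* FTL cannot address > 4 GiB */
    nb_blocks = (u32)mtd->size / mtd->erasesize; /* safe after the check above */
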
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index ccc4f209fbb5..8b22b1836e9f 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -51,7 +51,7 @@ static int find_boot_record(struct NFTLrecord *nftl)
51 the mtd device accordingly. We could even get rid of 51 the mtd device accordingly. We could even get rid of
52 nftl->EraseSize if there were any point in doing so. */ 52 nftl->EraseSize if there were any point in doing so. */
53 nftl->EraseSize = nftl->mbd.mtd->erasesize; 53 nftl->EraseSize = nftl->mbd.mtd->erasesize;
54 nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize; 54 nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
55 55
56 nftl->MediaUnit = BLOCK_NIL; 56 nftl->MediaUnit = BLOCK_NIL;
57 nftl->SpareMediaUnit = BLOCK_NIL; 57 nftl->SpareMediaUnit = BLOCK_NIL;
@@ -168,7 +168,7 @@ device is already correct.
168 printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n", 168 printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n",
169 mh->UnitSizeFactor); 169 mh->UnitSizeFactor);
170 nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor); 170 nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor);
171 nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize; 171 nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
172 } 172 }
173#endif 173#endif
174 nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN); 174 nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 90ed319f26e6..529af271db17 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1772,7 +1772,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
1772 int len; 1772 int len;
1773 int ret = 0; 1773 int ret = 0;
1774 1774
1775 DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len); 1775 DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len);
1776 1776
1777 block_size = (1 << this->erase_shift); 1777 block_size = (1 << this->erase_shift);
1778 1778
@@ -1810,7 +1810,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
1810 1810
1811 /* Check if we have a bad block, we do not erase bad blocks */ 1811 /* Check if we have a bad block, we do not erase bad blocks */
1812 if (onenand_block_isbad_nolock(mtd, addr, 0)) { 1812 if (onenand_block_isbad_nolock(mtd, addr, 0)) {
1813 printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%08x\n", (unsigned int) addr); 1813 printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%012llx\n", (unsigned long long) addr);
1814 instr->state = MTD_ERASE_FAILED; 1814 instr->state = MTD_ERASE_FAILED;
1815 goto erase_exit; 1815 goto erase_exit;
1816 } 1816 }
@@ -2029,7 +2029,7 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
2029 * 2029 *
2030 * Lock one or more blocks 2030 * Lock one or more blocks
2031 */ 2031 */
2032static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 2032static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2033{ 2033{
2034 int ret; 2034 int ret;
2035 2035
@@ -2047,7 +2047,7 @@ static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
2047 * 2047 *
2048 * Unlock one or more blocks 2048 * Unlock one or more blocks
2049 */ 2049 */
2050static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 2050static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2051{ 2051{
2052 int ret; 2052 int ret;
2053 2053
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index e538c0a72abb..d2aa9c46530f 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -21,8 +21,6 @@
21 21
22#include <asm/types.h> 22#include <asm/types.h>
23 23
24#define const_cpu_to_le16 __constant_cpu_to_le16
25
26static int block_size = 0; 24static int block_size = 0;
27module_param(block_size, int, 0); 25module_param(block_size, int, 0);
28MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size"); 26MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
@@ -156,7 +154,7 @@ static int scan_header(struct partition *part)
156 size_t retlen; 154 size_t retlen;
157 155
158 sectors_per_block = part->block_size / SECTOR_SIZE; 156 sectors_per_block = part->block_size / SECTOR_SIZE;
159 part->total_blocks = part->mbd.mtd->size / part->block_size; 157 part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
160 158
161 if (part->total_blocks < 2) 159 if (part->total_blocks < 2)
162 return -ENOENT; 160 return -ENOENT;
@@ -276,16 +274,17 @@ static void erase_callback(struct erase_info *erase)
276 274
277 part = (struct partition*)erase->priv; 275 part = (struct partition*)erase->priv;
278 276
279 i = erase->addr / part->block_size; 277 i = (u32)erase->addr / part->block_size;
280 if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) { 278 if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
281 printk(KERN_ERR PREFIX "erase callback for unknown offset %x " 279 erase->addr > UINT_MAX) {
282 "on '%s'\n", erase->addr, part->mbd.mtd->name); 280 printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
281 "on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name);
283 return; 282 return;
284 } 283 }
285 284
286 if (erase->state != MTD_ERASE_DONE) { 285 if (erase->state != MTD_ERASE_DONE) {
287 printk(KERN_WARNING PREFIX "erase failed at 0x%x on '%s', " 286 printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
288 "state %d\n", erase->addr, 287 "state %d\n", (unsigned long long)erase->addr,
289 part->mbd.mtd->name, erase->state); 288 part->mbd.mtd->name, erase->state);
290 289
291 part->blocks[i].state = BLOCK_FAILED; 290 part->blocks[i].state = BLOCK_FAILED;
@@ -297,7 +296,7 @@ static void erase_callback(struct erase_info *erase)
297 return; 296 return;
298 } 297 }
299 298
300 magic = const_cpu_to_le16(RFD_MAGIC); 299 magic = cpu_to_le16(RFD_MAGIC);
301 300
302 part->blocks[i].state = BLOCK_ERASED; 301 part->blocks[i].state = BLOCK_ERASED;
303 part->blocks[i].free_sectors = part->data_sectors_per_block; 302 part->blocks[i].free_sectors = part->data_sectors_per_block;
@@ -345,9 +344,9 @@ static int erase_block(struct partition *part, int block)
345 rc = part->mbd.mtd->erase(part->mbd.mtd, erase); 344 rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
346 345
347 if (rc) { 346 if (rc) {
348 printk(KERN_ERR PREFIX "erase of region %x,%x on '%s' " 347 printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
349 "failed\n", erase->addr, erase->len, 348 "failed\n", (unsigned long long)erase->addr,
350 part->mbd.mtd->name); 349 (unsigned long long)erase->len, part->mbd.mtd->name);
351 kfree(erase); 350 kfree(erase);
352 } 351 }
353 352
@@ -587,7 +586,7 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)
587 int block, offset, rc; 586 int block, offset, rc;
588 u_long addr; 587 u_long addr;
589 size_t retlen; 588 size_t retlen;
590 u16 del = const_cpu_to_le16(SECTOR_DELETED); 589 u16 del = cpu_to_le16(SECTOR_DELETED);
591 590
592 block = old_addr / part->block_size; 591 block = old_addr / part->block_size;
593 offset = (old_addr % part->block_size) / SECTOR_SIZE - 592 offset = (old_addr % part->block_size) / SECTOR_SIZE -
@@ -763,7 +762,7 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
763{ 762{
764 struct partition *part; 763 struct partition *part;
765 764
766 if (mtd->type != MTD_NORFLASH) 765 if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
767 return; 766 return;
768 767
769 part = kzalloc(sizeof(struct partition), GFP_KERNEL); 768 part = kzalloc(sizeof(struct partition), GFP_KERNEL);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 33a5d6ed6f18..3f67e00d98e0 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -294,7 +294,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
294 int cis_sector; 294 int cis_sector;
295 295
296 /* Check for small page NAND flash */ 296 /* Check for small page NAND flash */
297 if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE) 297 if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE ||
298 mtd->size > UINT_MAX)
298 return; 299 return;
299 300
300 /* Check for SSFDC format by reading CIS/IDI sector */ 301
@@ -316,7 +317,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
316 317
317 ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT); 318 ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT);
318 ssfdc->erase_size = mtd->erasesize; 319 ssfdc->erase_size = mtd->erasesize;
319 ssfdc->map_len = mtd->size / mtd->erasesize; 320 ssfdc->map_len = (u32)mtd->size / mtd->erasesize;
320 321
321 DEBUG(MTD_DEBUG_LEVEL1, 322 DEBUG(MTD_DEBUG_LEVEL1,
322 "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n", 323 "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
@@ -327,7 +328,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
327 ssfdc->heads = 16; 328 ssfdc->heads = 16;
328 ssfdc->sectors = 32; 329 ssfdc->sectors = 32;
329 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); 330 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);
330 ssfdc->cylinders = (unsigned short)((mtd->size >> SECTOR_SHIFT) / 331 ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /
331 ((long)ssfdc->sectors * (long)ssfdc->heads)); 332 ((long)ssfdc->sectors * (long)ssfdc->heads));
332 333
333 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n", 334 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
new file mode 100644
index 000000000000..c1d501335006
--- /dev/null
+++ b/drivers/mtd/tests/Makefile
@@ -0,0 +1,7 @@
1obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o
2obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o
3obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o
4obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
5obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
6obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
7obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
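Each of these modules is loaded by hand with a dev= parameter selecting the MTD device number to exercise; it does all of its work from its module init function and reports progress and results through the kernel log, so unloading it afterwards is a no-op.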
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
new file mode 100644
index 000000000000..afbc3f8126db
--- /dev/null
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -0,0 +1,742 @@
1/*
2 * Copyright (C) 2006-2008 Nokia Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; see the file COPYING. If not, write to the Free Software
15 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
 17 * Test OOB read and write on an MTD device.
18 *
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */
21
22#include <asm/div64.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/err.h>
27#include <linux/mtd/mtd.h>
28#include <linux/sched.h>
29
30#define PRINT_PREF KERN_INFO "mtd_oobtest: "
31
32static int dev;
33module_param(dev, int, S_IRUGO);
34MODULE_PARM_DESC(dev, "MTD device number to use");
35
36static struct mtd_info *mtd;
37static unsigned char *readbuf;
38static unsigned char *writebuf;
39static unsigned char *bbt;
40
41static int ebcnt;
42static int pgcnt;
43static int errcnt;
44static int use_offset;
45static int use_len;
46static int use_len_max;
47static int vary_offset;
48static unsigned long next = 1;
49
50static inline unsigned int simple_rand(void)
51{
52 next = next * 1103515245 + 12345;
53 return (unsigned int)((next / 65536) % 32768);
54}
55
56static inline void simple_srand(unsigned long seed)
57{
58 next = seed;
59}
60
61static void set_random_data(unsigned char *buf, size_t len)
62{
63 size_t i;
64
65 for (i = 0; i < len; ++i)
66 buf[i] = simple_rand();
67}
68
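The two helpers above are the textbook ANSI C rand() linear congruential generator. The determinism is the point: reseeding replays exactly the same byte stream, so the write pass and the verify pass can each call simple_srand() with the same seed and regenerate identical data instead of buffering everything that was written. A minimal user-space sketch of that property (nothing assumed beyond the constants shown above):

	#include <stdio.h>

	static unsigned long next = 1;

	static unsigned int simple_rand(void)
	{
		next = next * 1103515245 + 12345;
		return (unsigned int)((next / 65536) % 32768);
	}

	int main(void)
	{
		unsigned int first[4], replay[4];
		int i;

		next = 42;			/* simple_srand(42) */
		for (i = 0; i < 4; i++)
			first[i] = simple_rand();
		next = 42;			/* same seed, same stream */
		for (i = 0; i < 4; i++)
			replay[i] = simple_rand();
		for (i = 0; i < 4; i++)
			printf("%u %u\n", first[i], replay[i]);	/* pairs match */
		return 0;
	}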
69static int erase_eraseblock(int ebnum)
70{
71 int err;
72 struct erase_info ei;
73 loff_t addr = ebnum * mtd->erasesize;
74
75 memset(&ei, 0, sizeof(struct erase_info));
76 ei.mtd = mtd;
77 ei.addr = addr;
78 ei.len = mtd->erasesize;
79
80 err = mtd->erase(mtd, &ei);
81 if (err) {
82 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
83 return err;
84 }
85
86 if (ei.state == MTD_ERASE_FAILED) {
87 printk(PRINT_PREF "some erase error occurred at EB %d\n",
88 ebnum);
89 return -EIO;
90 }
91
92 return 0;
93}
94
95static int erase_whole_device(void)
96{
97 int err;
98 unsigned int i;
99
100 printk(PRINT_PREF "erasing whole device\n");
101 for (i = 0; i < ebcnt; ++i) {
102 if (bbt[i])
103 continue;
104 err = erase_eraseblock(i);
105 if (err)
106 return err;
107 cond_resched();
108 }
109 printk(PRINT_PREF "erased %u eraseblocks\n", i);
110 return 0;
111}
112
113static void do_vary_offset(void)
114{
115 use_len -= 1;
116 if (use_len < 1) {
117 use_offset += 1;
118 if (use_offset >= use_len_max)
119 use_offset = 0;
120 use_len = use_len_max - use_offset;
121 }
122}
123
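do_vary_offset() slides a shrinking window across the available OOB area: the write length drops by one for every page, and when it hits zero the start offset advances and the length resets, so over enough pages every (offset, length) combination inside oobavail is exercised. A user-space sketch of the schedule, using a hypothetical oobavail of 8:

	#include <stdio.h>

	static int use_offset, use_len = 8, use_len_max = 8;	/* oobavail = 8 assumed */

	static void do_vary_offset(void)
	{
		use_len -= 1;
		if (use_len < 1) {
			use_offset += 1;
			if (use_offset >= use_len_max)
				use_offset = 0;
			use_len = use_len_max - use_offset;
		}
	}

	int main(void)
	{
		int pg;

		/* prints (0,8) (0,7) ... (0,1) (1,7) (1,6) ... */
		for (pg = 0; pg < 12; pg++) {
			printf("page %2d: offset %d, len %d\n",
			       pg, use_offset, use_len);
			do_vary_offset();
		}
		return 0;
	}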
124static int write_eraseblock(int ebnum)
125{
126 int i;
127 struct mtd_oob_ops ops;
128 int err = 0;
129 loff_t addr = ebnum * mtd->erasesize;
130
131 for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
132 set_random_data(writebuf, use_len);
133 ops.mode = MTD_OOB_AUTO;
134 ops.len = 0;
135 ops.retlen = 0;
136 ops.ooblen = use_len;
137 ops.oobretlen = 0;
138 ops.ooboffs = use_offset;
139 ops.datbuf = 0;
140 ops.oobbuf = writebuf;
141 err = mtd->write_oob(mtd, addr, &ops);
142 if (err || ops.oobretlen != use_len) {
143 printk(PRINT_PREF "error: writeoob failed at %#llx\n",
144 (long long)addr);
145 printk(PRINT_PREF "error: use_len %d, use_offset %d\n",
146 use_len, use_offset);
147 errcnt += 1;
148 return err ? err : -1;
149 }
150 if (vary_offset)
151 do_vary_offset();
152 }
153
154 return err;
155}
156
157static int write_whole_device(void)
158{
159 int err;
160 unsigned int i;
161
162 printk(PRINT_PREF "writing OOBs of whole device\n");
163 for (i = 0; i < ebcnt; ++i) {
164 if (bbt[i])
165 continue;
166 err = write_eraseblock(i);
167 if (err)
168 return err;
169 if (i % 256 == 0)
170 printk(PRINT_PREF "written up to eraseblock %u\n", i);
171 cond_resched();
172 }
173 printk(PRINT_PREF "written %u eraseblocks\n", i);
174 return 0;
175}
176
177static int verify_eraseblock(int ebnum)
178{
179 int i;
180 struct mtd_oob_ops ops;
181 int err = 0;
182 loff_t addr = ebnum * mtd->erasesize;
183
184 for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
185 set_random_data(writebuf, use_len);
186 ops.mode = MTD_OOB_AUTO;
187 ops.len = 0;
188 ops.retlen = 0;
189 ops.ooblen = use_len;
190 ops.oobretlen = 0;
191 ops.ooboffs = use_offset;
192 ops.datbuf = 0;
193 ops.oobbuf = readbuf;
194 err = mtd->read_oob(mtd, addr, &ops);
195 if (err || ops.oobretlen != use_len) {
196 printk(PRINT_PREF "error: readoob failed at %#llx\n",
197 (long long)addr);
198 errcnt += 1;
199 return err ? err : -1;
200 }
201 if (memcmp(readbuf, writebuf, use_len)) {
202 printk(PRINT_PREF "error: verify failed at %#llx\n",
203 (long long)addr);
204 errcnt += 1;
205 if (errcnt > 1000) {
206 printk(PRINT_PREF "error: too many errors\n");
207 return -1;
208 }
209 }
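		/*
		 * If only part of the available OOB was written, re-read the
		 * whole of it and check that the bytes around the written
		 * window still hold the erased-state 0xff pattern.
		 */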
210 if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
211 int k;
212
213 ops.mode = MTD_OOB_AUTO;
214 ops.len = 0;
215 ops.retlen = 0;
216 ops.ooblen = mtd->ecclayout->oobavail;
217 ops.oobretlen = 0;
218 ops.ooboffs = 0;
219 ops.datbuf = 0;
220 ops.oobbuf = readbuf;
221 err = mtd->read_oob(mtd, addr, &ops);
222 if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
223 printk(PRINT_PREF "error: readoob failed at "
224 "%#llx\n", (long long)addr);
225 errcnt += 1;
226 return err ? err : -1;
227 }
228 if (memcmp(readbuf + use_offset, writebuf, use_len)) {
229 printk(PRINT_PREF "error: verify failed at "
230 "%#llx\n", (long long)addr);
231 errcnt += 1;
232 if (errcnt > 1000) {
233 printk(PRINT_PREF "error: too many "
234 "errors\n");
235 return -1;
236 }
237 }
238 for (k = 0; k < use_offset; ++k)
239 if (readbuf[k] != 0xff) {
240 printk(PRINT_PREF "error: verify 0xff "
241 "failed at %#llx\n",
242 (long long)addr);
243 errcnt += 1;
244 if (errcnt > 1000) {
245 printk(PRINT_PREF "error: too "
246 "many errors\n");
247 return -1;
248 }
249 }
250 for (k = use_offset + use_len;
251 k < mtd->ecclayout->oobavail; ++k)
252 if (readbuf[k] != 0xff) {
253 printk(PRINT_PREF "error: verify 0xff "
254 "failed at %#llx\n",
255 (long long)addr);
256 errcnt += 1;
257 if (errcnt > 1000) {
258 printk(PRINT_PREF "error: too "
259 "many errors\n");
260 return -1;
261 }
262 }
263 }
264 if (vary_offset)
265 do_vary_offset();
266 }
267 return err;
268}
269
270static int verify_eraseblock_in_one_go(int ebnum)
271{
272 struct mtd_oob_ops ops;
273 int err = 0;
274 loff_t addr = ebnum * mtd->erasesize;
275 size_t len = mtd->ecclayout->oobavail * pgcnt;
276
277 set_random_data(writebuf, len);
278 ops.mode = MTD_OOB_AUTO;
279 ops.len = 0;
280 ops.retlen = 0;
281 ops.ooblen = len;
282 ops.oobretlen = 0;
283 ops.ooboffs = 0;
284 ops.datbuf = 0;
285 ops.oobbuf = readbuf;
286 err = mtd->read_oob(mtd, addr, &ops);
287 if (err || ops.oobretlen != len) {
288 printk(PRINT_PREF "error: readoob failed at %#llx\n",
289 (long long)addr);
290 errcnt += 1;
291 return err ? err : -1;
292 }
293 if (memcmp(readbuf, writebuf, len)) {
294 printk(PRINT_PREF "error: verify failed at %#llx\n",
295 (long long)addr);
296 errcnt += 1;
297 if (errcnt > 1000) {
298 printk(PRINT_PREF "error: too many errors\n");
299 return -1;
300 }
301 }
302
303 return err;
304}
305
306static int verify_all_eraseblocks(void)
307{
308 int err;
309 unsigned int i;
310
311 printk(PRINT_PREF "verifying all eraseblocks\n");
312 for (i = 0; i < ebcnt; ++i) {
313 if (bbt[i])
314 continue;
315 err = verify_eraseblock(i);
316 if (err)
317 return err;
318 if (i % 256 == 0)
319 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
320 cond_resched();
321 }
322 printk(PRINT_PREF "verified %u eraseblocks\n", i);
323 return 0;
324}
325
326static int is_block_bad(int ebnum)
327{
328 int ret;
329 loff_t addr = ebnum * mtd->erasesize;
330
331 ret = mtd->block_isbad(mtd, addr);
332 if (ret)
333 printk(PRINT_PREF "block %d is bad\n", ebnum);
334 return ret;
335}
336
337static int scan_for_bad_eraseblocks(void)
338{
339 int i, bad = 0;
340
341 bbt = kmalloc(ebcnt, GFP_KERNEL);
342 if (!bbt) {
343 printk(PRINT_PREF "error: cannot allocate memory\n");
344 return -ENOMEM;
345 }
 346	memset(bbt, 0, ebcnt);
347
348 printk(PRINT_PREF "scanning for bad eraseblocks\n");
349 for (i = 0; i < ebcnt; ++i) {
350 bbt[i] = is_block_bad(i) ? 1 : 0;
351 if (bbt[i])
352 bad += 1;
353 cond_resched();
354 }
355 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
356 return 0;
357}
358
359static int __init mtd_oobtest_init(void)
360{
361 int err = 0;
362 unsigned int i;
363 uint64_t tmp;
364 struct mtd_oob_ops ops;
365 loff_t addr = 0, addr0;
366
367 printk(KERN_INFO "\n");
368 printk(KERN_INFO "=================================================\n");
369 printk(PRINT_PREF "MTD device: %d\n", dev);
370
371 mtd = get_mtd_device(NULL, dev);
372 if (IS_ERR(mtd)) {
373 err = PTR_ERR(mtd);
374 printk(PRINT_PREF "error: cannot get MTD device\n");
375 return err;
376 }
377
378 if (mtd->type != MTD_NANDFLASH) {
379 printk(PRINT_PREF "this test requires NAND flash\n");
380 goto out;
381 }
382
383 tmp = mtd->size;
384 do_div(tmp, mtd->erasesize);
385 ebcnt = tmp;
386 pgcnt = mtd->erasesize / mtd->writesize;
387
388 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
389 "page size %u, count of eraseblocks %u, pages per "
390 "eraseblock %u, OOB size %u\n",
391 (unsigned long long)mtd->size, mtd->erasesize,
392 mtd->writesize, ebcnt, pgcnt, mtd->oobsize);
393
394 err = -ENOMEM;
396 readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
397 if (!readbuf) {
398 printk(PRINT_PREF "error: cannot allocate memory\n");
399 goto out;
400 }
401 writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
402 if (!writebuf) {
403 printk(PRINT_PREF "error: cannot allocate memory\n");
404 goto out;
405 }
406
407 err = scan_for_bad_eraseblocks();
408 if (err)
409 goto out;
410
411 use_offset = 0;
412 use_len = mtd->ecclayout->oobavail;
413 use_len_max = mtd->ecclayout->oobavail;
414 vary_offset = 0;
415
416 /* First test: write all OOB, read it back and verify */
417 printk(PRINT_PREF "test 1 of 5\n");
418
419 err = erase_whole_device();
420 if (err)
421 goto out;
422
423 simple_srand(1);
424 err = write_whole_device();
425 if (err)
426 goto out;
427
428 simple_srand(1);
429 err = verify_all_eraseblocks();
430 if (err)
431 goto out;
432
433 /*
434 * Second test: write all OOB, a block at a time, read it back and
435 * verify.
436 */
437 printk(PRINT_PREF "test 2 of 5\n");
438
439 err = erase_whole_device();
440 if (err)
441 goto out;
442
443 simple_srand(3);
444 err = write_whole_device();
445 if (err)
446 goto out;
447
448 /* Check all eraseblocks */
449 simple_srand(3);
450 printk(PRINT_PREF "verifying all eraseblocks\n");
451 for (i = 0; i < ebcnt; ++i) {
452 if (bbt[i])
453 continue;
454 err = verify_eraseblock_in_one_go(i);
455 if (err)
456 goto out;
457 if (i % 256 == 0)
458 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
459 cond_resched();
460 }
461 printk(PRINT_PREF "verified %u eraseblocks\n", i);
462
463 /*
464 * Third test: write OOB at varying offsets and lengths, read it back
465 * and verify.
466 */
467 printk(PRINT_PREF "test 3 of 5\n");
468
469 err = erase_whole_device();
470 if (err)
471 goto out;
472
473 /* Write all eraseblocks */
474 use_offset = 0;
475 use_len = mtd->ecclayout->oobavail;
476 use_len_max = mtd->ecclayout->oobavail;
477 vary_offset = 1;
478 simple_srand(5);
479 printk(PRINT_PREF "writing OOBs of whole device\n");
480 for (i = 0; i < ebcnt; ++i) {
481 if (bbt[i])
482 continue;
483 err = write_eraseblock(i);
484 if (err)
485 goto out;
486 if (i % 256 == 0)
487 printk(PRINT_PREF "written up to eraseblock %u\n", i);
488 cond_resched();
489 }
490 printk(PRINT_PREF "written %u eraseblocks\n", i);
491
492 /* Check all eraseblocks */
493 use_offset = 0;
494 use_len = mtd->ecclayout->oobavail;
495 use_len_max = mtd->ecclayout->oobavail;
496 vary_offset = 1;
497 simple_srand(5);
498 err = verify_all_eraseblocks();
499 if (err)
500 goto out;
501
502 use_offset = 0;
503 use_len = mtd->ecclayout->oobavail;
504 use_len_max = mtd->ecclayout->oobavail;
505 vary_offset = 0;
506
507 /* Fourth test: try to write off end of device */
508 printk(PRINT_PREF "test 4 of 5\n");
509
510 err = erase_whole_device();
511 if (err)
512 goto out;
513
514 addr0 = 0;
 515	for (i = 0; i < ebcnt && bbt[i]; ++i)
516 addr0 += mtd->erasesize;
517
518 /* Attempt to write off end of OOB */
519 ops.mode = MTD_OOB_AUTO;
520 ops.len = 0;
521 ops.retlen = 0;
522 ops.ooblen = 1;
523 ops.oobretlen = 0;
524 ops.ooboffs = mtd->ecclayout->oobavail;
525 ops.datbuf = 0;
526 ops.oobbuf = writebuf;
527 printk(PRINT_PREF "attempting to start write past end of OOB\n");
528 printk(PRINT_PREF "an error is expected...\n");
529 err = mtd->write_oob(mtd, addr0, &ops);
530 if (err) {
531 printk(PRINT_PREF "error occurred as expected\n");
532 err = 0;
533 } else {
534 printk(PRINT_PREF "error: can write past end of OOB\n");
535 errcnt += 1;
536 }
537
538 /* Attempt to read off end of OOB */
539 ops.mode = MTD_OOB_AUTO;
540 ops.len = 0;
541 ops.retlen = 0;
542 ops.ooblen = 1;
543 ops.oobretlen = 0;
544 ops.ooboffs = mtd->ecclayout->oobavail;
545 ops.datbuf = 0;
546 ops.oobbuf = readbuf;
547 printk(PRINT_PREF "attempting to start read past end of OOB\n");
548 printk(PRINT_PREF "an error is expected...\n");
549 err = mtd->read_oob(mtd, addr0, &ops);
550 if (err) {
551 printk(PRINT_PREF "error occurred as expected\n");
552 err = 0;
553 } else {
554 printk(PRINT_PREF "error: can read past end of OOB\n");
555 errcnt += 1;
556 }
557
558 if (bbt[ebcnt - 1])
559 printk(PRINT_PREF "skipping end of device tests because last "
560 "block is bad\n");
561 else {
562 /* Attempt to write off end of device */
563 ops.mode = MTD_OOB_AUTO;
564 ops.len = 0;
565 ops.retlen = 0;
566 ops.ooblen = mtd->ecclayout->oobavail + 1;
567 ops.oobretlen = 0;
568 ops.ooboffs = 0;
569 ops.datbuf = 0;
570 ops.oobbuf = writebuf;
571 printk(PRINT_PREF "attempting to write past end of device\n");
572 printk(PRINT_PREF "an error is expected...\n");
573 err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
574 if (err) {
575 printk(PRINT_PREF "error occurred as expected\n");
576 err = 0;
577 } else {
578 printk(PRINT_PREF "error: wrote past end of device\n");
579 errcnt += 1;
580 }
581
582 /* Attempt to read off end of device */
583 ops.mode = MTD_OOB_AUTO;
584 ops.len = 0;
585 ops.retlen = 0;
586 ops.ooblen = mtd->ecclayout->oobavail + 1;
587 ops.oobretlen = 0;
588 ops.ooboffs = 0;
589 ops.datbuf = 0;
590 ops.oobbuf = readbuf;
591 printk(PRINT_PREF "attempting to read past end of device\n");
592 printk(PRINT_PREF "an error is expected...\n");
593 err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
594 if (err) {
595 printk(PRINT_PREF "error occurred as expected\n");
596 err = 0;
597 } else {
598 printk(PRINT_PREF "error: read past end of device\n");
599 errcnt += 1;
600 }
601
602 err = erase_eraseblock(ebcnt - 1);
603 if (err)
604 goto out;
605
606 /* Attempt to write off end of device */
607 ops.mode = MTD_OOB_AUTO;
608 ops.len = 0;
609 ops.retlen = 0;
610 ops.ooblen = mtd->ecclayout->oobavail;
611 ops.oobretlen = 0;
612 ops.ooboffs = 1;
613 ops.datbuf = 0;
614 ops.oobbuf = writebuf;
615 printk(PRINT_PREF "attempting to write past end of device\n");
616 printk(PRINT_PREF "an error is expected...\n");
617 err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
618 if (err) {
619 printk(PRINT_PREF "error occurred as expected\n");
620 err = 0;
621 } else {
622 printk(PRINT_PREF "error: wrote past end of device\n");
623 errcnt += 1;
624 }
625
626 /* Attempt to read off end of device */
627 ops.mode = MTD_OOB_AUTO;
628 ops.len = 0;
629 ops.retlen = 0;
630 ops.ooblen = mtd->ecclayout->oobavail;
631 ops.oobretlen = 0;
632 ops.ooboffs = 1;
633 ops.datbuf = 0;
634 ops.oobbuf = readbuf;
635 printk(PRINT_PREF "attempting to read past end of device\n");
636 printk(PRINT_PREF "an error is expected...\n");
637 err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
638 if (err) {
639 printk(PRINT_PREF "error occurred as expected\n");
640 err = 0;
641 } else {
642 printk(PRINT_PREF "error: read past end of device\n");
643 errcnt += 1;
644 }
645 }
646
647 /* Fifth test: write / read across block boundaries */
648 printk(PRINT_PREF "test 5 of 5\n");
649
650 /* Erase all eraseblocks */
651 err = erase_whole_device();
652 if (err)
653 goto out;
654
655 /* Write all eraseblocks */
656 simple_srand(11);
657 printk(PRINT_PREF "writing OOBs of whole device\n");
658 for (i = 0; i < ebcnt - 1; ++i) {
659 int cnt = 2;
660 int pg;
661 size_t sz = mtd->ecclayout->oobavail;
662 if (bbt[i] || bbt[i + 1])
663 continue;
664 addr = (i + 1) * mtd->erasesize - mtd->writesize;
665 for (pg = 0; pg < cnt; ++pg) {
666 set_random_data(writebuf, sz);
667 ops.mode = MTD_OOB_AUTO;
668 ops.len = 0;
669 ops.retlen = 0;
670 ops.ooblen = sz;
671 ops.oobretlen = 0;
672 ops.ooboffs = 0;
673 ops.datbuf = 0;
674 ops.oobbuf = writebuf;
675 err = mtd->write_oob(mtd, addr, &ops);
676 if (err)
677 goto out;
678 if (i % 256 == 0)
679 printk(PRINT_PREF "written up to eraseblock "
680 "%u\n", i);
681 cond_resched();
682 addr += mtd->writesize;
683 }
684 }
685 printk(PRINT_PREF "written %u eraseblocks\n", i);
686
687 /* Check all eraseblocks */
688 simple_srand(11);
689 printk(PRINT_PREF "verifying all eraseblocks\n");
690 for (i = 0; i < ebcnt - 1; ++i) {
691 if (bbt[i] || bbt[i + 1])
692 continue;
693 set_random_data(writebuf, mtd->ecclayout->oobavail * 2);
694 addr = (i + 1) * mtd->erasesize - mtd->writesize;
695 ops.mode = MTD_OOB_AUTO;
696 ops.len = 0;
697 ops.retlen = 0;
698 ops.ooblen = mtd->ecclayout->oobavail * 2;
699 ops.oobretlen = 0;
700 ops.ooboffs = 0;
701 ops.datbuf = 0;
702 ops.oobbuf = readbuf;
703 err = mtd->read_oob(mtd, addr, &ops);
704 if (err)
705 goto out;
706 if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
707 printk(PRINT_PREF "error: verify failed at %#llx\n",
708 (long long)addr);
709 errcnt += 1;
710 if (errcnt > 1000) {
711 printk(PRINT_PREF "error: too many errors\n");
712 goto out;
713 }
714 }
715 if (i % 256 == 0)
716 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
717 cond_resched();
718 }
719 printk(PRINT_PREF "verified %u eraseblocks\n", i);
720
721 printk(PRINT_PREF "finished with %d errors\n", errcnt);
722out:
723 kfree(bbt);
724 kfree(writebuf);
725 kfree(readbuf);
726 put_mtd_device(mtd);
727 if (err)
728 printk(PRINT_PREF "error %d occurred\n", err);
729 printk(KERN_INFO "=================================================\n");
730 return err;
731}
732module_init(mtd_oobtest_init);
733
734static void __exit mtd_oobtest_exit(void)
735{
736 return;
737}
738module_exit(mtd_oobtest_exit);
739
740MODULE_DESCRIPTION("Out-of-band test module");
741MODULE_AUTHOR("Adrian Hunter");
742MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
new file mode 100644
index 000000000000..9648818b9e2c
--- /dev/null
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -0,0 +1,632 @@
1/*
2 * Copyright (C) 2006-2008 Nokia Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; see the file COPYING. If not, write to the Free Software
15 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
 17 * Test page read and write on an MTD device.
18 *
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */
21
22#include <asm/div64.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/err.h>
27#include <linux/mtd/mtd.h>
28#include <linux/sched.h>
29
30#define PRINT_PREF KERN_INFO "mtd_pagetest: "
31
32static int dev;
33module_param(dev, int, S_IRUGO);
34MODULE_PARM_DESC(dev, "MTD device number to use");
35
36static struct mtd_info *mtd;
37static unsigned char *twopages;
38static unsigned char *writebuf;
39static unsigned char *boundary;
40static unsigned char *bbt;
41
42static int pgsize;
43static int bufsize;
44static int ebcnt;
45static int pgcnt;
46static int errcnt;
47static unsigned long next = 1;
48
49static inline unsigned int simple_rand(void)
50{
51 next = next * 1103515245 + 12345;
52 return (unsigned int)((next / 65536) % 32768);
53}
54
55static inline void simple_srand(unsigned long seed)
56{
57 next = seed;
58}
59
60static void set_random_data(unsigned char *buf, size_t len)
61{
62 size_t i;
63
64 for (i = 0; i < len; ++i)
65 buf[i] = simple_rand();
66}
67
68static int erase_eraseblock(int ebnum)
69{
70 int err;
71 struct erase_info ei;
72 loff_t addr = ebnum * mtd->erasesize;
73
74 memset(&ei, 0, sizeof(struct erase_info));
75 ei.mtd = mtd;
76 ei.addr = addr;
77 ei.len = mtd->erasesize;
78
79 err = mtd->erase(mtd, &ei);
80 if (err) {
81 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
82 return err;
83 }
84
85 if (ei.state == MTD_ERASE_FAILED) {
86 printk(PRINT_PREF "some erase error occurred at EB %d\n",
87 ebnum);
88 return -EIO;
89 }
90
91 return 0;
92}
93
94static int write_eraseblock(int ebnum)
95{
96 int err = 0;
97 size_t written = 0;
98 loff_t addr = ebnum * mtd->erasesize;
99
100 set_random_data(writebuf, mtd->erasesize);
101 cond_resched();
102 err = mtd->write(mtd, addr, mtd->erasesize, &written, writebuf);
103 if (err || written != mtd->erasesize)
104 printk(PRINT_PREF "error: write failed at %#llx\n",
105 (long long)addr);
106
107 return err;
108}
109
110static int verify_eraseblock(int ebnum)
111{
112 uint32_t j;
113 size_t read = 0;
114 int err = 0, i;
115 loff_t addr0, addrn;
116 loff_t addr = ebnum * mtd->erasesize;
117
118 addr0 = 0;
 119	for (i = 0; i < ebcnt && bbt[i]; ++i)
120 addr0 += mtd->erasesize;
121
122 addrn = mtd->size;
 123	for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
124 addrn -= mtd->erasesize;
125
126 set_random_data(writebuf, mtd->erasesize);
127 for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
128 /* Do a read to set the internal dataRAMs to different data */
129 err = mtd->read(mtd, addr0, bufsize, &read, twopages);
130 if (err == -EUCLEAN)
131 err = 0;
132 if (err || read != bufsize) {
133 printk(PRINT_PREF "error: read failed at %#llx\n",
134 (long long)addr0);
135 return err;
136 }
137 err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
138 if (err == -EUCLEAN)
139 err = 0;
140 if (err || read != bufsize) {
141 printk(PRINT_PREF "error: read failed at %#llx\n",
142 (long long)(addrn - bufsize));
143 return err;
144 }
145 memset(twopages, 0, bufsize);
146 read = 0;
147 err = mtd->read(mtd, addr, bufsize, &read, twopages);
148 if (err == -EUCLEAN)
149 err = 0;
150 if (err || read != bufsize) {
151 printk(PRINT_PREF "error: read failed at %#llx\n",
152 (long long)addr);
153 break;
154 }
155 if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
156 printk(PRINT_PREF "error: verify failed at %#llx\n",
157 (long long)addr);
158 errcnt += 1;
159 }
160 }
161 /* Check boundary between eraseblocks */
162 if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
163 unsigned long oldnext = next;
164 /* Do a read to set the internal dataRAMs to different data */
165 err = mtd->read(mtd, addr0, bufsize, &read, twopages);
166 if (err == -EUCLEAN)
167 err = 0;
168 if (err || read != bufsize) {
169 printk(PRINT_PREF "error: read failed at %#llx\n",
170 (long long)addr0);
171 return err;
172 }
173 err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
174 if (err == -EUCLEAN)
175 err = 0;
176 if (err || read != bufsize) {
177 printk(PRINT_PREF "error: read failed at %#llx\n",
178 (long long)(addrn - bufsize));
179 return err;
180 }
181 memset(twopages, 0, bufsize);
182 read = 0;
183 err = mtd->read(mtd, addr, bufsize, &read, twopages);
184 if (err == -EUCLEAN)
185 err = 0;
186 if (err || read != bufsize) {
187 printk(PRINT_PREF "error: read failed at %#llx\n",
188 (long long)addr);
189 return err;
190 }
191 memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
192 set_random_data(boundary + pgsize, pgsize);
193 if (memcmp(twopages, boundary, bufsize)) {
194 printk(PRINT_PREF "error: verify failed at %#llx\n",
195 (long long)addr);
196 errcnt += 1;
197 }
198 next = oldnext;
199 }
200 return err;
201}
202
203static int crosstest(void)
204{
205 size_t read = 0;
206 int err = 0, i;
207 loff_t addr, addr0, addrn;
208 unsigned char *pp1, *pp2, *pp3, *pp4;
209
210 printk(PRINT_PREF "crosstest\n");
211 pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
212 if (!pp1) {
213 printk(PRINT_PREF "error: cannot allocate memory\n");
214 return -ENOMEM;
215 }
216 pp2 = pp1 + pgsize;
217 pp3 = pp2 + pgsize;
218 pp4 = pp3 + pgsize;
219 memset(pp1, 0, pgsize * 4);
220
221 addr0 = 0;
 222	for (i = 0; i < ebcnt && bbt[i]; ++i)
223 addr0 += mtd->erasesize;
224
225 addrn = mtd->size;
 226	for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
227 addrn -= mtd->erasesize;
228
229 /* Read 2nd-to-last page to pp1 */
230 read = 0;
231 addr = addrn - pgsize - pgsize;
232 err = mtd->read(mtd, addr, pgsize, &read, pp1);
233 if (err == -EUCLEAN)
234 err = 0;
235 if (err || read != pgsize) {
236 printk(PRINT_PREF "error: read failed at %#llx\n",
237 (long long)addr);
238 kfree(pp1);
239 return err;
240 }
241
242 /* Read 3rd-to-last page to pp1 */
243 read = 0;
244 addr = addrn - pgsize - pgsize - pgsize;
245 err = mtd->read(mtd, addr, pgsize, &read, pp1);
246 if (err == -EUCLEAN)
247 err = 0;
248 if (err || read != pgsize) {
249 printk(PRINT_PREF "error: read failed at %#llx\n",
250 (long long)addr);
251 kfree(pp1);
252 return err;
253 }
254
255 /* Read first page to pp2 */
256 read = 0;
257 addr = addr0;
258 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
259 err = mtd->read(mtd, addr, pgsize, &read, pp2);
260 if (err == -EUCLEAN)
261 err = 0;
262 if (err || read != pgsize) {
263 printk(PRINT_PREF "error: read failed at %#llx\n",
264 (long long)addr);
265 kfree(pp1);
266 return err;
267 }
268
269 /* Read last page to pp3 */
270 read = 0;
271 addr = addrn - pgsize;
272 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
273 err = mtd->read(mtd, addr, pgsize, &read, pp3);
274 if (err == -EUCLEAN)
275 err = 0;
276 if (err || read != pgsize) {
277 printk(PRINT_PREF "error: read failed at %#llx\n",
278 (long long)addr);
279 kfree(pp1);
280 return err;
281 }
282
283 /* Read first page again to pp4 */
284 read = 0;
285 addr = addr0;
286 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
287 err = mtd->read(mtd, addr, pgsize, &read, pp4);
288 if (err == -EUCLEAN)
289 err = 0;
290 if (err || read != pgsize) {
291 printk(PRINT_PREF "error: read failed at %#llx\n",
292 (long long)addr);
293 kfree(pp1);
294 return err;
295 }
296
297 /* pp2 and pp4 should be the same */
298 printk(PRINT_PREF "verifying pages read at %#llx match\n",
299 (long long)addr0);
300 if (memcmp(pp2, pp4, pgsize)) {
301 printk(PRINT_PREF "verify failed!\n");
302 errcnt += 1;
303 } else if (!err)
304 printk(PRINT_PREF "crosstest ok\n");
305 kfree(pp1);
306 return err;
307}
308
309static int erasecrosstest(void)
310{
311 size_t read = 0, written = 0;
312 int err = 0, i, ebnum, ok = 1, ebnum2;
313 loff_t addr0;
314 char *readbuf = twopages;
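	/*
	 * Program a marker page into the first good block, erase a block
	 * at the far end of the device, then check the marker survived:
	 * erasing one block must never disturb the contents of another.
	 */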
315
316 printk(PRINT_PREF "erasecrosstest\n");
317
318 ebnum = 0;
319 addr0 = 0;
 320	for (i = 0; i < ebcnt && bbt[i]; ++i) {
321 addr0 += mtd->erasesize;
322 ebnum += 1;
323 }
324
325 ebnum2 = ebcnt - 1;
326 while (ebnum2 && bbt[ebnum2])
327 ebnum2 -= 1;
328
329 printk(PRINT_PREF "erasing block %d\n", ebnum);
330 err = erase_eraseblock(ebnum);
331 if (err)
332 return err;
333
334 printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
335 set_random_data(writebuf, pgsize);
336 strcpy(writebuf, "There is no data like this!");
337 err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
338 if (err || written != pgsize) {
339 printk(PRINT_PREF "error: write failed at %#llx\n",
340 (long long)addr0);
341 return err ? err : -1;
342 }
343
344 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
345 memset(readbuf, 0, pgsize);
346 err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
347 if (err == -EUCLEAN)
348 err = 0;
349 if (err || read != pgsize) {
350 printk(PRINT_PREF "error: read failed at %#llx\n",
351 (long long)addr0);
352 return err ? err : -1;
353 }
354
355 printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
356 if (memcmp(writebuf, readbuf, pgsize)) {
357 printk(PRINT_PREF "verify failed!\n");
358 errcnt += 1;
359 ok = 0;
360 return err;
361 }
362
363 printk(PRINT_PREF "erasing block %d\n", ebnum);
364 err = erase_eraseblock(ebnum);
365 if (err)
366 return err;
367
368 printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
369 set_random_data(writebuf, pgsize);
370 strcpy(writebuf, "There is no data like this!");
371 err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
372 if (err || written != pgsize) {
373 printk(PRINT_PREF "error: write failed at %#llx\n",
374 (long long)addr0);
375 return err ? err : -1;
376 }
377
378 printk(PRINT_PREF "erasing block %d\n", ebnum2);
379 err = erase_eraseblock(ebnum2);
380 if (err)
381 return err;
382
383 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
384 memset(readbuf, 0, pgsize);
385 err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
386 if (err == -EUCLEAN)
387 err = 0;
388 if (err || read != pgsize) {
389 printk(PRINT_PREF "error: read failed at %#llx\n",
390 (long long)addr0);
391 return err ? err : -1;
392 }
393
394 printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
395 if (memcmp(writebuf, readbuf, pgsize)) {
396 printk(PRINT_PREF "verify failed!\n");
397 errcnt += 1;
398 ok = 0;
399 }
400
401 if (ok && !err)
402 printk(PRINT_PREF "erasecrosstest ok\n");
403 return err;
404}
405
406static int erasetest(void)
407{
408 size_t read = 0, written = 0;
409 int err = 0, i, ebnum, ok = 1;
410 loff_t addr0;
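	/*
	 * Program one page, erase its block, and check that the page now
	 * reads back as all 0xff, i.e. the erase really cleared the data.
	 */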
411
412 printk(PRINT_PREF "erasetest\n");
413
414 ebnum = 0;
415 addr0 = 0;
 416	for (i = 0; i < ebcnt && bbt[i]; ++i) {
417 addr0 += mtd->erasesize;
418 ebnum += 1;
419 }
420
421 printk(PRINT_PREF "erasing block %d\n", ebnum);
422 err = erase_eraseblock(ebnum);
423 if (err)
424 return err;
425
426 printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
427 set_random_data(writebuf, pgsize);
428 err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
429 if (err || written != pgsize) {
430 printk(PRINT_PREF "error: write failed at %#llx\n",
431 (long long)addr0);
432 return err ? err : -1;
433 }
434
435 printk(PRINT_PREF "erasing block %d\n", ebnum);
436 err = erase_eraseblock(ebnum);
437 if (err)
438 return err;
439
440 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
441 err = mtd->read(mtd, addr0, pgsize, &read, twopages);
442 if (err == -EUCLEAN)
443 err = 0;
444 if (err || read != pgsize) {
445 printk(PRINT_PREF "error: read failed at %#llx\n",
446 (long long)addr0);
447 return err ? err : -1;
448 }
449
450 printk(PRINT_PREF "verifying 1st page of block %d is all 0xff\n",
451 ebnum);
452 for (i = 0; i < pgsize; ++i)
453 if (twopages[i] != 0xff) {
454 printk(PRINT_PREF "verifying all 0xff failed at %d\n",
455 i);
456 errcnt += 1;
457 ok = 0;
458 break;
459 }
460
461 if (ok && !err)
462 printk(PRINT_PREF "erasetest ok\n");
463
464 return err;
465}
466
467static int is_block_bad(int ebnum)
468{
469 loff_t addr = ebnum * mtd->erasesize;
470 int ret;
471
472 ret = mtd->block_isbad(mtd, addr);
473 if (ret)
474 printk(PRINT_PREF "block %d is bad\n", ebnum);
475 return ret;
476}
477
478static int scan_for_bad_eraseblocks(void)
479{
480 int i, bad = 0;
481
482 bbt = kmalloc(ebcnt, GFP_KERNEL);
483 if (!bbt) {
484 printk(PRINT_PREF "error: cannot allocate memory\n");
485 return -ENOMEM;
486 }
 487	memset(bbt, 0, ebcnt);
488
489 printk(PRINT_PREF "scanning for bad eraseblocks\n");
490 for (i = 0; i < ebcnt; ++i) {
491 bbt[i] = is_block_bad(i) ? 1 : 0;
492 if (bbt[i])
493 bad += 1;
494 cond_resched();
495 }
496 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
497 return 0;
498}
499
500static int __init mtd_pagetest_init(void)
501{
502 int err = 0;
503 uint64_t tmp;
504 uint32_t i;
505
506 printk(KERN_INFO "\n");
507 printk(KERN_INFO "=================================================\n");
508 printk(PRINT_PREF "MTD device: %d\n", dev);
509
510 mtd = get_mtd_device(NULL, dev);
511 if (IS_ERR(mtd)) {
512 err = PTR_ERR(mtd);
513 printk(PRINT_PREF "error: cannot get MTD device\n");
514 return err;
515 }
516
517 if (mtd->type != MTD_NANDFLASH) {
518 printk(PRINT_PREF "this test requires NAND flash\n");
519 goto out;
520 }
521
522 tmp = mtd->size;
523 do_div(tmp, mtd->erasesize);
524 ebcnt = tmp;
 525	pgsize = mtd->writesize;
 	pgcnt = mtd->erasesize / pgsize;
526
527 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
528 "page size %u, count of eraseblocks %u, pages per "
529 "eraseblock %u, OOB size %u\n",
530 (unsigned long long)mtd->size, mtd->erasesize,
531 pgsize, ebcnt, pgcnt, mtd->oobsize);
532
533 err = -ENOMEM;
534 bufsize = pgsize * 2;
535 writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
536 if (!writebuf) {
537 printk(PRINT_PREF "error: cannot allocate memory\n");
538 goto out;
539 }
540 twopages = kmalloc(bufsize, GFP_KERNEL);
541 if (!twopages) {
542 printk(PRINT_PREF "error: cannot allocate memory\n");
543 goto out;
544 }
545 boundary = kmalloc(bufsize, GFP_KERNEL);
546 if (!boundary) {
547 printk(PRINT_PREF "error: cannot allocate memory\n");
548 goto out;
549 }
550
551 err = scan_for_bad_eraseblocks();
552 if (err)
553 goto out;
554
555 /* Erase all eraseblocks */
556 printk(PRINT_PREF "erasing whole device\n");
557 for (i = 0; i < ebcnt; ++i) {
558 if (bbt[i])
559 continue;
560 err = erase_eraseblock(i);
561 if (err)
562 goto out;
563 cond_resched();
564 }
565 printk(PRINT_PREF "erased %u eraseblocks\n", i);
566
567 /* Write all eraseblocks */
568 simple_srand(1);
569 printk(PRINT_PREF "writing whole device\n");
570 for (i = 0; i < ebcnt; ++i) {
571 if (bbt[i])
572 continue;
573 err = write_eraseblock(i);
574 if (err)
575 goto out;
576 if (i % 256 == 0)
577 printk(PRINT_PREF "written up to eraseblock %u\n", i);
578 cond_resched();
579 }
580 printk(PRINT_PREF "written %u eraseblocks\n", i);
581
582 /* Check all eraseblocks */
583 simple_srand(1);
584 printk(PRINT_PREF "verifying all eraseblocks\n");
585 for (i = 0; i < ebcnt; ++i) {
586 if (bbt[i])
587 continue;
588 err = verify_eraseblock(i);
589 if (err)
590 goto out;
591 if (i % 256 == 0)
592 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
593 cond_resched();
594 }
595 printk(PRINT_PREF "verified %u eraseblocks\n", i);
596
597 err = crosstest();
598 if (err)
599 goto out;
600
601 err = erasecrosstest();
602 if (err)
603 goto out;
604
605 err = erasetest();
606 if (err)
607 goto out;
608
609 printk(PRINT_PREF "finished with %d errors\n", errcnt);
610out:
611
612 kfree(bbt);
613 kfree(boundary);
614 kfree(twopages);
615 kfree(writebuf);
616 put_mtd_device(mtd);
617 if (err)
618 printk(PRINT_PREF "error %d occurred\n", err);
619 printk(KERN_INFO "=================================================\n");
620 return err;
621}
622module_init(mtd_pagetest_init);
623
624static void __exit mtd_pagetest_exit(void)
625{
626 return;
627}
628module_exit(mtd_pagetest_exit);
629
630MODULE_DESCRIPTION("NAND page test");
631MODULE_AUTHOR("Adrian Hunter");
632MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
new file mode 100644
index 000000000000..645e77fdc63d
--- /dev/null
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -0,0 +1,253 @@
1/*
2 * Copyright (C) 2006-2008 Nokia Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; see the file COPYING. If not, write to the Free Software
15 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Check MTD device read.
18 *
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */
21
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/err.h>
26#include <linux/mtd/mtd.h>
27#include <linux/sched.h>
28
29#define PRINT_PREF KERN_INFO "mtd_readtest: "
30
31static int dev;
32module_param(dev, int, S_IRUGO);
33MODULE_PARM_DESC(dev, "MTD device number to use");
34
35static struct mtd_info *mtd;
36static unsigned char *iobuf;
37static unsigned char *iobuf1;
38static unsigned char *bbt;
39
40static int pgsize;
41static int ebcnt;
42static int pgcnt;
43
44static int read_eraseblock_by_page(int ebnum)
45{
46 size_t read = 0;
47 int i, ret, err = 0;
48 loff_t addr = ebnum * mtd->erasesize;
49 void *buf = iobuf;
50 void *oobbuf = iobuf1;
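	/*
	 * Record the first failure but keep reading, so a single bad page
	 * does not hide problems elsewhere in the block; corrected ECC
	 * results (-EUCLEAN) count as success.
	 */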
51
52 for (i = 0; i < pgcnt; i++) {
 53		memset(buf, 0, pgsize);
54 ret = mtd->read(mtd, addr, pgsize, &read, buf);
55 if (ret == -EUCLEAN)
56 ret = 0;
57 if (ret || read != pgsize) {
58 printk(PRINT_PREF "error: read failed at %#llx\n",
59 (long long)addr);
60 if (!err)
61 err = ret;
62 if (!err)
63 err = -EINVAL;
64 }
65 if (mtd->oobsize) {
66 struct mtd_oob_ops ops;
67
68 ops.mode = MTD_OOB_PLACE;
69 ops.len = 0;
70 ops.retlen = 0;
71 ops.ooblen = mtd->oobsize;
72 ops.oobretlen = 0;
73 ops.ooboffs = 0;
74 ops.datbuf = 0;
75 ops.oobbuf = oobbuf;
76 ret = mtd->read_oob(mtd, addr, &ops);
77 if (ret || ops.oobretlen != mtd->oobsize) {
78 printk(PRINT_PREF "error: read oob failed at "
79 "%#llx\n", (long long)addr);
80 if (!err)
81 err = ret;
82 if (!err)
83 err = -EINVAL;
84 }
85 oobbuf += mtd->oobsize;
86 }
87 addr += pgsize;
88 buf += pgsize;
89 }
90
91 return err;
92}
93
94static void dump_eraseblock(int ebnum)
95{
96 int i, j, n;
97 char line[128];
98 int pg, oob;
99
100 printk(PRINT_PREF "dumping eraseblock %d\n", ebnum);
101 n = mtd->erasesize;
102 for (i = 0; i < n;) {
103 char *p = line;
104
105 p += sprintf(p, "%05x: ", i);
106 for (j = 0; j < 32 && i < n; j++, i++)
107 p += sprintf(p, "%02x", (unsigned int)iobuf[i]);
108 printk(KERN_CRIT "%s\n", line);
109 cond_resched();
110 }
111 if (!mtd->oobsize)
112 return;
113 printk(PRINT_PREF "dumping oob from eraseblock %d\n", ebnum);
114 n = mtd->oobsize;
115 for (pg = 0, i = 0; pg < pgcnt; pg++)
116 for (oob = 0; oob < n;) {
117 char *p = line;
118
119 p += sprintf(p, "%05x: ", i);
120 for (j = 0; j < 32 && oob < n; j++, oob++, i++)
121 p += sprintf(p, "%02x",
122 (unsigned int)iobuf1[i]);
123 printk(KERN_CRIT "%s\n", line);
124 cond_resched();
125 }
126}
127
128static int is_block_bad(int ebnum)
129{
130 loff_t addr = ebnum * mtd->erasesize;
131 int ret;
132
133 ret = mtd->block_isbad(mtd, addr);
134 if (ret)
135 printk(PRINT_PREF "block %d is bad\n", ebnum);
136 return ret;
137}
138
139static int scan_for_bad_eraseblocks(void)
140{
141 int i, bad = 0;
142
143 bbt = kmalloc(ebcnt, GFP_KERNEL);
144 if (!bbt) {
145 printk(PRINT_PREF "error: cannot allocate memory\n");
146 return -ENOMEM;
147 }
 148	memset(bbt, 0, ebcnt);
149
150 printk(PRINT_PREF "scanning for bad eraseblocks\n");
151 for (i = 0; i < ebcnt; ++i) {
152 bbt[i] = is_block_bad(i) ? 1 : 0;
153 if (bbt[i])
154 bad += 1;
155 cond_resched();
156 }
157 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
158 return 0;
159}
160
161static int __init mtd_readtest_init(void)
162{
163 uint64_t tmp;
164 int err, i;
165
166 printk(KERN_INFO "\n");
167 printk(KERN_INFO "=================================================\n");
168 printk(PRINT_PREF "MTD device: %d\n", dev);
169
170 mtd = get_mtd_device(NULL, dev);
171 if (IS_ERR(mtd)) {
172 err = PTR_ERR(mtd);
 173		printk(PRINT_PREF "error: cannot get MTD device\n");
174 return err;
175 }
176
177 if (mtd->writesize == 1) {
178 printk(PRINT_PREF "not NAND flash, assume page size is 512 "
179 "bytes.\n");
180 pgsize = 512;
181 } else
182 pgsize = mtd->writesize;
183
184 tmp = mtd->size;
185 do_div(tmp, mtd->erasesize);
186 ebcnt = tmp;
 187	pgcnt = mtd->erasesize / pgsize;
188
189 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
190 "page size %u, count of eraseblocks %u, pages per "
191 "eraseblock %u, OOB size %u\n",
192 (unsigned long long)mtd->size, mtd->erasesize,
193 pgsize, ebcnt, pgcnt, mtd->oobsize);
194
195 err = -ENOMEM;
196 iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
197 if (!iobuf) {
198 printk(PRINT_PREF "error: cannot allocate memory\n");
199 goto out;
200 }
201 iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
202 if (!iobuf1) {
203 printk(PRINT_PREF "error: cannot allocate memory\n");
204 goto out;
205 }
206
207 err = scan_for_bad_eraseblocks();
208 if (err)
209 goto out;
210
211 /* Read all eraseblocks 1 page at a time */
212 printk(PRINT_PREF "testing page read\n");
213 for (i = 0; i < ebcnt; ++i) {
214 int ret;
215
216 if (bbt[i])
217 continue;
218 ret = read_eraseblock_by_page(i);
219 if (ret) {
220 dump_eraseblock(i);
221 if (!err)
222 err = ret;
223 }
224 cond_resched();
225 }
226
227 if (err)
228 printk(PRINT_PREF "finished with errors\n");
229 else
230 printk(PRINT_PREF "finished\n");
231
232out:
233
234 kfree(iobuf);
235 kfree(iobuf1);
236 kfree(bbt);
237 put_mtd_device(mtd);
238 if (err)
239 printk(PRINT_PREF "error %d occurred\n", err);
240 printk(KERN_INFO "=================================================\n");
241 return err;
242}
243module_init(mtd_readtest_init);
244
245static void __exit mtd_readtest_exit(void)
246{
247 return;
248}
249module_exit(mtd_readtest_exit);
250
251MODULE_DESCRIPTION("Read test module");
252MODULE_AUTHOR("Adrian Hunter");
253MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
new file mode 100644
index 000000000000..141363a7e805
--- /dev/null
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -0,0 +1,502 @@
1/*
2 * Copyright (C) 2007 Nokia Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; see the file COPYING. If not, write to the Free Software
15 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
 17 * Test read and write speed of an MTD device.
18 *
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */
21
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/err.h>
26#include <linux/mtd/mtd.h>
27#include <linux/sched.h>
28
29#define PRINT_PREF KERN_INFO "mtd_speedtest: "
30
31static int dev;
32module_param(dev, int, S_IRUGO);
33MODULE_PARM_DESC(dev, "MTD device number to use");
34
35static struct mtd_info *mtd;
36static unsigned char *iobuf;
37static unsigned char *bbt;
38
39static int pgsize;
40static int ebcnt;
41static int pgcnt;
42static int goodebcnt;
43static struct timeval start, finish;
44static unsigned long next = 1;
45
46static inline unsigned int simple_rand(void)
47{
48 next = next * 1103515245 + 12345;
49 return (unsigned int)((next / 65536) % 32768);
50}
51
52static inline void simple_srand(unsigned long seed)
53{
54 next = seed;
55}
56
57static void set_random_data(unsigned char *buf, size_t len)
58{
59 size_t i;
60
61 for (i = 0; i < len; ++i)
62 buf[i] = simple_rand();
63}
64
65static int erase_eraseblock(int ebnum)
66{
67 int err;
68 struct erase_info ei;
69 loff_t addr = ebnum * mtd->erasesize;
70
71 memset(&ei, 0, sizeof(struct erase_info));
72 ei.mtd = mtd;
73 ei.addr = addr;
74 ei.len = mtd->erasesize;
75
76 err = mtd->erase(mtd, &ei);
77 if (err) {
78 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
79 return err;
80 }
81
82 if (ei.state == MTD_ERASE_FAILED) {
83 printk(PRINT_PREF "some erase error occurred at EB %d\n",
84 ebnum);
85 return -EIO;
86 }
87
88 return 0;
89}
90
91static int erase_whole_device(void)
92{
93 int err;
94 unsigned int i;
95
96 for (i = 0; i < ebcnt; ++i) {
97 if (bbt[i])
98 continue;
99 err = erase_eraseblock(i);
100 if (err)
101 return err;
102 cond_resched();
103 }
104 return 0;
105}
106
107static int write_eraseblock(int ebnum)
108{
109 size_t written = 0;
110 int err = 0;
111 loff_t addr = ebnum * mtd->erasesize;
112
113 err = mtd->write(mtd, addr, mtd->erasesize, &written, iobuf);
114 if (err || written != mtd->erasesize) {
115 printk(PRINT_PREF "error: write failed at %#llx\n", addr);
116 if (!err)
117 err = -EINVAL;
118 }
119
120 return err;
121}
122
123static int write_eraseblock_by_page(int ebnum)
124{
125 size_t written = 0;
126 int i, err = 0;
127 loff_t addr = ebnum * mtd->erasesize;
128 void *buf = iobuf;
129
130 for (i = 0; i < pgcnt; i++) {
131 err = mtd->write(mtd, addr, pgsize, &written, buf);
132 if (err || written != pgsize) {
133 printk(PRINT_PREF "error: write failed at %#llx\n",
134 addr);
135 if (!err)
136 err = -EINVAL;
137 break;
138 }
139 addr += pgsize;
140 buf += pgsize;
141 }
142
143 return err;
144}
145
146static int write_eraseblock_by_2pages(int ebnum)
147{
148 size_t written = 0, sz = pgsize * 2;
149 int i, n = pgcnt / 2, err = 0;
150 loff_t addr = ebnum * mtd->erasesize;
151 void *buf = iobuf;
152
153 for (i = 0; i < n; i++) {
154 err = mtd->write(mtd, addr, sz, &written, buf);
155 if (err || written != sz) {
156 printk(PRINT_PREF "error: write failed at %#llx\n",
157 addr);
158 if (!err)
159 err = -EINVAL;
160 return err;
161 }
162 addr += sz;
163 buf += sz;
164 }
165 if (pgcnt % 2) {
166 err = mtd->write(mtd, addr, pgsize, &written, buf);
167 if (err || written != pgsize) {
168 printk(PRINT_PREF "error: write failed at %#llx\n",
169 addr);
170 if (!err)
171 err = -EINVAL;
172 }
173 }
174
175 return err;
176}
177
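/*
 * The whole-eraseblock, single-page and two-page write/read variants
 * transfer identical data, so the flash programming and sensing time is
 * essentially the same in each pass; differences between the reported
 * speeds measure per-call MTD API and driver overhead.
 */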
178static int read_eraseblock(int ebnum)
179{
180 size_t read = 0;
181 int err = 0;
182 loff_t addr = ebnum * mtd->erasesize;
183
184 err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf);
185 /* Ignore corrected ECC errors */
186 if (err == -EUCLEAN)
187 err = 0;
188 if (err || read != mtd->erasesize) {
189 printk(PRINT_PREF "error: read failed at %#llx\n", addr);
190 if (!err)
191 err = -EINVAL;
192 }
193
194 return err;
195}
196
197static int read_eraseblock_by_page(int ebnum)
198{
199 size_t read = 0;
200 int i, err = 0;
201 loff_t addr = ebnum * mtd->erasesize;
202 void *buf = iobuf;
203
204 for (i = 0; i < pgcnt; i++) {
205 err = mtd->read(mtd, addr, pgsize, &read, buf);
206 /* Ignore corrected ECC errors */
207 if (err == -EUCLEAN)
208 err = 0;
209 if (err || read != pgsize) {
210 printk(PRINT_PREF "error: read failed at %#llx\n",
211 addr);
212 if (!err)
213 err = -EINVAL;
214 break;
215 }
216 addr += pgsize;
217 buf += pgsize;
218 }
219
220 return err;
221}
222
223static int read_eraseblock_by_2pages(int ebnum)
224{
225 size_t read = 0, sz = pgsize * 2;
226 int i, n = pgcnt / 2, err = 0;
227 loff_t addr = ebnum * mtd->erasesize;
228 void *buf = iobuf;
229
230 for (i = 0; i < n; i++) {
231 err = mtd->read(mtd, addr, sz, &read, buf);
232 /* Ignore corrected ECC errors */
233 if (err == -EUCLEAN)
234 err = 0;
235 if (err || read != sz) {
236 printk(PRINT_PREF "error: read failed at %#llx\n",
237 addr);
238 if (!err)
239 err = -EINVAL;
240 return err;
241 }
242 addr += sz;
243 buf += sz;
244 }
245 if (pgcnt % 2) {
246 err = mtd->read(mtd, addr, pgsize, &read, buf);
247 /* Ignore corrected ECC errors */
248 if (err == -EUCLEAN)
249 err = 0;
250 if (err || read != pgsize) {
251 printk(PRINT_PREF "error: read failed at %#llx\n",
252 addr);
253 if (!err)
254 err = -EINVAL;
255 }
256 }
257
258 return err;
259}
260
261static int is_block_bad(int ebnum)
262{
263 loff_t addr = ebnum * mtd->erasesize;
264 int ret;
265
266 ret = mtd->block_isbad(mtd, addr);
267 if (ret)
268 printk(PRINT_PREF "block %d is bad\n", ebnum);
269 return ret;
270}
271
272static inline void start_timing(void)
273{
274 do_gettimeofday(&start);
275}
276
277static inline void stop_timing(void)
278{
279 do_gettimeofday(&finish);
280}
281
282static long calc_speed(void)
283{
284 long ms, k, speed;
285
286 ms = (finish.tv_sec - start.tv_sec) * 1000 +
287 (finish.tv_usec - start.tv_usec) / 1000;
288 k = goodebcnt * mtd->erasesize / 1024;
289 speed = (k * 1000) / ms;
290 return speed;
291}
292
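calc_speed() reduces a timed pass to KiB/s in integer arithmetic: k is the payload carried by the good eraseblocks, in KiB, scaled by 1000 before dividing by the elapsed milliseconds. A user-space sketch with illustrative numbers (note that the in-kernel version divides by ms directly, so a pass finishing in under a millisecond would divide by zero):

	#include <stdio.h>

	int main(void)
	{
		long goodebcnt = 1000;		/* assumed: 1000 good blocks   */
		long erasesize = 131072;	/* assumed: 128 KiB eraseblock */
		long ms = 4000;			/* assumed: pass took 4 s      */
		long k = goodebcnt * erasesize / 1024;	/* 128000 KiB */
		long speed = (k * 1000) / ms;

		printf("%ld KiB/s\n", speed);	/* prints 32000 */
		return 0;
	}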
293static int scan_for_bad_eraseblocks(void)
294{
295 int i, bad = 0;
296
297 bbt = kmalloc(ebcnt, GFP_KERNEL);
298 if (!bbt) {
299 printk(PRINT_PREF "error: cannot allocate memory\n");
300 return -ENOMEM;
301 }
 302	memset(bbt, 0, ebcnt);
303
304 printk(PRINT_PREF "scanning for bad eraseblocks\n");
305 for (i = 0; i < ebcnt; ++i) {
306 bbt[i] = is_block_bad(i) ? 1 : 0;
307 if (bbt[i])
308 bad += 1;
309 cond_resched();
310 }
311 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
312 goodebcnt = ebcnt - bad;
313 return 0;
314}
315
316static int __init mtd_speedtest_init(void)
317{
318 int err, i;
319 long speed;
320 uint64_t tmp;
321
322 printk(KERN_INFO "\n");
323 printk(KERN_INFO "=================================================\n");
324 printk(PRINT_PREF "MTD device: %d\n", dev);
325
326 mtd = get_mtd_device(NULL, dev);
327 if (IS_ERR(mtd)) {
328 err = PTR_ERR(mtd);
329 printk(PRINT_PREF "error: cannot get MTD device\n");
330 return err;
331 }
332
333 if (mtd->writesize == 1) {
334 printk(PRINT_PREF "not NAND flash, assume page size is 512 "
335 "bytes.\n");
336 pgsize = 512;
337 } else
338 pgsize = mtd->writesize;
339
340 tmp = mtd->size;
341 do_div(tmp, mtd->erasesize);
342 ebcnt = tmp;
 343	pgcnt = mtd->erasesize / pgsize;
344
345 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
346 "page size %u, count of eraseblocks %u, pages per "
347 "eraseblock %u, OOB size %u\n",
348 (unsigned long long)mtd->size, mtd->erasesize,
349 pgsize, ebcnt, pgcnt, mtd->oobsize);
350
351 err = -ENOMEM;
352 iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
353 if (!iobuf) {
354 printk(PRINT_PREF "error: cannot allocate memory\n");
355 goto out;
356 }
357
358 simple_srand(1);
359 set_random_data(iobuf, mtd->erasesize);
360
361 err = scan_for_bad_eraseblocks();
362 if (err)
363 goto out;
364
365 err = erase_whole_device();
366 if (err)
367 goto out;
368
369 /* Write all eraseblocks, 1 eraseblock at a time */
370 printk(PRINT_PREF "testing eraseblock write speed\n");
371 start_timing();
372 for (i = 0; i < ebcnt; ++i) {
373 if (bbt[i])
374 continue;
375 err = write_eraseblock(i);
376 if (err)
377 goto out;
378 cond_resched();
379 }
380 stop_timing();
381 speed = calc_speed();
382 printk(PRINT_PREF "eraseblock write speed is %ld KiB/s\n", speed);
383
384 /* Read all eraseblocks, 1 eraseblock at a time */
385 printk(PRINT_PREF "testing eraseblock read speed\n");
386 start_timing();
387 for (i = 0; i < ebcnt; ++i) {
388 if (bbt[i])
389 continue;
390 err = read_eraseblock(i);
391 if (err)
392 goto out;
393 cond_resched();
394 }
395 stop_timing();
396 speed = calc_speed();
397 printk(PRINT_PREF "eraseblock read speed is %ld KiB/s\n", speed);
398
399 err = erase_whole_device();
400 if (err)
401 goto out;
402
403 /* Write all eraseblocks, 1 page at a time */
404 printk(PRINT_PREF "testing page write speed\n");
405 start_timing();
406 for (i = 0; i < ebcnt; ++i) {
407 if (bbt[i])
408 continue;
409 err = write_eraseblock_by_page(i);
410 if (err)
411 goto out;
412 cond_resched();
413 }
414 stop_timing();
415 speed = calc_speed();
416 printk(PRINT_PREF "page write speed is %ld KiB/s\n", speed);
417
418 /* Read all eraseblocks, 1 page at a time */
419 printk(PRINT_PREF "testing page read speed\n");
420 start_timing();
421 for (i = 0; i < ebcnt; ++i) {
422 if (bbt[i])
423 continue;
424 err = read_eraseblock_by_page(i);
425 if (err)
426 goto out;
427 cond_resched();
428 }
429 stop_timing();
430 speed = calc_speed();
431 printk(PRINT_PREF "page read speed is %ld KiB/s\n", speed);
432
433 err = erase_whole_device();
434 if (err)
435 goto out;
436
437 /* Write all eraseblocks, 2 pages at a time */
438 printk(PRINT_PREF "testing 2 page write speed\n");
439 start_timing();
440 for (i = 0; i < ebcnt; ++i) {
441 if (bbt[i])
442 continue;
443 err = write_eraseblock_by_2pages(i);
444 if (err)
445 goto out;
446 cond_resched();
447 }
448 stop_timing();
449 speed = calc_speed();
450 printk(PRINT_PREF "2 page write speed is %ld KiB/s\n", speed);
451
452 /* Read all eraseblocks, 2 pages at a time */
453 printk(PRINT_PREF "testing 2 page read speed\n");
454 start_timing();
455 for (i = 0; i < ebcnt; ++i) {
456 if (bbt[i])
457 continue;
458 err = read_eraseblock_by_2pages(i);
459 if (err)
460 goto out;
461 cond_resched();
462 }
463 stop_timing();
464 speed = calc_speed();
465 printk(PRINT_PREF "2 page read speed is %ld KiB/s\n", speed);
466
467 /* Erase all eraseblocks */
 468	printk(PRINT_PREF "testing erase speed\n");
469 start_timing();
470 for (i = 0; i < ebcnt; ++i) {
471 if (bbt[i])
472 continue;
473 err = erase_eraseblock(i);
474 if (err)
475 goto out;
476 cond_resched();
477 }
478 stop_timing();
479 speed = calc_speed();
480 printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed);
481
482 printk(PRINT_PREF "finished\n");
483out:
484 kfree(iobuf);
485 kfree(bbt);
486 put_mtd_device(mtd);
487 if (err)
488 printk(PRINT_PREF "error %d occurred\n", err);
489 printk(KERN_INFO "=================================================\n");
490 return err;
491}
492module_init(mtd_speedtest_init);
493
494static void __exit mtd_speedtest_exit(void)
495{
496 return;
497}
498module_exit(mtd_speedtest_exit);
499
500MODULE_DESCRIPTION("Speed test module");
501MODULE_AUTHOR("Adrian Hunter");
502MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
new file mode 100644
index 000000000000..63920476b57a
--- /dev/null
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -0,0 +1,330 @@
1/*
2 * Copyright (C) 2006-2008 Nokia Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; see the file COPYING. If not, write to the Free Software
15 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
 17 * Test random reads, writes and erases on an MTD device.
18 *
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */
21
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/err.h>
26#include <linux/mtd/mtd.h>
27#include <linux/sched.h>
28#include <linux/vmalloc.h>
29
30#define PRINT_PREF KERN_INFO "mtd_stresstest: "
31
32static int dev;
33module_param(dev, int, S_IRUGO);
34MODULE_PARM_DESC(dev, "MTD device number to use");
35
36static int count = 10000;
37module_param(count, int, S_IRUGO);
38MODULE_PARM_DESC(count, "Number of operations to do (default is 10000)");
39
40static struct mtd_info *mtd;
41static unsigned char *writebuf;
42static unsigned char *readbuf;
43static unsigned char *bbt;
44static int *offsets;
45
46static int pgsize;
47static int bufsize;
48static int ebcnt;
49static int pgcnt;
50static unsigned long next = 1;
51
52static inline unsigned int simple_rand(void)
53{
54 next = next * 1103515245 + 12345;
55 return (unsigned int)((next / 65536) % 32768);
56}
57
58static inline void simple_srand(unsigned long seed)
59{
60 next = seed;
61}
62
63static int rand_eb(void)
64{
65 int eb;
66
67again:
68 if (ebcnt < 32768)
69 eb = simple_rand();
70 else
71 eb = (simple_rand() << 15) | simple_rand();
72 /* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */
73 eb %= (ebcnt - 1);
74 if (bbt[eb])
75 goto again;
76 return eb;
77}
78
79static int rand_offs(void)
80{
81 int offs;
82
83 if (bufsize < 32768)
84 offs = simple_rand();
85 else
86 offs = (simple_rand() << 15) | simple_rand();
87 offs %= bufsize;
88 return offs;
89}
90
91static int rand_len(int offs)
92{
93 int len;
94
95 if (bufsize < 32768)
96 len = simple_rand();
97 else
98 len = (simple_rand() << 15) | simple_rand();
99 len %= (bufsize - offs);
100 return len;
101}
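A note on the random helpers above: simple_rand() is the classic ANSI C linear congruential generator and yields only 15 bits per call, so rand_eb(), rand_offs() and rand_len() splice two draws into a 30-bit value whenever the range (ebcnt or bufsize) can reach 32768. A minimal userspace sketch of the same composition (names here are invented for illustration):

#include <stdio.h>

static unsigned long next = 1;

/* Same LCG as the module uses: 15 bits of output per call. */
static unsigned int simple_rand(void)
{
        next = next * 1103515245 + 12345;
        return (unsigned int)((next / 65536) % 32768);
}

/* Hypothetical helper mirroring rand_eb()/rand_offs(): two 15-bit
 * draws are combined when 'range' may exceed 32767. The modulus
 * introduces a small bias, which is harmless for a stress test. */
static int rand_below(int range)
{
        int r;

        if (range < 32768)
                r = simple_rand();
        else
                r = (simple_rand() << 15) | simple_rand();
        return r % range;
}

int main(void)
{
        printf("%d %d\n", rand_below(100), rand_below(100000));
        return 0;
}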
102
103static int erase_eraseblock(int ebnum)
104{
105 int err;
106 struct erase_info ei;
107 loff_t addr = ebnum * mtd->erasesize;
108
109 memset(&ei, 0, sizeof(struct erase_info));
110 ei.mtd = mtd;
111 ei.addr = addr;
112 ei.len = mtd->erasesize;
113
114 err = mtd->erase(mtd, &ei);
115 if (unlikely(err)) {
116 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
117 return err;
118 }
119
120 if (unlikely(ei.state == MTD_ERASE_FAILED)) {
121 printk(PRINT_PREF "some erase error occurred at EB %d\n",
122 ebnum);
123 return -EIO;
124 }
125
126 return 0;
127}
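erase_eraseblock() treats mtd->erase() as if it completed synchronously: no completion callback is installed and ei.state is inspected as soon as the call returns, which holds for drivers that finish (or fail) the erase before returning. A more defensive variant, sketched below under the assumption of the classic asynchronous erase_info API (a callback plus the MTD_ERASE_DONE/MTD_ERASE_FAILED states, with <linux/wait.h> for the waitqueue), blocks until the driver reports a terminal state:

static DECLARE_WAIT_QUEUE_HEAD(erase_waitq);

/* Invoked by the MTD driver once the erase finishes or fails. */
static void erase_callback(struct erase_info *ei)
{
        wake_up(&erase_waitq);
}

static int erase_eraseblock_sync(int ebnum)
{
        struct erase_info ei;
        int err;

        memset(&ei, 0, sizeof(struct erase_info));
        ei.mtd = mtd;
        /* Cast before multiplying so large devices do not overflow int. */
        ei.addr = (loff_t)ebnum * mtd->erasesize;
        ei.len = mtd->erasesize;
        ei.callback = erase_callback;

        err = mtd->erase(mtd, &ei);
        if (err)
                return err;

        wait_event(erase_waitq, ei.state == MTD_ERASE_DONE ||
                                ei.state == MTD_ERASE_FAILED);
        return ei.state == MTD_ERASE_FAILED ? -EIO : 0;
}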
128
129static int is_block_bad(int ebnum)
130{
131 loff_t addr = ebnum * mtd->erasesize;
132 int ret;
133
134 ret = mtd->block_isbad(mtd, addr);
135 if (ret)
136 printk(PRINT_PREF "block %d is bad\n", ebnum);
137 return ret;
138}
139
140static int do_read(void)
141{
142 size_t read = 0;
143 int eb = rand_eb();
144 int offs = rand_offs();
145 int len = rand_len(offs), err;
146 loff_t addr;
147
148 if (bbt[eb + 1]) {
149 if (offs >= mtd->erasesize)
150 offs -= mtd->erasesize;
151 if (offs + len > mtd->erasesize)
152 len = mtd->erasesize - offs;
153 }
154 addr = eb * mtd->erasesize + offs;
155 err = mtd->read(mtd, addr, len, &read, readbuf);
156 if (err == -EUCLEAN)
157 err = 0;
158 if (unlikely(err || read != len)) {
159 printk(PRINT_PREF "error: read failed at 0x%llx\n",
160 (long long)addr);
161 if (!err)
162 err = -EINVAL;
163 return err;
164 }
165 return 0;
166}
167
168static int do_write(void)
169{
170 int eb = rand_eb(), offs, err, len;
171 size_t written = 0;
172 loff_t addr;
173
174 offs = offsets[eb];
175 if (offs >= mtd->erasesize) {
176 err = erase_eraseblock(eb);
177 if (err)
178 return err;
179 offs = offsets[eb] = 0;
180 }
181 len = rand_len(offs);
182 len = ((len + pgsize - 1) / pgsize) * pgsize;
183 if (offs + len > mtd->erasesize) {
184 if (bbt[eb + 1])
185 len = mtd->erasesize - offs;
186 else {
187 err = erase_eraseblock(eb + 1);
188 if (err)
189 return err;
190 offsets[eb + 1] = 0;
191 }
192 }
193 addr = eb * mtd->erasesize + offs;
194 err = mtd->write(mtd, addr, len, &written, writebuf);
195 if (unlikely(err || written != len)) {
196 printk(PRINT_PREF "error: write failed at 0x%llx\n",
197 (long long)addr);
198 if (!err)
199 err = -EINVAL;
200 return err;
201 }
202 offs += len;
203 while (offs > mtd->erasesize) {
204 offsets[eb++] = mtd->erasesize;
205 offs -= mtd->erasesize;
206 }
207 offsets[eb] = offs;
208 return 0;
209}
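The offsets[] array that do_write() maintains records, for every eraseblock, the next page-aligned free offset; the value mtd->erasesize means "full, erase before reuse", which is also the initial state set in mtd_stresstest_init(). Because len is rounded up to whole pages and may span two eraseblocks, a write can spill into eb + 1 (erased on demand above): with a 64 KiB eraseblock, offsets[eb] = 60 KiB and len rounded to 8 KiB, the write ends at 68 KiB, so the final loop marks offsets[eb] = 64 KiB and records offsets[eb + 1] = 4 KiB for the spilled remainder.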
210
211static int do_operation(void)
212{
213 if (simple_rand() & 1)
214 return do_read();
215 else
216 return do_write();
217}
218
219static int scan_for_bad_eraseblocks(void)
220{
221 int i, bad = 0;
222
223 bbt = kmalloc(ebcnt, GFP_KERNEL);
224 if (!bbt) {
225 printk(PRINT_PREF "error: cannot allocate memory\n");
226 return -ENOMEM;
227 }
228 memset(bbt, 0, ebcnt);
229
230 printk(PRINT_PREF "scanning for bad eraseblocks\n");
231 for (i = 0; i < ebcnt; ++i) {
232 bbt[i] = is_block_bad(i) ? 1 : 0;
233 if (bbt[i])
234 bad += 1;
235 cond_resched();
236 }
237 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
238 return 0;
239}
240
241static int __init mtd_stresstest_init(void)
242{
243 int err;
244 int i, op;
245 uint64_t tmp;
246
247 printk(KERN_INFO "\n");
248 printk(KERN_INFO "=================================================\n");
249 printk(PRINT_PREF "MTD device: %d\n", dev);
250
251 mtd = get_mtd_device(NULL, dev);
252 if (IS_ERR(mtd)) {
253 err = PTR_ERR(mtd);
254 printk(PRINT_PREF "error: cannot get MTD device\n");
255 return err;
256 }
257
258 if (mtd->writesize == 1) {
259 printk(PRINT_PREF "not NAND flash, assume page size is 512 "
260 "bytes.\n");
261 pgsize = 512;
262 } else
263 pgsize = mtd->writesize;
264
265 tmp = mtd->size;
266 do_div(tmp, mtd->erasesize);
267 ebcnt = tmp;
268 pgcnt = mtd->erasesize / mtd->writesize;
269
270 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
271 "page size %u, count of eraseblocks %u, pages per "
272 "eraseblock %u, OOB size %u\n",
273 (unsigned long long)mtd->size, mtd->erasesize,
274 pgsize, ebcnt, pgcnt, mtd->oobsize);
275
276 /* Read or write up to 2 eraseblocks at a time */
277 bufsize = mtd->erasesize * 2;
278
279 err = -ENOMEM;
280 readbuf = vmalloc(bufsize);
281 writebuf = vmalloc(bufsize);
282 offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL);
283 if (!readbuf || !writebuf || !offsets) {
284 printk(PRINT_PREF "error: cannot allocate memory\n");
285 goto out;
286 }
287 for (i = 0; i < ebcnt; i++)
288 offsets[i] = mtd->erasesize;
289 simple_srand(current->pid);
290 for (i = 0; i < bufsize; i++)
291 writebuf[i] = simple_rand();
292
293 err = scan_for_bad_eraseblocks();
294 if (err)
295 goto out;
296
297 /* Do operations */
298 printk(PRINT_PREF "doing operations\n");
299 for (op = 0; op < count; op++) {
300 if ((op & 1023) == 0)
301 printk(PRINT_PREF "%d operations done\n", op);
302 err = do_operation();
303 if (err)
304 goto out;
305 cond_resched();
306 }
307 printk(PRINT_PREF "finished, %d operations done\n", op);
308
309out:
310 kfree(offsets);
311 kfree(bbt);
312 vfree(writebuf);
313 vfree(readbuf);
314 put_mtd_device(mtd);
315 if (err)
316 printk(PRINT_PREF "error %d occurred\n", err);
317 printk(KERN_INFO "=================================================\n");
318 return err;
319}
320module_init(mtd_stresstest_init);
321
322static void __exit mtd_stresstest_exit(void)
323{
324 return;
325}
326module_exit(mtd_stresstest_exit);
327
328MODULE_DESCRIPTION("Stress test module");
329MODULE_AUTHOR("Adrian Hunter");
330MODULE_LICENSE("GPL");
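Like the other modules in this directory, the stress test runs entirely from its init routine and reports through the kernel log; the exit handler is intentionally empty so the module can simply be removed afterwards. A plausible invocation (hypothetical device number; the test erases and overwrites the target, so point it at a scratch partition) is: insmod mtd_stresstest.ko dev=0 count=10000.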
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
new file mode 100644
index 000000000000..5b889724268e
--- /dev/null
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -0,0 +1,525 @@
1/*
2 * Copyright (C) 2006-2007 Nokia Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; see the file COPYING. If not, write to the Free Software
15 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Test sub-page read and write on an MTD device.
18 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
19 *
20 */
21
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/err.h>
26#include <linux/mtd/mtd.h>
27#include <linux/sched.h>
28
29#define PRINT_PREF KERN_INFO "mtd_subpagetest: "
30
31static int dev;
32module_param(dev, int, S_IRUGO);
33MODULE_PARM_DESC(dev, "MTD device number to use");
34
35static struct mtd_info *mtd;
36static unsigned char *writebuf;
37static unsigned char *readbuf;
38static unsigned char *bbt;
39
40static int subpgsize;
41static int bufsize;
42static int ebcnt;
43static int pgcnt;
44static int errcnt;
45static unsigned long next = 1;
46
47static inline unsigned int simple_rand(void)
48{
49 next = next * 1103515245 + 12345;
50 return (unsigned int)((next / 65536) % 32768);
51}
52
53static inline void simple_srand(unsigned long seed)
54{
55 next = seed;
56}
57
58static void set_random_data(unsigned char *buf, size_t len)
59{
60 size_t i;
61
62 for (i = 0; i < len; ++i)
63 buf[i] = simple_rand();
64}
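set_random_data() is the only producer of test payloads, which lets the test avoid keeping a device-sized reference copy: the init routine reseeds the generator with the same value before the write pass and again before the verify pass (simple_srand(1), later simple_srand(3)), so verification regenerates byte-for-byte the stream that was written. A tiny self-contained model of that replay trick:

#include <assert.h>
#include <string.h>

static unsigned long next = 1;

static unsigned int simple_rand(void)
{
        next = next * 1103515245 + 12345;
        return (unsigned int)((next / 65536) % 32768);
}

static void simple_srand(unsigned long seed)
{
        next = seed;
}

int main(void)
{
        unsigned char a[64], b[64];
        size_t i;

        simple_srand(1);                /* "write" pass */
        for (i = 0; i < sizeof(a); i++)
                a[i] = simple_rand();

        simple_srand(1);                /* "verify" pass: same seed */
        for (i = 0; i < sizeof(b); i++)
                b[i] = simple_rand();

        /* Identical seeds produce identical streams. */
        assert(memcmp(a, b, sizeof(a)) == 0);
        return 0;
}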
65
66static inline void clear_data(unsigned char *buf, size_t len)
67{
68 memset(buf, 0, len);
69}
70
71static int erase_eraseblock(int ebnum)
72{
73 int err;
74 struct erase_info ei;
75 loff_t addr = ebnum * mtd->erasesize;
76
77 memset(&ei, 0, sizeof(struct erase_info));
78 ei.mtd = mtd;
79 ei.addr = addr;
80 ei.len = mtd->erasesize;
81
82 err = mtd->erase(mtd, &ei);
83 if (err) {
84 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
85 return err;
86 }
87
88 if (ei.state == MTD_ERASE_FAILED) {
89 printk(PRINT_PREF "some erase error occurred at EB %d\n",
90 ebnum);
91 return -EIO;
92 }
93
94 return 0;
95}
96
97static int erase_whole_device(void)
98{
99 int err;
100 unsigned int i;
101
102 printk(PRINT_PREF "erasing whole device\n");
103 for (i = 0; i < ebcnt; ++i) {
104 if (bbt[i])
105 continue;
106 err = erase_eraseblock(i);
107 if (err)
108 return err;
109 cond_resched();
110 }
111 printk(PRINT_PREF "erased %u eraseblocks\n", i);
112 return 0;
113}
114
115static int write_eraseblock(int ebnum)
116{
117 size_t written = 0;
118 int err = 0;
119 loff_t addr = ebnum * mtd->erasesize;
120
121 set_random_data(writebuf, subpgsize);
122 err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
123 if (unlikely(err || written != subpgsize)) {
124 printk(PRINT_PREF "error: write failed at %#llx\n",
125 (long long)addr);
126 if (written != subpgsize) {
127 printk(PRINT_PREF " write size: %#x\n", subpgsize);
128 printk(PRINT_PREF " written: %#zx\n", written);
129 }
130 return err ? err : -1;
131 }
132
133 addr += subpgsize;
134
135 set_random_data(writebuf, subpgsize);
136 err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
137 if (unlikely(err || written != subpgsize)) {
138 printk(PRINT_PREF "error: write failed at %#llx\n",
139 (long long)addr);
140 if (written != subpgsize) {
141 printk(PRINT_PREF " write size: %#x\n", subpgsize);
142 printk(PRINT_PREF " written: %#zx\n", written);
143 }
144 return err ? err : -1;
145 }
146
147 return err;
148}
149
150static int write_eraseblock2(int ebnum)
151{
152 size_t written = 0;
153 int err = 0, k;
154 loff_t addr = ebnum * mtd->erasesize;
155
156 for (k = 1; k < 33; ++k) {
157 if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
158 break;
159 set_random_data(writebuf, subpgsize * k);
160 err = mtd->write(mtd, addr, subpgsize * k, &written, writebuf);
161 if (unlikely(err || written != subpgsize * k)) {
162 printk(PRINT_PREF "error: write failed at %#llx\n",
163 (long long)addr);
164 if (written != subpgsize * k) {
165 printk(PRINT_PREF " write size: %#x\n",
166 subpgsize * k);
167 printk(PRINT_PREF " written: %#08zx\n",
168 written);
169 }
170 return err ? err : -1;
171 }
172 addr += subpgsize * k;
173 }
174
175 return err;
176}
177
178static void print_subpage(unsigned char *p)
179{
180 int i, j;
181
182 for (i = 0; i < subpgsize; ) {
183 for (j = 0; i < subpgsize && j < 32; ++i, ++j)
184 printk("%02x", *p++);
185 printk("\n");
186 }
187}
188
189static int verify_eraseblock(int ebnum)
190{
191 size_t read = 0;
192 int err = 0;
193 loff_t addr = ebnum * mtd->erasesize;
194
195 set_random_data(writebuf, subpgsize);
196 clear_data(readbuf, subpgsize);
197 read = 0;
198 err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
199 if (unlikely(err || read != subpgsize)) {
200 if (err == -EUCLEAN && read == subpgsize) {
201 printk(PRINT_PREF "ECC correction at %#llx\n",
202 (long long)addr);
203 err = 0;
204 } else {
205 printk(PRINT_PREF "error: read failed at %#llx\n",
206 (long long)addr);
207 return err ? err : -1;
208 }
209 }
210 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
211 printk(PRINT_PREF "error: verify failed at %#llx\n",
212 (long long)addr);
213 printk(PRINT_PREF "------------- written----------------\n");
214 print_subpage(writebuf);
215 printk(PRINT_PREF "------------- read ------------------\n");
216 print_subpage(readbuf);
217 printk(PRINT_PREF "-------------------------------------\n");
218 errcnt += 1;
219 }
220
221 addr += subpgsize;
222
223 set_random_data(writebuf, subpgsize);
224 clear_data(readbuf, subpgsize);
225 read = 0;
226 err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
227 if (unlikely(err || read != subpgsize)) {
228 if (err == -EUCLEAN && read == subpgsize) {
229 printk(PRINT_PREF "ECC correction at %#llx\n",
230 (long long)addr);
231 err = 0;
232 } else {
233 printk(PRINT_PREF "error: read failed at %#llx\n",
234 (long long)addr);
235 return err ? err : -1;
236 }
237 }
238 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
239 printk(PRINT_PREF "error: verify failed at %#llx\n",
240 (long long)addr);
241 printk(PRINT_PREF "------------- written----------------\n");
242 print_subpage(writebuf);
243 printk(PRINT_PREF "------------- read ------------------\n");
244 print_subpage(readbuf);
245 printk(PRINT_PREF "-------------------------------------\n");
246 errcnt += 1;
247 }
248
249 return err;
250}
251
252static int verify_eraseblock2(int ebnum)
253{
254 size_t read = 0;
255 int err = 0, k;
256 loff_t addr = ebnum * mtd->erasesize;
257
258 for (k = 1; k < 33; ++k) {
259 if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
260 break;
261 set_random_data(writebuf, subpgsize * k);
262 clear_data(readbuf, subpgsize * k);
263 read = 0;
264 err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf);
265 if (unlikely(err || read != subpgsize * k)) {
266 if (err == -EUCLEAN && read == subpgsize * k) {
267 printk(PRINT_PREF "ECC correction at %#llx\n",
268 (long long)addr);
269 err = 0;
270 } else {
271 printk(PRINT_PREF "error: read failed at "
272 "%#llx\n", (long long)addr);
273 return err ? err : -1;
274 }
275 }
276 if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) {
277 printk(PRINT_PREF "error: verify failed at %#llx\n",
278 (long long)addr);
279 errcnt += 1;
280 }
281 addr += subpgsize * k;
282 }
283
284 return err;
285}
286
287static int verify_eraseblock_ff(int ebnum)
288{
289 uint32_t j;
290 size_t read = 0;
291 int err = 0;
292 loff_t addr = ebnum * mtd->erasesize;
293
294 memset(writebuf, 0xff, subpgsize);
295 for (j = 0; j < mtd->erasesize / subpgsize; ++j) {
296 clear_data(readbuf, subpgsize);
297 read = 0;
298 err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
299 if (unlikely(err || read != subpgsize)) {
300 if (err == -EUCLEAN && read == subpgsize) {
301 printk(PRINT_PREF "ECC correction at %#llx\n",
302 (long long)addr);
303 err = 0;
304 } else {
305 printk(PRINT_PREF "error: read failed at "
306 "%#llx\n", (long long)addr);
307 return err ? err : -1;
308 }
309 }
310 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
311 printk(PRINT_PREF "error: verify 0xff failed at "
312 "%#llx\n", (long long)addr);
313 errcnt += 1;
314 }
315 addr += subpgsize;
316 }
317
318 return err;
319}
320
321static int verify_all_eraseblocks_ff(void)
322{
323 int err;
324 unsigned int i;
325
326 printk(PRINT_PREF "verifying all eraseblocks for 0xff\n");
327 for (i = 0; i < ebcnt; ++i) {
328 if (bbt[i])
329 continue;
330 err = verify_eraseblock_ff(i);
331 if (err)
332 return err;
333 if (i % 256 == 0)
334 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
335 cond_resched();
336 }
337 printk(PRINT_PREF "verified %u eraseblocks\n", i);
338 return 0;
339}
340
341static int is_block_bad(int ebnum)
342{
343 loff_t addr = ebnum * mtd->erasesize;
344 int ret;
345
346 ret = mtd->block_isbad(mtd, addr);
347 if (ret)
348 printk(PRINT_PREF "block %d is bad\n", ebnum);
349 return ret;
350}
351
352static int scan_for_bad_eraseblocks(void)
353{
354 int i, bad = 0;
355
356 bbt = kmalloc(ebcnt, GFP_KERNEL);
357 if (!bbt) {
358 printk(PRINT_PREF "error: cannot allocate memory\n");
359 return -ENOMEM;
360 }
361 memset(bbt, 0, ebcnt);
362
363 printk(PRINT_PREF "scanning for bad eraseblocks\n");
364 for (i = 0; i < ebcnt; ++i) {
365 bbt[i] = is_block_bad(i) ? 1 : 0;
366 if (bbt[i])
367 bad += 1;
368 cond_resched();
369 }
370 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
371 return 0;
372}
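The subpage size computed in the init routine below comes from subpage_sft, the MTD layer's subpage shift: the smallest programmable unit is writesize >> subpage_sft, so a 2048-byte page with a shift of 2, for example, yields 512-byte subpages. That also explains bufsize = subpgsize * 32: it is exactly the largest multi-subpage write that write_eraseblock2() attempts (k runs up to 32).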
373
374static int __init mtd_subpagetest_init(void)
375{
376 int err = 0;
377 uint32_t i;
378 uint64_t tmp;
379
380 printk(KERN_INFO "\n");
381 printk(KERN_INFO "=================================================\n");
382 printk(PRINT_PREF "MTD device: %d\n", dev);
383
384 mtd = get_mtd_device(NULL, dev);
385 if (IS_ERR(mtd)) {
386 err = PTR_ERR(mtd);
387 printk(PRINT_PREF "error: cannot get MTD device\n");
388 return err;
389 }
390
391 if (mtd->type != MTD_NANDFLASH) {
392 printk(PRINT_PREF "this test requires NAND flash\n");
393 goto out;
394 }
395
396 subpgsize = mtd->writesize >> mtd->subpage_sft;
397 tmp = mtd->size;
398 do_div(tmp, mtd->erasesize);
399 ebcnt = tmp;
400 pgcnt = mtd->erasesize / mtd->writesize;
401 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
402        "page size %u, subpage size %u, count of eraseblocks %u, "
403        "pages per eraseblock %u, OOB size %u\n",
404        (unsigned long long)mtd->size, mtd->erasesize,
405        mtd->writesize, subpgsize, ebcnt, pgcnt, mtd->oobsize);
406
407 err = -ENOMEM;
408 bufsize = subpgsize * 32;
409 writebuf = kmalloc(bufsize, GFP_KERNEL);
410 if (!writebuf) {
411 printk(PRINT_PREF "error: cannot allocate memory\n");
412 goto out;
413 }
414 readbuf = kmalloc(bufsize, GFP_KERNEL);
415 if (!readbuf) {
416 printk(PRINT_PREF "error: cannot allocate memory\n");
417 goto out;
418 }
419 
420
421 err = scan_for_bad_eraseblocks();
422 if (err)
423 goto out;
424
425 err = erase_whole_device();
426 if (err)
427 goto out;
428
429 printk(PRINT_PREF "writing whole device\n");
430 simple_srand(1);
431 for (i = 0; i < ebcnt; ++i) {
432 if (bbt[i])
433 continue;
434 err = write_eraseblock(i);
435 if (unlikely(err))
436 goto out;
437 if (i % 256 == 0)
438 printk(PRINT_PREF "written up to eraseblock %u\n", i);
439 cond_resched();
440 }
441 printk(PRINT_PREF "written %u eraseblocks\n", i);
442
443 simple_srand(1);
444 printk(PRINT_PREF "verifying all eraseblocks\n");
445 for (i = 0; i < ebcnt; ++i) {
446 if (bbt[i])
447 continue;
448 err = verify_eraseblock(i);
449 if (unlikely(err))
450 goto out;
451 if (i % 256 == 0)
452 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
453 cond_resched();
454 }
455 printk(PRINT_PREF "verified %u eraseblocks\n", i);
456
457 err = erase_whole_device();
458 if (err)
459 goto out;
460
461 err = verify_all_eraseblocks_ff();
462 if (err)
463 goto out;
464
465 /* Write all eraseblocks */
466 simple_srand(3);
467 printk(PRINT_PREF "writing whole device\n");
468 for (i = 0; i < ebcnt; ++i) {
469 if (bbt[i])
470 continue;
471 err = write_eraseblock2(i);
472 if (unlikely(err))
473 goto out;
474 if (i % 256 == 0)
475 printk(PRINT_PREF "written up to eraseblock %u\n", i);
476 cond_resched();
477 }
478 printk(PRINT_PREF "written %u eraseblocks\n", i);
479
480 /* Check all eraseblocks */
481 simple_srand(3);
482 printk(PRINT_PREF "verifying all eraseblocks\n");
483 for (i = 0; i < ebcnt; ++i) {
484 if (bbt[i])
485 continue;
486 err = verify_eraseblock2(i);
487 if (unlikely(err))
488 goto out;
489 if (i % 256 == 0)
490 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
491 cond_resched();
492 }
493 printk(PRINT_PREF "verified %u eraseblocks\n", i);
494
495 err = erase_whole_device();
496 if (err)
497 goto out;
498
499 err = verify_all_eraseblocks_ff();
500 if (err)
501 goto out;
502
503 printk(PRINT_PREF "finished with %d errors\n", errcnt);
504
505out:
506 kfree(bbt);
507 kfree(readbuf);
508 kfree(writebuf);
509 put_mtd_device(mtd);
510 if (err)
511 printk(PRINT_PREF "error %d occurred\n", err);
512 printk(KERN_INFO "=================================================\n");
513 return err;
514}
515module_init(mtd_subpagetest_init);
516
517static void __exit mtd_subpagetest_exit(void)
518{
519 return;
520}
521module_exit(mtd_subpagetest_exit);
522
523MODULE_DESCRIPTION("Subpage test module");
524MODULE_AUTHOR("Adrian Hunter");
525MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
new file mode 100644
index 000000000000..631a0ab3a33c
--- /dev/null
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -0,0 +1,530 @@
1/*
2 * Copyright (C) 2006-2008 Artem Bityutskiy
3 * Copyright (C) 2006-2008 Jarkko Lavinen
4 * Copyright (C) 2006-2008 Adrian Hunter
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; see the file COPYING. If not, write to the Free Software
17 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 * Authors: Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter
20 *
21 * WARNING: this test program may kill your flash and your device. Do not
22 * use it unless you know what you are doing. The authors are not responsible
23 * for any damage caused by this program.
24 */
25
26#include <linux/init.h>
27#include <linux/module.h>
28#include <linux/moduleparam.h>
29#include <linux/err.h>
30#include <linux/mtd/mtd.h>
31#include <linux/sched.h>
32
33#define PRINT_PREF KERN_INFO "mtd_torturetest: "
34#define RETRIES 3
35
36static int eb = 8;
37module_param(eb, int, S_IRUGO);
38MODULE_PARM_DESC(eb, "eraseblock number within the selected MTD device");
39
40static int ebcnt = 32;
41module_param(ebcnt, int, S_IRUGO);
42MODULE_PARM_DESC(ebcnt, "number of consecutive eraseblocks to torture");
43
44static int pgcnt;
45module_param(pgcnt, int, S_IRUGO);
46MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)");
47
48static int dev;
49module_param(dev, int, S_IRUGO);
50MODULE_PARM_DESC(dev, "MTD device number to use");
51
52static int gran = 512;
53module_param(gran, int, S_IRUGO);
54MODULE_PARM_DESC(gran, "how often the status information should be printed");
55
56static int check = 1;
57module_param(check, int, S_IRUGO);
58MODULE_PARM_DESC(check, "whether the written data should be checked");
59
60static unsigned int cycles_count;
61module_param(cycles_count, uint, S_IRUGO);
62MODULE_PARM_DESC(cycles_count, "how many erase cycles to do "
63 "(infinite by default)");
64
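Together these parameters select a window of ebcnt eraseblocks starting at block eb of MTD device dev; a plausible (hypothetical) invocation is insmod mtd_torturetest.ko dev=0 eb=8 ebcnt=32 pgcnt=1 cycles_count=10000. Note that tort_init() below places a bad_ebs[ebcnt] array on the kernel stack, so very large ebcnt values should be avoided.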
65static struct mtd_info *mtd;
66
67/* This buffer contains 0x555555...0xAAAAAA... pattern */
68static unsigned char *patt_5A5;
69/* This buffer contains 0xAAAAAA...0x555555... pattern */
70static unsigned char *patt_A5A;
71/* This buffer contains all 0xFF bytes */
72static unsigned char *patt_FF;
73/* This is a temporary buffer used when checking data */
74static unsigned char *check_buf;
75/* How many erase cycles were done */
76static unsigned int erase_cycles;
77
78static int pgsize;
79static struct timeval start, finish;
80
81static void report_corrupt(unsigned char *read, unsigned char *written);
82
83static inline void start_timing(void)
84{
85 do_gettimeofday(&start);
86}
87
88static inline void stop_timing(void)
89{
90 do_gettimeofday(&finish);
91}
92
93/*
94 * Erase eraseblock number @ebnum.
95 */
96static inline int erase_eraseblock(int ebnum)
97{
98 int err;
99 struct erase_info ei;
100 loff_t addr = ebnum * mtd->erasesize;
101
102 memset(&ei, 0, sizeof(struct erase_info));
103 ei.mtd = mtd;
104 ei.addr = addr;
105 ei.len = mtd->erasesize;
106
107 err = mtd->erase(mtd, &ei);
108 if (err) {
109 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
110 return err;
111 }
112
113 if (ei.state == MTD_ERASE_FAILED) {
114 printk(PRINT_PREF "some erase error occurred at EB %d\n",
115 ebnum);
116 return -EIO;
117 }
118
119 return 0;
120}
121
122/*
123 * Check that the contents of eraseblock number @ebnum are equivalent to the
124 * @buf buffer.
125 */
126static inline int check_eraseblock(int ebnum, unsigned char *buf)
127{
128 int err, retries = 0;
129 size_t read = 0;
130 loff_t addr = ebnum * mtd->erasesize;
131 size_t len = mtd->erasesize;
132
133 if (pgcnt) {
134 addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
135 len = pgcnt * pgsize;
136 }
137
138retry:
139 err = mtd->read(mtd, addr, len, &read, check_buf);
140 if (err == -EUCLEAN)
141 printk(PRINT_PREF "single bit flip occurred at EB %d "
142 "MTD reported that it was fixed.\n", ebnum);
143 else if (err) {
144 printk(PRINT_PREF "error %d while reading EB %d, "
145 "read %zd\n", err, ebnum, read);
146 return err;
147 }
148
149 if (read != len) {
150 printk(PRINT_PREF "failed to read %zd bytes from EB %d, "
151 "read only %zd, but no error reported\n",
152 len, ebnum, read);
153 return -EIO;
154 }
155
156 if (memcmp(buf, check_buf, len)) {
157 printk(PRINT_PREF "read wrong data from EB %d\n", ebnum);
158 report_corrupt(check_buf, buf);
159
160 if (retries++ < RETRIES) {
161 /* Try read again */
162 yield();
163 printk(PRINT_PREF "re-try reading data from EB %d\n",
164 ebnum);
165 goto retry;
166 } else {
167 printk(PRINT_PREF "retried %d times, still errors, "
168 "give-up\n", RETRIES);
169 return -EINVAL;
170 }
171 }
172
173 if (retries != 0)
174 printk(PRINT_PREF "only attempt number %d was OK (!!!)\n",
175 retries);
176
177 return 0;
178}
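The retry loop above re-reads the same range up to RETRIES times, yielding the CPU between attempts, which separates hard corruption (every attempt miscompares) from unstable reads (a later attempt matches). The "only attempt number %d was OK" message flags the latter case, which on worn flash typically points at marginal cells rather than a failed write.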
179
180static inline int write_pattern(int ebnum, void *buf)
181{
182 int err;
183 size_t written = 0;
184 loff_t addr = ebnum * mtd->erasesize;
185 size_t len = mtd->erasesize;
186
187 if (pgcnt) {
188 addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
189 len = pgcnt * pgsize;
190 }
191 err = mtd->write(mtd, addr, len, &written, buf);
192 if (err) {
193 printk(PRINT_PREF "error %d while writing EB %d, written %zd"
194 " bytes\n", err, ebnum, written);
195 return err;
196 }
197 if (written != len) {
198 printk(PRINT_PREF "written only %zd bytes of %zd, but no error"
199 " reported\n", written, len);
200 return -EIO;
201 }
202
203 return 0;
204}
205
206static int __init tort_init(void)
207{
208 int err = 0, i, infinite = !cycles_count;
209 int bad_ebs[ebcnt];
210
211 printk(KERN_INFO "\n");
212 printk(KERN_INFO "=================================================\n");
213 printk(PRINT_PREF "Warning: this program is trying to wear out your "
214 "flash, stop it if this is not wanted.\n");
215 printk(PRINT_PREF "MTD device: %d\n", dev);
216 printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n",
217 ebcnt, eb, eb + ebcnt - 1, dev);
218 if (pgcnt)
219 printk(PRINT_PREF "torturing just %d pages per eraseblock\n",
220 pgcnt);
221 printk(PRINT_PREF "write verify %s\n", check ? "enabled" : "disabled");
222
223 mtd = get_mtd_device(NULL, dev);
224 if (IS_ERR(mtd)) {
225 err = PTR_ERR(mtd);
226 printk(PRINT_PREF "error: cannot get MTD device\n");
227 return err;
228 }
229
230 if (mtd->writesize == 1) {
231 printk(PRINT_PREF "not NAND flash, assume page size is 512 "
232 "bytes.\n");
233 pgsize = 512;
234 } else
235 pgsize = mtd->writesize;
236
237 if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {
238 printk(PRINT_PREF "error: invalid pgcnt value %d\n", pgcnt);
239 err = -EINVAL; goto out_mtd;
240 }
241
242 err = -ENOMEM;
243 patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
244 if (!patt_5A5) {
245 printk(PRINT_PREF "error: cannot allocate memory\n");
246 goto out_mtd;
247 }
248
249 patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
250 if (!patt_A5A) {
251 printk(PRINT_PREF "error: cannot allocate memory\n");
252 goto out_patt_5A5;
253 }
254
255 patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
256 if (!patt_FF) {
257 printk(PRINT_PREF "error: cannot allocate memory\n");
258 goto out_patt_A5A;
259 }
260
261 check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
262 if (!check_buf) {
263 printk(PRINT_PREF "error: cannot allocate memory\n");
264 goto out_patt_FF;
265 }
266
267 err = 0;
268
269 /* Initialize patterns */
270 memset(patt_FF, 0xFF, mtd->erasesize);
271 for (i = 0; i < mtd->erasesize / pgsize; i++) {
272 if (!(i & 1)) {
273 memset(patt_5A5 + i * pgsize, 0x55, pgsize);
274 memset(patt_A5A + i * pgsize, 0xAA, pgsize);
275 } else {
276 memset(patt_5A5 + i * pgsize, 0xAA, pgsize);
277 memset(patt_A5A + i * pgsize, 0x55, pgsize);
278 }
279 }
280
281 /*
282 * Check if there is a bad eraseblock among those we are going to test.
283 */
284 memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
285 if (mtd->block_isbad) {
286 for (i = eb; i < eb + ebcnt; i++) {
287 err = mtd->block_isbad(mtd,
288 (loff_t)i * mtd->erasesize);
289
290 if (err < 0) {
291 printk(PRINT_PREF "block_isbad() returned %d "
292 "for EB %d\n", err, i);
293 goto out;
294 }
295
296 if (err) {
297 printk("EB %d is bad. Skip it.\n", i);
298 bad_ebs[i - eb] = 1;
299 }
300 }
301 }
302
303 start_timing();
304 while (1) {
305 int i;
306 void *patt;
307
308 /* Erase all eraseblocks */
309 for (i = eb; i < eb + ebcnt; i++) {
310 if (bad_ebs[i - eb])
311 continue;
312 err = erase_eraseblock(i);
313 if (err)
314 goto out;
315 cond_resched();
316 }
317
318 /* Check if the eraseblocks contain only 0xFF bytes */
319 if (check) {
320 for (i = eb; i < eb + ebcnt; i++) {
321 if (bad_ebs[i - eb])
322 continue;
323 err = check_eraseblock(i, patt_FF);
324 if (err) {
325 printk(PRINT_PREF "verify failed"
326 " for 0xFF... pattern\n");
327 goto out;
328 }
329 cond_resched();
330 }
331 }
332
333 /* Write the pattern */
334 for (i = eb; i < eb + ebcnt; i++) {
335 if (bad_ebs[i - eb])
336 continue;
337 if ((eb + erase_cycles) & 1)
338 patt = patt_5A5;
339 else
340 patt = patt_A5A;
341 err = write_pattern(i, patt);
342 if (err)
343 goto out;
344 cond_resched();
345 }
346
347 /* Verify what we wrote */
348 if (check) {
349 for (i = eb; i < eb + ebcnt; i++) {
350 if (bad_ebs[i - eb])
351 continue;
352 if ((eb + erase_cycles) & 1)
353 patt = patt_5A5;
354 else
355 patt = patt_A5A;
356 err = check_eraseblock(i, patt);
357 if (err) {
358 printk(PRINT_PREF "verify failed for %s"
359 " pattern\n",
360 ((eb + erase_cycles) & 1) ?
361 "0x55AA55..." : "0xAA55AA...");
362 goto out;
363 }
364 cond_resched();
365 }
366 }
367
368 erase_cycles += 1;
369
370 if (erase_cycles % gran == 0) {
371 long ms;
372
373 stop_timing();
374 ms = (finish.tv_sec - start.tv_sec) * 1000 +
375 (finish.tv_usec - start.tv_usec) / 1000;
376 printk(PRINT_PREF "%08u erase cycles done, took %lu "
377 "milliseconds (%lu seconds)\n",
378 erase_cycles, ms, ms / 1000);
379 start_timing();
380 }
381
382 if (!infinite && --cycles_count == 0)
383 break;
384 }
385out:
386
387 printk(PRINT_PREF "finished after %u erase cycles\n",
388 erase_cycles);
389 kfree(check_buf);
390out_patt_FF:
391 kfree(patt_FF);
392out_patt_A5A:
393 kfree(patt_A5A);
394out_patt_5A5:
395 kfree(patt_5A5);
396out_mtd:
397 put_mtd_device(mtd);
398 if (err)
399 printk(PRINT_PREF "error %d occurred during torturing\n", err);
400 printk(KERN_INFO "=================================================\n");
401 return err;
402}
403module_init(tort_init);
404
405static void __exit tort_exit(void)
406{
407 return;
408}
409module_exit(tort_exit);
410
411static int countdiffs(unsigned char *buf, unsigned char *check_buf,
412 unsigned offset, unsigned len, unsigned *bytesp,
413 unsigned *bitsp);
414static void print_bufs(unsigned char *read, unsigned char *written, int start,
415 int len);
416
417/*
418 * Report the detailed information about how the read EB differs from what was
419 * written.
420 */
421static void report_corrupt(unsigned char *read, unsigned char *written)
422{
423 int i;
424 int bytes, bits, pages, first;
425 int offset, len;
426 size_t check_len = mtd->erasesize;
427
428 if (pgcnt)
429 check_len = pgcnt * pgsize;
430
431 bytes = bits = pages = 0;
432 for (i = 0; i < check_len; i += pgsize)
433 if (countdiffs(written, read, i, pgsize, &bytes,
434 &bits) >= 0)
435 pages++;
436
437 printk(PRINT_PREF "verify fails on %d pages, %d bytes/%d bits\n",
438 pages, bytes, bits);
439 printk(PRINT_PREF "The following is a list of all differences between"
440 " what was read from flash and what was expected\n");
441
442 for (i = 0; i < check_len; i += pgsize) {
443 cond_resched();
444 bytes = bits = 0;
445 first = countdiffs(written, read, i, pgsize, &bytes,
446 &bits);
447 if (first < 0)
448 continue;
449
450 printk("-------------------------------------------------------"
451 "----------------------------------\n");
452
453 printk(PRINT_PREF "Page %zd has %d bytes/%d bits failing verify,"
454 " starting at offset 0x%x\n",
455 (mtd->erasesize - check_len + i) / pgsize,
456 bytes, bits, first);
457
458 offset = first & ~0x7;
459 len = ((first + bytes) | 0x7) + 1 - offset;
460
461 print_bufs(read, written, offset, len);
462 }
463}
464
465static void print_bufs(unsigned char *read, unsigned char *written, int start,
466 int len)
467{
468 int i = 0, j1, j2;
469 char *diff;
470
471 printk("Offset Read Written\n");
472 while (i < len) {
473 printk("0x%08x: ", start + i);
474 diff = " ";
475 for (j1 = 0; j1 < 8 && i + j1 < len; j1++) {
476 printk(" %02x", read[start + i + j1]);
477 if (read[start + i + j1] != written[start + i + j1])
478 diff = "***";
479 }
480
481 while (j1 < 8) {
482 printk(" ");
483 j1 += 1;
484 }
485
486 printk(" %s ", diff);
487
488 for (j2 = 0; j2 < 8 && i + j2 < len; j2++)
489 printk(" %02x", written[start + i + j2]);
490 printk("\n");
491 i += 8;
492 }
493}
494
495/*
496 * Count the number of differing bytes and bits and return the first differing
497 * offset.
498 */
499static int countdiffs(unsigned char *buf, unsigned char *check_buf,
500 unsigned offset, unsigned len, unsigned *bytesp,
501 unsigned *bitsp)
502{
503 unsigned i, bit;
504 int first = -1;
505
506 for (i = offset; i < offset + len; i++)
507 if (buf[i] != check_buf[i]) {
508 first = i;
509 break;
510 }
511
512 while (i < offset + len) {
513 if (buf[i] != check_buf[i]) {
514 (*bytesp)++;
515 bit = 1;
516 while (bit < 256) {
517 if ((buf[i] & bit) != (check_buf[i] & bit))
518 (*bitsp)++;
519 bit <<= 1;
520 }
521 }
522 i++;
523 }
524
525 return first;
526}
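countdiffs() visits the eight bit positions of each differing byte by shifting a mask from 1 to 128 (the loop bound of 256 stops the shift after bit 7). Where the kernel's hamming-weight helpers are available, the inner loop collapses to a population count of the XOR of the two bytes; a sketch, assuming hweight8() from <linux/bitops.h>:

if (buf[i] != check_buf[i]) {
        (*bytesp)++;
        /* The XOR has a 1 exactly at each differing bit. */
        (*bitsp) += hweight8(buf[i] ^ check_buf[i]);
}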
527
528MODULE_DESCRIPTION("Eraseblock torturing module");
529MODULE_AUTHOR("Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter");
530MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 7caf22cd5ad0..9082768cc6c3 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -561,7 +561,7 @@ static int io_init(struct ubi_device *ubi)
561 */ 561 */
562 562
563 ubi->peb_size = ubi->mtd->erasesize; 563 ubi->peb_size = ubi->mtd->erasesize;
564 ubi->peb_count = ubi->mtd->size / ubi->mtd->erasesize; 564 ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
565 ubi->flash_size = ubi->mtd->size; 565 ubi->flash_size = ubi->mtd->size;
566 566
567 if (ubi->mtd->block_isbad && ubi->mtd->block_markbad) 567 if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 605812bb0b1a..6dd4f5e77f82 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -215,7 +215,8 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
215 struct ubi_volume *vol; 215 struct ubi_volume *vol;
216 struct ubi_device *ubi; 216 struct ubi_device *ubi;
217 217
218 dbg_gen("erase %u bytes at offset %u", instr->len, instr->addr); 218 dbg_gen("erase %llu bytes at offset %llu", (unsigned long long)instr->len,
219 (unsigned long long)instr->addr);
219 220
220 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) 221 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
221 return -EINVAL; 222 return -EINVAL;
@@ -223,11 +224,11 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
223 if (instr->len < 0 || instr->addr + instr->len > mtd->size) 224 if (instr->len < 0 || instr->addr + instr->len > mtd->size)
224 return -EINVAL; 225 return -EINVAL;
225 226
226 if (instr->addr % mtd->writesize || instr->len % mtd->writesize) 227 if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
227 return -EINVAL; 228 return -EINVAL;
228 229
229 lnum = instr->addr / mtd->erasesize; 230 lnum = mtd_div_by_eb(instr->addr, mtd);
230 count = instr->len / mtd->erasesize; 231 count = mtd_div_by_eb(instr->len, mtd);
231 232
232 vol = container_of(mtd, struct ubi_volume, gluebi_mtd); 233 vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
233 ubi = vol->ubi; 234 ubi = vol->ubi;
@@ -255,7 +256,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
255 256
256out_err: 257out_err:
257 instr->state = MTD_ERASE_FAILED; 258 instr->state = MTD_ERASE_FAILED;
258 instr->fail_addr = lnum * mtd->erasesize; 259 instr->fail_addr = (long long)lnum * mtd->erasesize;
259 return err; 260 return err;
260} 261}
261 262
@@ -294,7 +295,7 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
294 * bytes. 295 * bytes.
295 */ 296 */
296 if (vol->vol_type == UBI_DYNAMIC_VOLUME) 297 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
297 mtd->size = vol->usable_leb_size * vol->reserved_pebs; 298 mtd->size = (long long)vol->usable_leb_size * vol->reserved_pebs;
298 else 299 else
299 mtd->size = vol->used_bytes; 300 mtd->size = vol->used_bytes;
300 301
@@ -304,8 +305,8 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
304 return -ENFILE; 305 return -ENFILE;
305 } 306 }
306 307
307 dbg_gen("added mtd%d (\"%s\"), size %u, EB size %u", 308 dbg_gen("added mtd%d (\"%s\"), size %llu, EB size %u",
308 mtd->index, mtd->name, mtd->size, mtd->erasesize); 309 mtd->index, mtd->name, (unsigned long long)mtd->size, mtd->erasesize);
309 return 0; 310 return 0;
310} 311}
311 312
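The gluebi changes belong to the same series' switch of mtd->size to 64 bits: open-coded divisions and modulos by the eraseblock and write sizes become mtd_div_by_eb()/mtd_mod_by_ws() helpers so that no raw 64-bit division is emitted on 32-bit targets. A plausible shape for the divide helper, sketched from the call sites rather than from the (not shown) mtd.h hunk, assuming do_div() from <asm/div64.h>:

/* Divide a 64-bit size/offset by the eraseblock size. do_div()
 * modifies its first argument in place and returns the remainder,
 * so the quotient is what remains in 'sz'. */
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
        do_div(sz, mtd->erasesize);
        return sz;
}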
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 5d9bcf109c13..4abbe573fa40 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -564,7 +564,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
564 * @dtype: expected data type 564 * @dtype: expected data type
565 * 565 *
566 * This function maps an un-mapped logical eraseblock @lnum to a physical 566 * This function maps an un-mapped logical eraseblock @lnum to a physical
567 * eraseblock. This means, that after a successfull invocation of this 567 * eraseblock. This means, that after a successful invocation of this
568 * function the logical eraseblock @lnum will be empty (contain only %0xFF 568 * function the logical eraseblock @lnum will be empty (contain only %0xFF
569 * bytes) and be mapped to a physical eraseblock, even if an unclean reboot 569 * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
570 * happens. 570 * happens.
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 97ea7c60e002..65afda4a62d9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -830,7 +830,7 @@ config ULTRA32
830 830
831config BFIN_MAC 831config BFIN_MAC
832 tristate "Blackfin on-chip MAC support" 832 tristate "Blackfin on-chip MAC support"
833 depends on NET_ETHERNET && (BF526 || BF527 || BF536 || BF537) 833 depends on NET_ETHERNET && (BF516 || BF518 || BF526 || BF527 || BF536 || BF537)
834 select CRC32 834 select CRC32
835 select MII 835 select MII
836 select PHYLIB 836 select PHYLIB
diff --git a/drivers/net/acenic_firmware.h b/drivers/net/acenic_firmware.h
deleted file mode 100644
index fd41f7887e27..000000000000
--- a/drivers/net/acenic_firmware.h
+++ /dev/null
@@ -1,9456 +0,0 @@
1/*
2 * Declare these here even if Tigon I support is disabled to avoid
3 * the compiler complaining about undefined symbols.
4 */
5#define tigonFwReleaseMajor 0xc
6#define tigonFwReleaseMinor 0x4
7#define tigonFwReleaseFix 0xb
8#define tigonFwStartAddr 0x00004000
9#define tigonFwTextAddr 0x00004000
10#define tigonFwTextLen 0x11140
11#define tigonFwRodataAddr 0x00015140
12#define tigonFwRodataLen 0xac0
13#define tigonFwDataAddr 0x00015c20
14#define tigonFwDataLen 0x170
15#define tigonFwSbssAddr 0x00015d90
16#define tigonFwSbssLen 0x38
17#define tigonFwBssAddr 0x00015dd0
18#define tigonFwBssLen 0x2080
19#ifdef CONFIG_ACENIC_OMIT_TIGON_I
20#define tigonFwText NULL
21#define tigonFwData NULL
22#define tigonFwRodata NULL
23#else
24/* Generated by genfw.c */
25static u32 tigonFwText[(MAX_TEXT_LEN/4) + 1] __devinitdata = {
[lines 26-9456: the raw generated firmware data words are omitted from this excerpt; the patch deletes the entire generated file]
4400x14400037, 0x24100001, 0x8ee24e30, 0x210c0,
4410x24425038, 0x2e22021, 0x8c830000, 0x24020012,
4420x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
4430x1062001b, 0x24030040, 0x8c820004, 0x24420001,
4440xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
4450x10430007, 0x0, 0x8ee24e34, 0x24420001,
4460x10a20005, 0x0, 0x8001697, 0x0,
4470x14a00005, 0x0, 0x8f820128, 0x24420020,
4480xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
4490x50400013, 0xac800000, 0x80016ad, 0x0,
4500x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
4510x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
4520x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
4530x24020012, 0xac820000, 0x24020001, 0xac820004,
4540x5600000b, 0x24100001, 0x8ee2724c, 0x3c040001,
4550x24845298, 0xafa00014, 0xafa20010, 0x8ee6724c,
4560x8f470280, 0x3c050009, 0xc002403, 0x34a5f008,
4570x56000001, 0xaee00e1c, 0x8ee20174, 0x24420001,
4580xaee20174, 0x8ee20174, 0x8ee24e24, 0x10400019,
4590x0, 0xaee04e24, 0x8f820040, 0x30420001,
4600x14400008, 0x0, 0x8f430104, 0x24020001,
4610x10620004, 0x0, 0x8f420264, 0x10400006,
4620x0, 0x8ee2017c, 0x24420001, 0xaee2017c,
4630x80016da, 0x8ee2017c, 0x8f820044, 0x34420004,
4640xaf820044, 0x8ee20178, 0x24420001, 0xaee20178,
4650x8ee20178, 0x8ee27278, 0x2442ff99, 0xaee27278,
4660x8ee27278, 0x1c4002ad, 0x0, 0x8f420238,
4670x104002aa, 0x0, 0x3c020001, 0x571021,
4680x904283e0, 0x144002a5, 0x0, 0x8f420080,
4690xaee2004c, 0x8f4200c0, 0xaee20048, 0x8f420084,
4700xaee20038, 0x8f420084, 0xaee20244, 0x8f420088,
4710xaee20248, 0x8f42008c, 0xaee2024c, 0x8f420090,
4720xaee20250, 0x8f420094, 0xaee20254, 0x8f420098,
4730xaee20258, 0x8f42009c, 0xaee2025c, 0x8f4200a0,
4740xaee20260, 0x8f4200a4, 0xaee20264, 0x8f4200a8,
4750xaee20268, 0x8f4200ac, 0xaee2026c, 0x8f4200b0,
4760xaee20270, 0x8f4200b4, 0xaee20274, 0x8f4200b8,
4770xaee20278, 0x8f4200bc, 0x24040001, 0xaee2027c,
4780xaee0003c, 0x41080, 0x571021, 0x8ee3003c,
4790x8c420244, 0x24840001, 0x621821, 0x2c82000f,
4800xaee3003c, 0x1440fff8, 0x41080, 0x8f4200cc,
4810xaee20050, 0x8f4200d0, 0xaee20054, 0x8f830120,
4820x27623800, 0x24660020, 0xc2102b, 0x50400001,
4830x27663000, 0x8f820128, 0x10c20004, 0x0,
4840x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
4850x8021, 0x24420001, 0xaee201a4, 0x8001775,
4860x8ee201a4, 0x8f440208, 0x8f45020c, 0x26e20030,
4870xac620008, 0x24020400, 0xa462000e, 0x2402000f,
4880xac620018, 0xac60001c, 0xac640000, 0xac650004,
4890x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
4900x14400037, 0x24100001, 0x8ee24e30, 0x210c0,
4910x24425038, 0x2e22021, 0x8c830000, 0x24020007,
4920x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
4930x1062001b, 0x24030040, 0x8c820004, 0x24420001,
4940xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
4950x10430007, 0x0, 0x8ee24e34, 0x24420001,
4960x10a20005, 0x0, 0x800175f, 0x0,
4970x14a00005, 0x0, 0x8f820128, 0x24420020,
4980xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
4990x50400013, 0xac800000, 0x8001775, 0x0,
5000x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
5010x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
5020x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
5030x24020007, 0xac820000, 0x24020001, 0xac820004,
5040x12000212, 0x3c020400, 0xafa20018, 0x3c020001,
5050x571021, 0x904283b0, 0x1040010b, 0x0,
5060x8ee20608, 0x8f430228, 0x24420001, 0x304a00ff,
5070x514300fd, 0xafa00010, 0x8ee20608, 0x210c0,
5080x571021, 0x8fa30018, 0x8fa4001c, 0xac43060c,
5090xac440610, 0x8f830054, 0x8f820054, 0x24690032,
5100x1221023, 0x2c420033, 0x1040006a, 0x5821,
5110x24180008, 0x240f000d, 0x240d0007, 0x240c0040,
5120x240e0001, 0x8f870120, 0x27623800, 0x24e80020,
5130x102102b, 0x50400001, 0x27683000, 0x8f820128,
5140x11020004, 0x0, 0x8f820124, 0x15020007,
5150x1021, 0x8ee201a4, 0x8021, 0x24420001,
5160xaee201a4, 0x80017f3, 0x8ee201a4, 0x8ee40608,
5170x420c0, 0x801821, 0x8ee40430, 0x8ee50434,
5180xa32821, 0xa3302b, 0x822021, 0x862021,
5190xace40000, 0xace50004, 0x8ee20608, 0xa4f8000e,
5200xacef0018, 0xacea001c, 0x210c0, 0x2442060c,
5210x2e21021, 0xace20008, 0x8ee204c4, 0xace20010,
5220xaf880120, 0x92e24e20, 0x14400033, 0x24100001,
5230x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
5240x8c820000, 0x144d001f, 0x0, 0x8ee34e30,
5250x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
5260x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
5270x24420001, 0x104c0007, 0x0, 0x8ee24e34,
5280x24420001, 0x10620005, 0x0, 0x80017e0,
5290x0, 0x14600005, 0x0, 0x8f820128,
5300x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
5310x2c420011, 0x50400010, 0xac800000, 0x80017f3,
5320x0, 0x8ee24e30, 0x24420001, 0x504c0003,
5330x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
5340x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
5350xac8d0000, 0xac8e0004, 0x56000006, 0x240b0001,
5360x8f820054, 0x1221023, 0x2c420033, 0x1440ff9d,
5370x0, 0x316300ff, 0x24020001, 0x14620077,
5380x3c050009, 0xaeea0608, 0x8f830054, 0x8f820054,
5390x24690032, 0x1221023, 0x2c420033, 0x10400061,
5400x5821, 0x240d0008, 0x240c0011, 0x24080012,
5410x24070040, 0x240a0001, 0x8f830120, 0x27623800,
5420x24660020, 0xc2102b, 0x50400001, 0x27663000,
5430x8f820128, 0x10c20004, 0x0, 0x8f820124,
5440x14c20007, 0x0, 0x8ee201a4, 0x8021,
5450x24420001, 0xaee201a4, 0x800185f, 0x8ee201a4,
5460x8ee20608, 0xac62001c, 0x8ee404a0, 0x8ee504a4,
5470x2462001c, 0xac620008, 0xa46d000e, 0xac6c0018,
5480xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
5490xaf860120, 0x92e24e20, 0x14400033, 0x24100001,
5500x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
5510x8c820000, 0x1448001f, 0x0, 0x8ee34e30,
5520x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
5530x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
5540x24420001, 0x10470007, 0x0, 0x8ee24e34,
5550x24420001, 0x10620005, 0x0, 0x800184c,
5560x0, 0x14600005, 0x0, 0x8f820128,
5570x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
5580x2c420011, 0x50400010, 0xac800000, 0x800185f,
5590x0, 0x8ee24e30, 0x24420001, 0x50470003,
5600x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
5610x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
5620xac880000, 0xac8a0004, 0x56000006, 0x240b0001,
5630x8f820054, 0x1221023, 0x2c420033, 0x1440ffa6,
5640x0, 0x316300ff, 0x24020001, 0x14620003,
5650x3c050009, 0x800197c, 0x24100001, 0x3c040001,
5660x248452a4, 0xafa00010, 0xafa00014, 0x8f860120,
5670x8f870124, 0x800187b, 0x34a5f011, 0x3c040001,
5680x248452b0, 0xafa00010, 0xafa00014, 0x8f860120,
5690x8f870124, 0x34a5f010, 0xc002403, 0x8021,
5700x800197c, 0x0, 0x3c040001, 0x248452bc,
5710xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
5720x8001975, 0x34a5f00f, 0x8ee20608, 0x8f430228,
5730x24420001, 0x304900ff, 0x512300e2, 0xafa00010,
5740x8ee20608, 0x210c0, 0x571021, 0x8fa30018,
5750x8fa4001c, 0xac43060c, 0xac440610, 0x8f870120,
5760x27623800, 0x24e80020, 0x102102b, 0x50400001,
5770x27683000, 0x8f820128, 0x11020004, 0x0,
5780x8f820124, 0x15020007, 0x1021, 0x8ee201a4,
5790x8021, 0x24420001, 0xaee201a4, 0x80018f7,
5800x8ee201a4, 0x8ee40608, 0x420c0, 0x801821,
5810x8ee40430, 0x8ee50434, 0xa32821, 0xa3302b,
5820x822021, 0x862021, 0xace40000, 0xace50004,
5830x8ee30608, 0x24020008, 0xa4e2000e, 0x2402000d,
5840xace20018, 0xace9001c, 0x318c0, 0x2463060c,
5850x2e31021, 0xace20008, 0x8ee204c4, 0xace20010,
5860xaf880120, 0x92e24e20, 0x14400037, 0x24100001,
5870x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
5880x8c830000, 0x24020007, 0x1462001f, 0x0,
5890x8ee34e30, 0x8ee24e34, 0x1062001b, 0x24030040,
5900x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
5910x8ee54e30, 0x24420001, 0x10430007, 0x0,
5920x8ee24e34, 0x24420001, 0x10a20005, 0x0,
5930x80018e1, 0x0, 0x14a00005, 0x0,
5940x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
5950x8c820004, 0x2c420011, 0x50400013, 0xac800000,
5960x80018f7, 0x0, 0x8ee24e30, 0x24030040,
5970x24420001, 0x50430003, 0x1021, 0x8ee24e30,
5980x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
5990x24425038, 0x2e22021, 0x24020007, 0xac820000,
6000x24020001, 0xac820004, 0x5600000c, 0xaee90608,
6010x3c040001, 0x248452c8, 0xafa00010, 0xafa00014,
6020x8ee60608, 0x8f470228, 0x3c050009, 0xc002403,
6030x34a5f000, 0x800197c, 0x0, 0x8f830120,
6040x27623800, 0x24660020, 0xc2102b, 0x50400001,
6050x27663000, 0x8f820128, 0x10c20004, 0x0,
6060x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
6070x8021, 0x24420001, 0xaee201a4, 0x800195e,
6080x8ee201a4, 0x8ee20608, 0xac62001c, 0x8ee404a0,
6090x8ee504a4, 0x2462001c, 0xac620008, 0x24020008,
6100xa462000e, 0x24020011, 0xac620018, 0xac640000,
6110xac650004, 0x8ee204c4, 0xac620010, 0xaf860120,
6120x92e24e20, 0x14400037, 0x24100001, 0x8ee24e30,
6130x210c0, 0x24425038, 0x2e22021, 0x8c830000,
6140x24020012, 0x1462001f, 0x0, 0x8ee34e30,
6150x8ee24e34, 0x1062001b, 0x24030040, 0x8c820004,
6160x24420001, 0xac820004, 0x8ee24e34, 0x8ee54e30,
6170x24420001, 0x10430007, 0x0, 0x8ee24e34,
6180x24420001, 0x10a20005, 0x0, 0x8001948,
6190x0, 0x14a00005, 0x0, 0x8f820128,
6200x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
6210x2c420011, 0x50400013, 0xac800000, 0x800195e,
6220x0, 0x8ee24e30, 0x24030040, 0x24420001,
6230x50430003, 0x1021, 0x8ee24e30, 0x24420001,
6240xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
6250x2e22021, 0x24020012, 0xac820000, 0x24020001,
6260xac820004, 0x5600001d, 0x24100001, 0x3c040001,
6270x248452d0, 0xafa00010, 0xafa00014, 0x8ee60608,
6280x8f470228, 0x3c050009, 0xc002403, 0x34a5f001,
6290x8ee201b0, 0x24420001, 0xaee201b0, 0x800197c,
6300x8ee201b0, 0x3c040001, 0x248452dc, 0xafa00014,
6310x8ee60608, 0x8f470228, 0x3c050009, 0x34a5f005,
6320xc002403, 0x0, 0x8ee201ac, 0x8021,
6330x24420001, 0xaee201ac, 0x8ee201ac, 0x1200000c,
6340x24020001, 0x3c010001, 0x370821, 0xa02083b0,
6350x8f420238, 0x8ee30158, 0x24630001, 0xaee30158,
6360x8ee30158, 0x800198c, 0xaee27278, 0x24020001,
6370x3c010001, 0x370821, 0xa02283b0, 0x3c020001,
6380x8c425cd8, 0x10400187, 0x0, 0x8ee27b84,
6390x24430001, 0x284200c9, 0x144001a4, 0xaee37b84,
6400x8ee204d4, 0x30420002, 0x14400119, 0xaee07b84,
6410x8ee204d4, 0x3c030600, 0x34631000, 0x34420002,
6420xaee204d4, 0xafa30018, 0x8ee20608, 0x8f430228,
6430x24420001, 0x304a00ff, 0x514300fd, 0xafa00010,
6440x8ee20608, 0x210c0, 0x571021, 0x8fa30018,
6450x8fa4001c, 0xac43060c, 0xac440610, 0x8f830054,
6460x8f820054, 0x24690032, 0x1221023, 0x2c420033,
6470x1040006a, 0x5821, 0x24180008, 0x240f000d,
6480x240d0007, 0x240c0040, 0x240e0001, 0x8f870120,
6490x27623800, 0x24e80020, 0x102102b, 0x50400001,
6500x27683000, 0x8f820128, 0x11020004, 0x0,
6510x8f820124, 0x15020007, 0x1021, 0x8ee201a4,
6520x8021, 0x24420001, 0xaee201a4, 0x8001a15,
6530x8ee201a4, 0x8ee40608, 0x420c0, 0x801821,
6540x8ee40430, 0x8ee50434, 0xa32821, 0xa3302b,
6550x822021, 0x862021, 0xace40000, 0xace50004,
6560x8ee20608, 0xa4f8000e, 0xacef0018, 0xacea001c,
6570x210c0, 0x2442060c, 0x2e21021, 0xace20008,
6580x8ee204c4, 0xace20010, 0xaf880120, 0x92e24e20,
6590x14400033, 0x24100001, 0x8ee24e30, 0x210c0,
6600x24425038, 0x2e22021, 0x8c820000, 0x144d001f,
6610x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
6620x0, 0x8c820004, 0x24420001, 0xac820004,
6630x8ee24e34, 0x8ee34e30, 0x24420001, 0x104c0007,
6640x0, 0x8ee24e34, 0x24420001, 0x10620005,
6650x0, 0x8001a02, 0x0, 0x14600005,
6660x0, 0x8f820128, 0x24420020, 0xaf820128,
6670x8f820128, 0x8c820004, 0x2c420011, 0x50400010,
6680xac800000, 0x8001a15, 0x0, 0x8ee24e30,
6690x24420001, 0x504c0003, 0x1021, 0x8ee24e30,
6700x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
6710x24425038, 0x2e22021, 0xac8d0000, 0xac8e0004,
6720x56000006, 0x240b0001, 0x8f820054, 0x1221023,
6730x2c420033, 0x1440ff9d, 0x0, 0x316300ff,
6740x24020001, 0x54620078, 0xafa00010, 0xaeea0608,
6750x8f830054, 0x8f820054, 0x24690032, 0x1221023,
6760x2c420033, 0x10400061, 0x5821, 0x240d0008,
6770x240c0011, 0x24080012, 0x24070040, 0x240a0001,
6780x8f830120, 0x27623800, 0x24660020, 0xc2102b,
6790x50400001, 0x27663000, 0x8f820128, 0x10c20004,
6800x0, 0x8f820124, 0x14c20007, 0x0,
6810x8ee201a4, 0x8021, 0x24420001, 0xaee201a4,
6820x8001a81, 0x8ee201a4, 0x8ee20608, 0xac62001c,
6830x8ee404a0, 0x8ee504a4, 0x2462001c, 0xac620008,
6840xa46d000e, 0xac6c0018, 0xac640000, 0xac650004,
6850x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
6860x14400033, 0x24100001, 0x8ee24e30, 0x210c0,
6870x24425038, 0x2e22021, 0x8c820000, 0x1448001f,
6880x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
6890x0, 0x8c820004, 0x24420001, 0xac820004,
6900x8ee24e34, 0x8ee34e30, 0x24420001, 0x10470007,
6910x0, 0x8ee24e34, 0x24420001, 0x10620005,
6920x0, 0x8001a6e, 0x0, 0x14600005,
6930x0, 0x8f820128, 0x24420020, 0xaf820128,
6940x8f820128, 0x8c820004, 0x2c420011, 0x50400010,
6950xac800000, 0x8001a81, 0x0, 0x8ee24e30,
6960x24420001, 0x50470003, 0x1021, 0x8ee24e30,
6970x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
6980x24425038, 0x2e22021, 0xac880000, 0xac8a0004,
6990x56000006, 0x240b0001, 0x8f820054, 0x1221023,
7000x2c420033, 0x1440ffa6, 0x0, 0x316300ff,
7010x24020001, 0x10620022, 0x0, 0x3c040001,
7020x248452a4, 0xafa00010, 0xafa00014, 0x8f860120,
7030x8f870124, 0x3c050009, 0xc002403, 0x34a5f011,
7040x8001aad, 0x0, 0x3c040001, 0x248452b0,
7050xafa00014, 0x8f860120, 0x8f870124, 0x3c050009,
7060xc002403, 0x34a5f010, 0x8001aad, 0x0,
7070x3c040001, 0x248452bc, 0xafa00014, 0x8ee60608,
7080x8f470228, 0x3c050009, 0xc002403, 0x34a5f00f,
7090x8ee201ac, 0x24420001, 0xaee201ac, 0x8ee201ac,
7100x8ee2015c, 0x24420001, 0xaee2015c, 0x8ee2015c,
7110x8ee204d4, 0x30420001, 0x10400055, 0x0,
7120x8f420218, 0x30420080, 0x10400029, 0x0,
7130x8f820044, 0x34420040, 0xaf820044, 0x8ee27b7c,
7140x402821, 0x8ee200c0, 0x8ee300c4, 0x24060000,
7150x2407ffff, 0x2021, 0x461024, 0x1444000d,
7160x671824, 0x1465000b, 0x0, 0x8ee27b80,
7170x402821, 0x8ee200e0, 0x8ee300e4, 0x2021,
7180x461024, 0x14440003, 0x671824, 0x1065000b,
7190x0, 0x8ee200c0, 0x8ee300c4, 0x8ee400e0,
7200x8ee500e4, 0xaee37b7c, 0xaee57b80, 0x8f820044,
7210x38420020, 0x8001b38, 0xaf820044, 0x8f820044,
7220x2403ffdf, 0x431024, 0x8001b38, 0xaf820044,
7230x8f820044, 0x2403ffdf, 0x431024, 0xaf820044,
7240x8ee27b7c, 0x402821, 0x8ee200c0, 0x8ee300c4,
7250x24060000, 0x2407ffff, 0x2021, 0x461024,
7260x1444000d, 0x671824, 0x1465000b, 0x0,
7270x8ee27b80, 0x402821, 0x8ee200e0, 0x8ee300e4,
7280x2021, 0x461024, 0x14440003, 0x671824,
7290x1065000b, 0x0, 0x8ee200c0, 0x8ee300c4,
7300x8ee400e0, 0x8ee500e4, 0xaee37b7c, 0xaee57b80,
7310x8f820044, 0x38420040, 0x8001b38, 0xaf820044,
7320x8f820044, 0x34420040, 0x8001b38, 0xaf820044,
7330x8f820044, 0x34420040, 0xaf820044, 0x8ee27b8c,
7340x24430001, 0x28420015, 0x14400028, 0xaee37b8c,
7350x8f820044, 0x38420020, 0xaf820044, 0x8001b38,
7360xaee07b8c, 0x8ee204d4, 0x30420001, 0x10400011,
7370x0, 0x8f420218, 0x30420080, 0x10400009,
7380x0, 0x8f820044, 0x34420020, 0xaf820044,
7390x8f820044, 0x2403ffbf, 0x431024, 0x8001b36,
7400xaf820044, 0x8f820044, 0x34420060, 0x8001b36,
7410xaf820044, 0x8f820044, 0x34420040, 0xaf820044,
7420x8ee27b88, 0x24430001, 0x28421389, 0x14400005,
7430xaee37b88, 0x8f820044, 0x38420020, 0xaf820044,
7440xaee07b88, 0xc004603, 0x0, 0x8fbf0024,
7450x8fb00020, 0x3e00008, 0x27bd0028, 0x27bdffb8,
7460xafbf0044, 0xafb60040, 0xafb5003c, 0xafb40038,
7470xafb30034, 0xafb20030, 0xafb1002c, 0xafb00028,
7480x8f960064, 0x32c20004, 0x1040000c, 0x24020004,
7490xaf820064, 0x8f420114, 0xaee204e0, 0x8f820060,
7500x34420008, 0xaf820060, 0x8ee2016c, 0x24420001,
7510xaee2016c, 0x80022f4, 0x8ee2016c, 0x32c20001,
7520x10400004, 0x24020001, 0xaf820064, 0x80022f4,
7530x0, 0x32c20002, 0x1440000c, 0x3c050003,
7540x3c040001, 0x24845354, 0x34a50001, 0x2c03021,
7550x3821, 0xafa00010, 0xc002403, 0xafa00014,
7560x2402fff8, 0x80022f4, 0xaf820064, 0x8f43022c,
7570x8f42010c, 0x5062000c, 0xafa00010, 0x8f42022c,
7580x21080, 0x5a1021, 0x8c420300, 0xafa20020,
7590x8f42022c, 0x24070001, 0x24420001, 0x3042003f,
7600x8001b80, 0xaf42022c, 0x3c040001, 0x24845360,
7610xafa00014, 0x8f46022c, 0x8f47010c, 0x3c050003,
7620xc002403, 0x34a5f01f, 0x3821, 0x14e00003,
7630x0, 0x80022ed, 0xaf960064, 0x93a20020,
7640x2443ffff, 0x2c620011, 0x10400658, 0x31080,
7650x3c010001, 0x220821, 0x8c225418, 0x400008,
7660x0, 0x8fa20020, 0x30420fff, 0xaee20e0c,
7670x8f820060, 0x34420200, 0xaf820060, 0x8ee20118,
7680x24420001, 0xaee20118, 0x80022e8, 0x8ee20118,
7690x8fa20020, 0x24030001, 0x3c010001, 0x370821,
7700xa02383b1, 0x30420fff, 0xaee25238, 0x8f820060,
7710x34420100, 0xaf820060, 0x8ee20144, 0x24420001,
7720xaee20144, 0x80022e8, 0x8ee20144, 0x8fa20020,
7730x21200, 0x22502, 0x24020001, 0x10820005,
7740x24020002, 0x10820009, 0x2402fffe, 0x8001bc9,
7750xafa00010, 0x8ee204d4, 0xaee40070, 0xaee40074,
7760x34420001, 0x8001bbd, 0xaee204d4, 0x8ee304d4,
7770xaee40070, 0xaee40074, 0x621824, 0xaee304d4,
7780x8f840054, 0x41442, 0x41c82, 0x431021,
7790x41cc2, 0x431023, 0x41d02, 0x431021,
7800x41d42, 0x431023, 0x8001bd0, 0xaee20078,
7810x3c040001, 0x2484536c, 0xafa00014, 0x8fa60020,
7820x3c050003, 0xc002403, 0x34a50004, 0x8ee20110,
7830x24420001, 0xaee20110, 0x80022e8, 0x8ee20110,
7840x27440212, 0xc0022fe, 0x24050006, 0x3049001f,
7850x920c0, 0x2e41021, 0x9442727c, 0x30424000,
7860x1040000a, 0x971021, 0x97430212, 0xa443727e,
7870x8f430214, 0x971021, 0xac437280, 0x2e41821,
7880x34028000, 0x8001c79, 0xa462727c, 0x9443727e,
7890x97420212, 0x14620006, 0x2e41021, 0x971021,
7900x8c437280, 0x8f420214, 0x1062009f, 0x2e41021,
7910x9442727c, 0x30428000, 0x1040002a, 0x2406ffff,
7920x2021, 0x410c0, 0x2e21021, 0x9442737c,
7930x30424000, 0x54400005, 0x803021, 0x24840001,
7940x2c820080, 0x1440fff8, 0x410c0, 0x4c10010,
7950x618c0, 0x610c0, 0x571821, 0x8c63737c,
7960x571021, 0xafa30010, 0x8c427380, 0x3c040001,
7970x24845378, 0xafa20014, 0x8f470214, 0x3c050003,
7980xc002403, 0x34a50013, 0x8001c90, 0x3c020800,
7990x97440212, 0x771021, 0xa444737e, 0x8f440214,
8000x771021, 0x2e31821, 0xac447380, 0x34028000,
8010xa462737c, 0x910c0, 0x2e21021, 0x8001c79,
8020xa446727c, 0x2e41021, 0x9445727c, 0x8001c2e,
8030x510c0, 0x9443737e, 0x97420212, 0x14620006,
8040x510c0, 0x971021, 0x8c437380, 0x8f420214,
8050x10620065, 0x510c0, 0x2e21021, 0x9445737c,
8060x510c0, 0x2e21021, 0x9442737c, 0x30428000,
8070x1040fff0, 0x971021, 0x520c0, 0x971021,
8080x9443737e, 0x97420212, 0x14620006, 0x2406ffff,
8090x971021, 0x8c437380, 0x8f420214, 0x10620053,
8100x3c020800, 0x2021, 0x410c0, 0x2e21021,
8110x9442737c, 0x30424000, 0x54400005, 0x803021,
8120x24840001, 0x2c820080, 0x1440fff8, 0x410c0,
8130x4c10023, 0x618c0, 0x910c0, 0x571821,
8140x8c63727c, 0x571021, 0xafa30010, 0x8c427280,
8150x3c040001, 0x24845384, 0xafa20014, 0x8f470214,
8160x3c050003, 0xc002403, 0x34a5f017, 0x8001c90,
8170x3c020800, 0x8f430210, 0xb71021, 0xac43777c,
8180x8f430214, 0xb71021, 0xac437780, 0x3c020001,
8190x571021, 0x8c4283b4, 0x24420001, 0x3c010001,
8200x370821, 0xac2283b4, 0x3c030001, 0x771821,
8210x8c6383b4, 0x2e51021, 0x8001c82, 0xa443777c,
8220x97440212, 0x771021, 0xa444737e, 0x8f440214,
8230x771021, 0x2e31821, 0xac447380, 0x34028000,
8240xa462737c, 0x510c0, 0x2e21021, 0xa446737c,
8250x2021, 0x428c0, 0x2e51021, 0x9442777c,
8260x1040ffdc, 0x24840001, 0x2c820080, 0x5440fffa,
8270x428c0, 0x92e204d8, 0x10400006, 0x24020001,
8280x8ee304dc, 0x1221004, 0x621825, 0x8001c8f,
8290xaee304dc, 0x8f830228, 0x24020001, 0x1221004,
8300x621825, 0xaf830228, 0x3c020800, 0x34421000,
8310xafa20018, 0x8ee20608, 0x8f430228, 0x24420001,
8320x304a00ff, 0x514300fd, 0xafa00010, 0x8ee20608,
8330x210c0, 0x571021, 0x8fa30018, 0x8fa4001c,
8340xac43060c, 0xac440610, 0x8f830054, 0x8f820054,
8350x24690032, 0x1221023, 0x2c420033, 0x1040006a,
8360x5821, 0x24100008, 0x240f000d, 0x240d0007,
8370x240c0040, 0x240e0001, 0x8f870120, 0x27623800,
8380x24e80020, 0x102102b, 0x50400001, 0x27683000,
8390x8f820128, 0x11020004, 0x0, 0x8f820124,
8400x15020007, 0x1021, 0x8ee201a4, 0x3821,
8410x24420001, 0xaee201a4, 0x8001d08, 0x8ee201a4,
8420x8ee40608, 0x420c0, 0x801821, 0x8ee40430,
8430x8ee50434, 0xa32821, 0xa3302b, 0x822021,
8440x862021, 0xace40000, 0xace50004, 0x8ee20608,
8450xa4f0000e, 0xacef0018, 0xacea001c, 0x210c0,
8460x2442060c, 0x2e21021, 0xace20008, 0x8ee204c4,
8470xace20010, 0xaf880120, 0x92e24e20, 0x14400033,
8480x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
8490x2e22021, 0x8c820000, 0x144d001f, 0x0,
8500x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
8510x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
8520x8ee34e30, 0x24420001, 0x104c0007, 0x0,
8530x8ee24e34, 0x24420001, 0x10620005, 0x0,
8540x8001cf5, 0x0, 0x14600005, 0x0,
8550x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
8560x8c820004, 0x2c420011, 0x50400010, 0xac800000,
8570x8001d08, 0x0, 0x8ee24e30, 0x24420001,
8580x504c0003, 0x1021, 0x8ee24e30, 0x24420001,
8590xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
8600x2e22021, 0xac8d0000, 0xac8e0004, 0x54e00006,
8610x240b0001, 0x8f820054, 0x1221023, 0x2c420033,
8620x1440ff9d, 0x0, 0x316300ff, 0x24020001,
8630x54620078, 0xafa00010, 0xaeea0608, 0x8f830054,
8640x8f820054, 0x24690032, 0x1221023, 0x2c420033,
8650x10400061, 0x5821, 0x240e0008, 0x240d0011,
8660x240a0012, 0x24080040, 0x240c0001, 0x8f830120,
8670x27623800, 0x24660020, 0xc2102b, 0x50400001,
8680x27663000, 0x8f820128, 0x10c20004, 0x0,
8690x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
8700x3821, 0x24420001, 0xaee201a4, 0x8001d74,
8710x8ee201a4, 0x8ee20608, 0xac62001c, 0x8ee404a0,
8720x8ee504a4, 0x2462001c, 0xac620008, 0xa46e000e,
8730xac6d0018, 0xac640000, 0xac650004, 0x8ee204c4,
8740xac620010, 0xaf860120, 0x92e24e20, 0x14400033,
8750x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
8760x2e22021, 0x8c820000, 0x144a001f, 0x0,
8770x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
8780x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
8790x8ee34e30, 0x24420001, 0x10480007, 0x0,
8800x8ee24e34, 0x24420001, 0x10620005, 0x0,
8810x8001d61, 0x0, 0x14600005, 0x0,
8820x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
8830x8c820004, 0x2c420011, 0x50400010, 0xac800000,
8840x8001d74, 0x0, 0x8ee24e30, 0x24420001,
8850x50480003, 0x1021, 0x8ee24e30, 0x24420001,
8860xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
8870x2e22021, 0xac8a0000, 0xac8c0004, 0x54e00006,
8880x240b0001, 0x8f820054, 0x1221023, 0x2c420033,
8890x1440ffa6, 0x0, 0x316300ff, 0x24020001,
8900x10620022, 0x0, 0x3c040001, 0x24845390,
8910xafa00010, 0xafa00014, 0x8f860120, 0x8f870124,
8920x3c050009, 0xc002403, 0x34a5f011, 0x8001da0,
8930x0, 0x3c040001, 0x2484539c, 0xafa00014,
8940x8f860120, 0x8f870124, 0x3c050009, 0xc002403,
8950x34a5f010, 0x8001da0, 0x0, 0x3c040001,
8960x248453a8, 0xafa00014, 0x8ee60608, 0x8f470228,
8970x3c050009, 0xc002403, 0x34a5f00f, 0x8ee201ac,
8980x24420001, 0xaee201ac, 0x8ee201ac, 0x8ee20124,
8990x24420001, 0xaee20124, 0x8001f97, 0x8ee20124,
9000x27440212, 0xc0022fe, 0x24050006, 0x3049001f,
9010x928c0, 0x2e51021, 0x9442727c, 0x30428000,
9020x1040002f, 0x2e51021, 0x9442727c, 0x30424000,
9030x1440001c, 0xb71021, 0x9443727e, 0x97420212,
9040x14620018, 0xb71021, 0x8c437280, 0x8f420214,
9050x54620016, 0xafa20010, 0x92e204d8, 0x10400007,
9060x24020001, 0x8ee304dc, 0x1221004, 0x21027,
9070x621824, 0x8001dc9, 0xaee304dc, 0x8f830228,
9080x1221004, 0x21027, 0x621824, 0xaf830228,
9090x910c0, 0x2e21821, 0x3402c000, 0x8001e4e,
9100xa462727c, 0x8f420214, 0xafa20010, 0x910c0,
9110x571021, 0x8c42727c, 0x3c040001, 0x248453b4,
9120x3c050003, 0xafa20014, 0x8f470210, 0x34a5f01c,
9130xc002403, 0x1203021, 0x8001e83, 0x3c020800,
9140xb71021, 0x9443727e, 0x97420212, 0x14620019,
9150x918c0, 0xb71021, 0x8c437280, 0x8f420214,
9160x14620014, 0x918c0, 0x2e51021, 0x9447727c,
9170x720c0, 0x971021, 0x9443737e, 0xb71021,
9180xa443727e, 0x971021, 0x8c437380, 0xb71021,
9190xac437280, 0x2e41021, 0x9443737c, 0x2e51021,
9200xa443727c, 0x2e41821, 0x3402c000, 0x8001e4e,
9210xa462737c, 0x2e31021, 0x9447727c, 0x3021,
9220x720c0, 0x2e41021, 0x9442737c, 0x4021,
9230x30428000, 0x14400025, 0xe02821, 0x605021,
9240x340bc000, 0x971021, 0x9443737e, 0x97420212,
9250x54620015, 0xe02821, 0x971021, 0x8c437380,
9260x8f420214, 0x54620010, 0xe02821, 0x11000006,
9270x2e41021, 0x9443737c, 0x510c0, 0x2e21021,
9280x8001e1a, 0xa443737c, 0x9443737c, 0x2ea1021,
9290xa443727c, 0x710c0, 0x2e21021, 0xa44b737c,
9300x8001e28, 0x24060001, 0x510c0, 0x2e21021,
9310x9447737c, 0x720c0, 0x2e41021, 0x9442737c,
9320x30428000, 0x1040ffdf, 0x25080001, 0x30c200ff,
9330x14400025, 0x2021, 0x720c0, 0x971021,
9340x9443737e, 0x97420212, 0x1462000f, 0x910c0,
9350x971021, 0x8c437380, 0x8f420214, 0x1462000a,
9360x910c0, 0x2e41821, 0x3402c000, 0x15000015,
9370xa462737c, 0x910c0, 0x2e21821, 0x34028000,
9380x8001e4e, 0xa462727c, 0x571021, 0x8c42727c,
9390x3c040001, 0x248453c0, 0x3c050003, 0xafa20010,
9400x710c0, 0x571021, 0x8c42737c, 0x34a5001e,
9410x1203021, 0xc002403, 0xafa20014, 0x8001e83,
9420x3c020800, 0x2021, 0x428c0, 0xb71021,
9430x9443777e, 0x97420212, 0x5462002b, 0x24840001,
9440xb71021, 0x8c437780, 0x8f420214, 0x54620026,
9450x24840001, 0x3c020001, 0x571021, 0x8c4283b4,
9460x2442ffff, 0x3c010001, 0x370821, 0xac2283b4,
9470x3c020001, 0x571021, 0x8c4283b4, 0x809021,
9480x242102b, 0x1040000e, 0x24b1777c, 0x24b07784,
9490x2f02021, 0x2f12821, 0xc002490, 0x24060008,
9500x26310008, 0x3c020001, 0x571021, 0x8c4283b4,
9510x26520001, 0x242102b, 0x1440fff5, 0x26100008,
9520x3c040001, 0x972021, 0x8c8483b4, 0x24050008,
9530x420c0, 0x2484777c, 0xc002488, 0x2e42021,
9540x8001e83, 0x3c020800, 0x2c820080, 0x1440ffcf,
9550x428c0, 0x3c020800, 0x34422000, 0xafa20018,
9560x8ee20608, 0x8f430228, 0x24420001, 0x304a00ff,
9570x514300fd, 0xafa00010, 0x8ee20608, 0x210c0,
9580x571021, 0x8fa30018, 0x8fa4001c, 0xac43060c,
9590xac440610, 0x8f830054, 0x8f820054, 0x24690032,
9600x1221023, 0x2c420033, 0x1040006a, 0x5821,
9610x24100008, 0x240f000d, 0x240d0007, 0x240c0040,
9620x240e0001, 0x8f870120, 0x27623800, 0x24e80020,
9630x102102b, 0x50400001, 0x27683000, 0x8f820128,
9640x11020004, 0x0, 0x8f820124, 0x15020007,
9650x1021, 0x8ee201a4, 0x3821, 0x24420001,
9660xaee201a4, 0x8001efb, 0x8ee201a4, 0x8ee40608,
9670x420c0, 0x801821, 0x8ee40430, 0x8ee50434,
9680xa32821, 0xa3302b, 0x822021, 0x862021,
9690xace40000, 0xace50004, 0x8ee20608, 0xa4f0000e,
9700xacef0018, 0xacea001c, 0x210c0, 0x2442060c,
9710x2e21021, 0xace20008, 0x8ee204c4, 0xace20010,
9720xaf880120, 0x92e24e20, 0x14400033, 0x24070001,
9730x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
9740x8c820000, 0x144d001f, 0x0, 0x8ee34e30,
9750x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
9760x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
9770x24420001, 0x104c0007, 0x0, 0x8ee24e34,
9780x24420001, 0x10620005, 0x0, 0x8001ee8,
9790x0, 0x14600005, 0x0, 0x8f820128,
9800x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
9810x2c420011, 0x50400010, 0xac800000, 0x8001efb,
9820x0, 0x8ee24e30, 0x24420001, 0x504c0003,
9830x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
9840x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
9850xac8d0000, 0xac8e0004, 0x54e00006, 0x240b0001,
9860x8f820054, 0x1221023, 0x2c420033, 0x1440ff9d,
9870x0, 0x316300ff, 0x24020001, 0x54620078,
9880xafa00010, 0xaeea0608, 0x8f830054, 0x8f820054,
9890x24690032, 0x1221023, 0x2c420033, 0x10400061,
9900x5821, 0x240e0008, 0x240d0011, 0x240a0012,
9910x24080040, 0x240c0001, 0x8f830120, 0x27623800,
9920x24660020, 0xc2102b, 0x50400001, 0x27663000,
9930x8f820128, 0x10c20004, 0x0, 0x8f820124,
9940x14c20007, 0x0, 0x8ee201a4, 0x3821,
9950x24420001, 0xaee201a4, 0x8001f67, 0x8ee201a4,
9960x8ee20608, 0xac62001c, 0x8ee404a0, 0x8ee504a4,
9970x2462001c, 0xac620008, 0xa46e000e, 0xac6d0018,
9980xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
9990xaf860120, 0x92e24e20, 0x14400033, 0x24070001,
10000x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
10010x8c820000, 0x144a001f, 0x0, 0x8ee34e30,
10020x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
10030x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
10040x24420001, 0x10480007, 0x0, 0x8ee24e34,
10050x24420001, 0x10620005, 0x0, 0x8001f54,
10060x0, 0x14600005, 0x0, 0x8f820128,
10070x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
10080x2c420011, 0x50400010, 0xac800000, 0x8001f67,
10090x0, 0x8ee24e30, 0x24420001, 0x50480003,
10100x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
10110x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
10120xac8a0000, 0xac8c0004, 0x54e00006, 0x240b0001,
10130x8f820054, 0x1221023, 0x2c420033, 0x1440ffa6,
10140x0, 0x316300ff, 0x24020001, 0x10620022,
10150x0, 0x3c040001, 0x24845390, 0xafa00010,
10160xafa00014, 0x8f860120, 0x8f870124, 0x3c050009,
10170xc002403, 0x34a5f011, 0x8001f93, 0x0,
10180x3c040001, 0x2484539c, 0xafa00014, 0x8f860120,
10190x8f870124, 0x3c050009, 0xc002403, 0x34a5f010,
10200x8001f93, 0x0, 0x3c040001, 0x248453a8,
10210xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
10220xc002403, 0x34a5f00f, 0x8ee201ac, 0x24420001,
10230xaee201ac, 0x8ee201ac, 0x8ee20128, 0x24420001,
10240xaee20128, 0x8ee20128, 0x8ee20164, 0x24420001,
10250xaee20164, 0x80022e8, 0x8ee20164, 0x8fa20020,
10260x21200, 0x21d02, 0x24020001, 0x10620005,
10270x24020002, 0x1062000d, 0x0, 0x8001fb7,
10280xafa00010, 0x92e204d8, 0x14400006, 0x24020001,
10290x8f820228, 0xaee204dc, 0x2402ffff, 0xaf820228,
10300x24020001, 0x8001fbe, 0xa2e204d8, 0x92e204d8,
10310x5040000c, 0xa2e004d8, 0x8ee204dc, 0xaf820228,
10320x8001fbe, 0xa2e004d8, 0x3c040001, 0x248453c8,
10330xafa00014, 0x8fa60020, 0x3c050003, 0xc002403,
10340x34a5f009, 0x8ee2013c, 0x24420001, 0xaee2013c,
10350x80022e8, 0x8ee2013c, 0x8fa20020, 0x21200,
10360x22502, 0x24020001, 0x10820005, 0x24020002,
10370x1082000f, 0x0, 0x8001fe3, 0xafa00010,
10380x8f820220, 0x3c0308ff, 0x3463ffff, 0x431024,
10390x34420008, 0xaf820220, 0x24020001, 0x3c010001,
10400x370821, 0xa02283b2, 0x8001fea, 0xaee40108,
10410x8f820220, 0x3c0308ff, 0x3463fff7, 0x431024,
10420xaf820220, 0x3c010001, 0x370821, 0xa02083b2,
10430x8001fea, 0xaee40108, 0x3c040001, 0x248453d4,
10440xafa00014, 0x8fa60020, 0x3c050003, 0xc002403,
10450x34a5f00a, 0x8ee2012c, 0x24420001, 0xaee2012c,
10460x80022e8, 0x8ee2012c, 0x8fa20020, 0x21200,
10470x21d02, 0x24020001, 0x10620005, 0x24020002,
10480x1062000e, 0x0, 0x8002011, 0xafa00010,
10490x8f820220, 0x3c0308ff, 0x3463ffff, 0x431024,
10500x34420008, 0xaf820220, 0x24020001, 0x3c010001,
10510x370821, 0x8002018, 0xa02283b3, 0x3c020001,
10520x571021, 0x904283b2, 0x3c010001, 0x370821,
10530x1440000e, 0xa02083b3, 0x8f820220, 0x3c0308ff,
10540x3463fff7, 0x431024, 0x8002018, 0xaf820220,
10550x3c040001, 0x248453e0, 0xafa00014, 0x8fa60020,
10560x3c050003, 0xc002403, 0x34a5f00b, 0x8ee20114,
10570x24420001, 0xaee20114, 0x80022e8, 0x8ee20114,
10580x27840208, 0x27450200, 0xc00249a, 0x24060008,
10590x26e40094, 0x27450200, 0xc00249a, 0x24060008,
10600x8ee20134, 0x24420001, 0xaee20134, 0x80022e8,
10610x8ee20134, 0x8f460248, 0x2021, 0xc005108,
10620x24050004, 0x8ee20130, 0x24420001, 0xaee20130,
10630x80022e8, 0x8ee20130, 0x8ef301cc, 0x8ef401d0,
10640x8ef501d8, 0x8ee20140, 0x26e40030, 0x24420001,
10650xaee20140, 0x8ef00140, 0x8ef10074, 0x8ef20070,
10660xc002488, 0x24050400, 0xaef301cc, 0xaef401d0,
10670xaef501d8, 0xaef00140, 0xaef10074, 0xaef20070,
10680x8f42025c, 0x26e40094, 0xaee20060, 0x8f420260,
10690x27450200, 0x24060008, 0xaee20068, 0x24020006,
10700xc00249a, 0xaee20064, 0x3c023b9a, 0x3442ca00,
10710xaee2006c, 0x240203e8, 0x24040002, 0x24030001,
10720xaee20104, 0xaee40100, 0xaee3010c, 0x8f820220,
10730x30420008, 0x10400004, 0x0, 0xaee30108,
10740x8002061, 0x2021, 0xaee40108, 0x2021,
10750x3c030001, 0x641821, 0x90635c30, 0x2e41021,
10760x24840001, 0xa043009c, 0x2c82000f, 0x1440fff8,
10770x0, 0x8f820040, 0x2e41821, 0x24840001,
10780x21702, 0x24420030, 0xa062009c, 0x2e41021,
10790x80022e8, 0xa040009c, 0x24020001, 0x3c010001,
10800x370821, 0xa02283e0, 0x240b0400, 0x24080014,
10810x240a0040, 0x24090001, 0x8f830100, 0x27623000,
10820x24660020, 0xc2102b, 0x50400001, 0x27662800,
10830x8f820108, 0x10c20004, 0x0, 0x8f820104,
10840x14c20007, 0x26e20030, 0x8ee201a8, 0x3821,
10850x24420001, 0xaee201a8, 0x80020a8, 0x8ee201a8,
10860x8ee404b8, 0x8ee504bc, 0xac620008, 0xa46b000e,
10870xac680018, 0xac60001c, 0xac640000, 0xac650004,
10880x8ee204cc, 0xac620010, 0xaf860100, 0x92e204ec,
10890x1440000e, 0x24070001, 0x8ee24e28, 0x24420001,
10900x504a0003, 0x1021, 0x8ee24e28, 0x24420001,
10910xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
10920x2e21021, 0xac480000, 0xac490004, 0x10e0ffd2,
10930x0, 0x80022e8, 0x0, 0x3c020900,
10940xaee05238, 0xaee0523c, 0xaee05240, 0xaee05244,
10950xaee001d0, 0x3c010001, 0x370821, 0xa02083b1,
10960xafa20018, 0x8ee20608, 0x8f430228, 0x24420001,
10970x304a00ff, 0x514300fd, 0xafa00010, 0x8ee20608,
10980x210c0, 0x571021, 0x8fa30018, 0x8fa4001c,
10990xac43060c, 0xac440610, 0x8f830054, 0x8f820054,
11000x24690032, 0x1221023, 0x2c420033, 0x1040006a,
11010x5821, 0x24100008, 0x240f000d, 0x240d0007,
11020x240c0040, 0x240e0001, 0x8f870120, 0x27623800,
11030x24e80020, 0x102102b, 0x50400001, 0x27683000,
11040x8f820128, 0x11020004, 0x0, 0x8f820124,
11050x15020007, 0x1021, 0x8ee201a4, 0x3821,
11060x24420001, 0xaee201a4, 0x800212c, 0x8ee201a4,
11070x8ee40608, 0x420c0, 0x801821, 0x8ee40430,
11080x8ee50434, 0xa32821, 0xa3302b, 0x822021,
11090x862021, 0xace40000, 0xace50004, 0x8ee20608,
11100xa4f0000e, 0xacef0018, 0xacea001c, 0x210c0,
11110x2442060c, 0x2e21021, 0xace20008, 0x8ee204c4,
11120xace20010, 0xaf880120, 0x92e24e20, 0x14400033,
11130x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
11140x2e22021, 0x8c820000, 0x144d001f, 0x0,
11150x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
11160x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
11170x8ee34e30, 0x24420001, 0x104c0007, 0x0,
11180x8ee24e34, 0x24420001, 0x10620005, 0x0,
11190x8002119, 0x0, 0x14600005, 0x0,
11200x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
11210x8c820004, 0x2c420011, 0x50400010, 0xac800000,
11220x800212c, 0x0, 0x8ee24e30, 0x24420001,
11230x504c0003, 0x1021, 0x8ee24e30, 0x24420001,
11240xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
11250x2e22021, 0xac8d0000, 0xac8e0004, 0x54e00006,
11260x240b0001, 0x8f820054, 0x1221023, 0x2c420033,
11270x1440ff9d, 0x0, 0x316300ff, 0x24020001,
11280x54620078, 0xafa00010, 0xaeea0608, 0x8f830054,
11290x8f820054, 0x24690032, 0x1221023, 0x2c420033,
11300x10400061, 0x5821, 0x240e0008, 0x240d0011,
11310x240a0012, 0x24080040, 0x240c0001, 0x8f830120,
11320x27623800, 0x24660020, 0xc2102b, 0x50400001,
11330x27663000, 0x8f820128, 0x10c20004, 0x0,
11340x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
11350x3821, 0x24420001, 0xaee201a4, 0x8002198,
11360x8ee201a4, 0x8ee20608, 0xac62001c, 0x8ee404a0,
11370x8ee504a4, 0x2462001c, 0xac620008, 0xa46e000e,
11380xac6d0018, 0xac640000, 0xac650004, 0x8ee204c4,
11390xac620010, 0xaf860120, 0x92e24e20, 0x14400033,
11400x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
11410x2e22021, 0x8c820000, 0x144a001f, 0x0,
11420x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
11430x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
11440x8ee34e30, 0x24420001, 0x10480007, 0x0,
11450x8ee24e34, 0x24420001, 0x10620005, 0x0,
11460x8002185, 0x0, 0x14600005, 0x0,
11470x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
11480x8c820004, 0x2c420011, 0x50400010, 0xac800000,
11490x8002198, 0x0, 0x8ee24e30, 0x24420001,
11500x50480003, 0x1021, 0x8ee24e30, 0x24420001,
11510xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
11520x2e22021, 0xac8a0000, 0xac8c0004, 0x54e00006,
11530x240b0001, 0x8f820054, 0x1221023, 0x2c420033,
11540x1440ffa6, 0x0, 0x316300ff, 0x24020001,
11550x10620022, 0x0, 0x3c040001, 0x24845390,
11560xafa00010, 0xafa00014, 0x8f860120, 0x8f870124,
11570x3c050009, 0xc002403, 0x34a5f011, 0x80021c4,
11580x0, 0x3c040001, 0x2484539c, 0xafa00014,
11590x8f860120, 0x8f870124, 0x3c050009, 0xc002403,
11600x34a5f010, 0x80021c4, 0x0, 0x3c040001,
11610x248453a8, 0xafa00014, 0x8ee60608, 0x8f470228,
11620x3c050009, 0xc002403, 0x34a5f00f, 0x8ee201ac,
11630x24420001, 0xaee201ac, 0x8ee201ac, 0x8ee20120,
11640x24420001, 0xaee20120, 0x8ee20120, 0x8ee20168,
11650x24420001, 0xaee20168, 0x80022e8, 0x8ee20168,
11660x8f42025c, 0x26e40094, 0xaee20060, 0x8f420260,
11670x27450200, 0x24060008, 0xc00249a, 0xaee20068,
11680x8f820220, 0x30420008, 0x14400002, 0x24020001,
11690x24020002, 0xaee20108, 0x8ee2011c, 0x24420001,
11700xaee2011c, 0x80022e8, 0x8ee2011c, 0x3c040001,
11710x248453ec, 0xafa00010, 0xafa00014, 0x8fa60020,
11720x3c050003, 0xc002403, 0x34a5f00f, 0x93a20020,
11730x3c030700, 0x34631000, 0x431025, 0xafa20018,
11740x8ee20608, 0x8f430228, 0x24420001, 0x304900ff,
11750x512300e2, 0xafa00010, 0x8ee20608, 0x210c0,
11760x571021, 0x8fa30018, 0x8fa4001c, 0xac43060c,
11770xac440610, 0x8f870120, 0x27623800, 0x24e80020,
11780x102102b, 0x50400001, 0x27683000, 0x8f820128,
11790x11020004, 0x0, 0x8f820124, 0x15020007,
11800x1021, 0x8ee201a4, 0x3821, 0x24420001,
11810xaee201a4, 0x800225d, 0x8ee201a4, 0x8ee40608,
11820x420c0, 0x801821, 0x8ee40430, 0x8ee50434,
11830xa32821, 0xa3302b, 0x822021, 0x862021,
11840xace40000, 0xace50004, 0x8ee30608, 0x24020008,
11850xa4e2000e, 0x2402000d, 0xace20018, 0xace9001c,
11860x318c0, 0x2463060c, 0x2e31021, 0xace20008,
11870x8ee204c4, 0xace20010, 0xaf880120, 0x92e24e20,
11880x14400037, 0x24070001, 0x8ee24e30, 0x210c0,
11890x24425038, 0x2e22021, 0x8c830000, 0x24020007,
11900x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
11910x1062001b, 0x24030040, 0x8c820004, 0x24420001,
11920xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
11930x10430007, 0x0, 0x8ee24e34, 0x24420001,
11940x10a20005, 0x0, 0x8002247, 0x0,
11950x14a00005, 0x0, 0x8f820128, 0x24420020,
11960xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
11970x50400013, 0xac800000, 0x800225d, 0x0,
11980x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
11990x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
12000x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
12010x24020007, 0xac820000, 0x24020001, 0xac820004,
12020x54e0000c, 0xaee90608, 0x3c040001, 0x248453f4,
12030xafa00010, 0xafa00014, 0x8ee60608, 0x8f470228,
12040x3c050009, 0xc002403, 0x34a5f000, 0x80022e0,
12050x0, 0x8f830120, 0x27623800, 0x24660020,
12060xc2102b, 0x50400001, 0x27663000, 0x8f820128,
12070x10c20004, 0x0, 0x8f820124, 0x14c20007,
12080x0, 0x8ee201a4, 0x3821, 0x24420001,
12090xaee201a4, 0x80022c4, 0x8ee201a4, 0x8ee20608,
12100xac62001c, 0x8ee404a0, 0x8ee504a4, 0x2462001c,
12110xac620008, 0x24020008, 0xa462000e, 0x24020011,
12120xac620018, 0xac640000, 0xac650004, 0x8ee204c4,
12130xac620010, 0xaf860120, 0x92e24e20, 0x14400037,
12140x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
12150x2e22021, 0x8c830000, 0x24020012, 0x1462001f,
12160x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
12170x24030040, 0x8c820004, 0x24420001, 0xac820004,
12180x8ee24e34, 0x8ee54e30, 0x24420001, 0x10430007,
12190x0, 0x8ee24e34, 0x24420001, 0x10a20005,
12200x0, 0x80022ae, 0x0, 0x14a00005,
12210x0, 0x8f820128, 0x24420020, 0xaf820128,
12220x8f820128, 0x8c820004, 0x2c420011, 0x50400013,
12230xac800000, 0x80022c4, 0x0, 0x8ee24e30,
12240x24030040, 0x24420001, 0x50430003, 0x1021,
12250x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
12260x210c0, 0x24425038, 0x2e22021, 0x24020012,
12270xac820000, 0x24020001, 0xac820004, 0x14e0001b,
12280x0, 0x3c040001, 0x248453fc, 0xafa00010,
12290xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
12300xc002403, 0x34a5f001, 0x8ee201b0, 0x24420001,
12310xaee201b0, 0x80022e0, 0x8ee201b0, 0x3c040001,
12320x24845408, 0xafa00014, 0x8ee60608, 0x8f470228,
12330x3c050009, 0xc002403, 0x34a5f005, 0x8ee201ac,
12340x24420001, 0xaee201ac, 0x8ee201ac, 0x8ee20150,
12350x24420001, 0xaee20150, 0x8ee20150, 0x8ee20160,
12360x24420001, 0xaee20160, 0x8ee20160, 0x8f43022c,
12370x8f42010c, 0x14620009, 0x24020002, 0xaf820064,
12380x8f820064, 0x14400005, 0x0, 0x8f43022c,
12390x8f42010c, 0x1462f875, 0x0, 0x8fbf0044,
12400x8fb60040, 0x8fb5003c, 0x8fb40038, 0x8fb30034,
12410x8fb20030, 0x8fb1002c, 0x8fb00028, 0x3e00008,
12420x27bd0048, 0x27bdfff8, 0x2408ffff, 0x10a00014,
12430x4821, 0x3c0aedb8, 0x354a8320, 0x90870000,
12440x24840001, 0x3021, 0x1071026, 0x30420001,
12450x10400002, 0x81842, 0x6a1826, 0x604021,
12460x24c60001, 0x2cc20008, 0x1440fff7, 0x73842,
12470x25290001, 0x125102b, 0x1440fff0, 0x0,
12480x1001021, 0x3e00008, 0x27bd0008, 0x27bdffe8,
12490x27642800, 0xafbf0010, 0xc002488, 0x24051000,
12500x24020021, 0xaf800100, 0xaf800104, 0xaf800108,
12510xaf800110, 0xaf800114, 0xaf800118, 0xaf800120,
12520xaf800124, 0xaf800128, 0xaf800130, 0xaf800134,
12530xaf800138, 0xaee04e28, 0xaee04e2c, 0xaee04e30,
12540xaee04e34, 0xaf82011c, 0x8f420218, 0x30420040,
12550x10400004, 0x0, 0x8f82011c, 0x34420004,
12560xaf82011c, 0x8fbf0010, 0x3e00008, 0x27bd0018,
12570x27bdffe0, 0xafbf0018, 0x8f820104, 0xafa20010,
12580x8f820100, 0x3c050002, 0xafa20014, 0x8f8600b0,
12590x8f87011c, 0x3c040001, 0x248454c0, 0xc002403,
12600x34a5f000, 0x8f8300b0, 0x3c027f00, 0x621824,
12610x3c020400, 0x10620029, 0x43102b, 0x14400008,
12620x3c022000, 0x3c020100, 0x10620024, 0x3c020200,
12630x10620011, 0x0, 0x8002374, 0x0,
12640x10620008, 0x3c024000, 0x1462001c, 0x0,
12650x8ee20190, 0x24420001, 0xaee20190, 0x8002374,
12660x8ee20190, 0x8ee2018c, 0x24420001, 0xaee2018c,
12670x8002374, 0x8ee2018c, 0x8f82011c, 0x34420002,
12680xaf82011c, 0x8f830104, 0x8f8200b0, 0x34420001,
12690xaf8200b0, 0xaf830104, 0x8f82011c, 0x2403fffd,
12700x431024, 0xaf82011c, 0x8ee201a0, 0x24420001,
12710xaee201a0, 0x8002377, 0x8ee201a0, 0x8f8200b0,
12720x34420001, 0xaf8200b0, 0x8fbf0018, 0x3e00008,
12730x27bd0020, 0x27bdffe0, 0xafbf001c, 0xafb00018,
12740x8f820120, 0xafa20010, 0x8f820124, 0x3c050001,
12750xafa20014, 0x8f8600a0, 0x8f87011c, 0x3c040001,
12760x248454cc, 0xc002403, 0x34a5f000, 0x8f8300a0,
12770x3c027f00, 0x621824, 0x3c020400, 0x10620053,
12780x8021, 0x43102b, 0x14400008, 0x3c042000,
12790x3c020100, 0x1062004d, 0x3c020200, 0x1062003a,
12800x0, 0x80023e0, 0x0, 0x10640003,
12810x3c024000, 0x14620045, 0x0, 0x8f8200a0,
12820x441024, 0x10400006, 0x0, 0x8ee20194,
12830x24420001, 0xaee20194, 0x80023a9, 0x8ee20194,
12840x8ee20198, 0x24420001, 0xaee20198, 0x8ee20198,
12850x8f82011c, 0x34420002, 0xaf82011c, 0x8f82011c,
12860x30420200, 0x1040001b, 0x0, 0x8f8300a0,
12870x8f840124, 0x8f8200ac, 0x14400007, 0x24020001,
12880x3c020001, 0x3442f000, 0x621024, 0x50400001,
12890x24100001, 0x24020001, 0x1200000d, 0xaf8200a0,
12900x8f820124, 0x2442ffe0, 0xaf820124, 0x8f820124,
12910x8f820124, 0x27633000, 0x43102b, 0x10400005,
12920x276237e0, 0xaf820124, 0x80023ca, 0x0,
12930xaf840124, 0x8f82011c, 0x2403fffd, 0x431024,
12940x80023e3, 0xaf82011c, 0x8f82011c, 0x34420002,
12950xaf82011c, 0x8f830124, 0x8f8200a0, 0x34420001,
12960xaf8200a0, 0xaf830124, 0x8f82011c, 0x2403fffd,
12970x431024, 0xaf82011c, 0x8ee2019c, 0x24420001,
12980xaee2019c, 0x80023e3, 0x8ee2019c, 0x8f8200a0,
12990x34420001, 0xaf8200a0, 0x8fbf001c, 0x8fb00018,
13000x3e00008, 0x27bd0020, 0x0, 0x3c020001,
13010x8c425c58, 0x27bdffe8, 0xafbf0014, 0x14400012,
13020xafb00010, 0x3c100001, 0x26105dd0, 0x2002021,
13030xc002488, 0x24052000, 0x26021fe0, 0x3c010001,
13040xac225d94, 0x3c010001, 0xac225d90, 0xaf420250,
13050x24022000, 0xaf500254, 0xaf420258, 0x24020001,
13060x3c010001, 0xac225c58, 0x8fbf0014, 0x8fb00010,
13070x3e00008, 0x27bd0018, 0x3c030001, 0x8c635d94,
13080x8c820000, 0x8fa80010, 0x8fa90014, 0xac620000,
13090x3c020001, 0x8c425d94, 0x8c830004, 0xac430004,
13100xac450008, 0x8f840054, 0x2443ffe0, 0xac460010,
13110xac470014, 0xac480018, 0xac49001c, 0x3c010001,
13120xac235d94, 0xac44000c, 0x3c020001, 0x24425dd0,
13130x62182b, 0x10600005, 0x0, 0x3c020001,
13140x8c425d90, 0x3c010001, 0xac225d94, 0x3c030001,
13150x8c635d94, 0x3c020001, 0x8c425c40, 0xac620000,
13160x3c030001, 0x8c635d94, 0x3c020001, 0x8c425c40,
13170xac620004, 0x3e00008, 0xaf430250, 0x3c030001,
13180x8c635d94, 0x3c020001, 0x8c425c40, 0x27bdffd0,
13190xafb40020, 0x8fb40040, 0xafb00010, 0x808021,
13200xafb50024, 0x8fb50044, 0x8fa40048, 0xafb10014,
13210xa08821, 0xafbf0028, 0xafb3001c, 0xafb20018,
13220xac620000, 0x3c050001, 0x8ca55d94, 0x3c020001,
13230x8c425c40, 0xc09021, 0xe09821, 0x10800006,
13240xaca20004, 0x24a50008, 0xc002490, 0x24060018,
13250x800244e, 0x0, 0x24a40008, 0xc002488,
13260x24050018, 0x3c020001, 0x8c425d94, 0x3c050001,
13270x24a55dd0, 0x2442ffe0, 0x3c010001, 0xac225d94,
13280x45102b, 0x10400005, 0x0, 0x3c020001,
13290x8c425d90, 0x3c010001, 0xac225d94, 0x3c030001,
13300x8c635d94, 0x8e020000, 0xac620000, 0x3c030001,
13310x8c635d94, 0x8e020004, 0xac620004, 0xac710008,
13320x8f840054, 0x2462ffe0, 0x3c010001, 0xac225d94,
13330x45102b, 0xac720010, 0xac730014, 0xac740018,
13340xac75001c, 0x10400005, 0xac64000c, 0x3c020001,
13350x8c425d90, 0x3c010001, 0xac225d94, 0x3c030001,
13360x8c635d94, 0x3c020001, 0x8c425c40, 0xac620000,
13370x3c030001, 0x8c635d94, 0x3c020001, 0x8c425c40,
13380xac620004, 0xaf430250, 0x8fbf0028, 0x8fb50024,
13390x8fb40020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
13400x8fb00010, 0x3e00008, 0x27bd0030, 0x10a00005,
13410x0, 0xac800000, 0x24a5fffc, 0x14a0fffd,
13420x24840004, 0x3e00008, 0x0, 0x10c00007,
13430x0, 0x8c820000, 0x24840004, 0x24c6fffc,
13440xaca20000, 0x14c0fffb, 0x24a50004, 0x3e00008,
13450x0, 0x10c00007, 0x0, 0x8ca20000,
13460x24a50004, 0x24c6fffc, 0xac820000, 0x14c0fffb,
13470x24840004, 0x3e00008, 0x0, 0x3e00008,
13480x0, 0x27bdffd8, 0xafbf0020, 0x8ee304e4,
13490x8ee204e0, 0x10620436, 0x0, 0x8ee204e4,
13500x8ee304fc, 0x21100, 0x626021, 0x95870008,
13510x8d8a0000, 0x8d8b0004, 0x958d000a, 0x8ee2725c,
13520x8ee3726c, 0x30e4ffff, 0x441021, 0x62182b,
13530x10600015, 0x31a20004, 0x8f8200d8, 0x8ee37258,
13540x431023, 0xaee2726c, 0x8ee2726c, 0x1c400003,
13550x3c030001, 0x431021, 0xaee2726c, 0x8ee2725c,
13560x8ee3726c, 0x441021, 0x62182b, 0x10600006,
13570x31a20004, 0x8ee201b8, 0x24420001, 0xaee201b8,
13580x80028e1, 0x8ee201b8, 0x10400240, 0x31a20200,
13590x1040014d, 0x4821, 0x96e2045a, 0x30420010,
13600x10400149, 0x0, 0x8f840100, 0x27623000,
13610x24850020, 0xa2102b, 0x50400001, 0x27652800,
13620x8f820108, 0x10a20004, 0x0, 0x8f820104,
13630x14a20006, 0x2402000c, 0x8ee201a8, 0x24420001,
13640xaee201a8, 0x800252c, 0x8ee201a8, 0xac8a0000,
13650xac8b0004, 0x8ee37264, 0x24060005, 0xa482000e,
13660xac860018, 0xac830008, 0x8ee204e4, 0xac82001c,
13670x8ee204c8, 0xac820010, 0xaf850100, 0x92e204ec,
13680x14400036, 0x24090001, 0x8ee24e28, 0x210c0,
13690x24424e38, 0x2e22021, 0x8c820000, 0x1446001f,
13700x0, 0x8ee34e28, 0x8ee24e2c, 0x1062001b,
13710x24030040, 0x8c820004, 0x24420001, 0xac820004,
13720x8ee24e2c, 0x8ee54e28, 0x24420001, 0x10430007,
13730x0, 0x8ee24e2c, 0x24420001, 0x10a20005,
13740x0, 0x8002516, 0x0, 0x14a00005,
13750x0, 0x8f820108, 0x24420020, 0xaf820108,
13760x8f820108, 0x8c820004, 0x2c420011, 0x50400013,
13770xac800000, 0x800252c, 0x0, 0x8ee24e28,
13780x24030040, 0x24420001, 0x50430003, 0x1021,
13790x8ee24e28, 0x24420001, 0xaee24e28, 0x8ee24e28,
13800x210c0, 0x24424e38, 0x2e22021, 0x24020005,
13810xac820000, 0x24020001, 0xac820004, 0x1520000a,
13820x3c040001, 0xafab0010, 0x8ee27264, 0x3c040001,
13830x24845730, 0x3c050004, 0xafa20014, 0x8ee604e4,
13840x80028be, 0x34a5f114, 0x8ee27264, 0x34843800,
13850x3641821, 0x24420010, 0x43102b, 0x14400073,
13860x0, 0x8ee27264, 0x24480010, 0x3641021,
13870x102102b, 0x14400002, 0x3c02ffff, 0x1024021,
13880x8f850100, 0x27623000, 0x24a60020, 0xc2102b,
13890x50400001, 0x27662800, 0x8f820108, 0x10c20004,
13900x0, 0x8f820104, 0x14c20007, 0x2563000c,
13910x8ee201a8, 0x4821, 0x24420001, 0xaee201a8,
13920x80025a0, 0x8ee201a8, 0x2c64000c, 0x1441021,
13930xaca20000, 0xaca30004, 0x24e2fff4, 0xa4a2000e,
13940x24020006, 0xaca80008, 0xaca20018, 0x8ee204e4,
13950xaca2001c, 0x8ee204c8, 0x3c030002, 0x431025,
13960xaca20010, 0xaf860100, 0x92e204ec, 0x14400037,
13970x24090001, 0x8ee24e28, 0x210c0, 0x24424e38,
13980x2e22021, 0x8c830000, 0x24020005, 0x1462001f,
13990x0, 0x8ee34e28, 0x8ee24e2c, 0x1062001b,
14000x24030040, 0x8c820004, 0x24420001, 0xac820004,
14010x8ee24e2c, 0x8ee54e28, 0x24420001, 0x10430007,
14020x0, 0x8ee24e2c, 0x24420001, 0x10a20005,
14030x0, 0x800258a, 0x0, 0x14a00005,
14040x0, 0x8f820108, 0x24420020, 0xaf820108,
14050x8f820108, 0x8c820004, 0x2c420011, 0x50400013,
14060xac800000, 0x80025a0, 0x0, 0x8ee24e28,
14070x24030040, 0x24420001, 0x50430003, 0x1021,
14080x8ee24e28, 0x24420001, 0xaee24e28, 0x8ee24e28,
14090x210c0, 0x24424e38, 0x2e22021, 0x24020005,
14100xac820000, 0x24020001, 0xac820004, 0x1520000a,
14110x2508fffc, 0xafab0010, 0x8ee27264, 0x3c040001,
14120x24845730, 0x3c050004, 0xafa20014, 0x8ee604e4,
14130x80028be, 0x34a5f125, 0x34028100, 0xa5020000,
14140x9582000e, 0x800261d, 0xa5020002, 0x8f850100,
14150x27623000, 0x24a60020, 0xc2102b, 0x50400001,
14160x27662800, 0x8f820108, 0x10c20004, 0x0,
14170x8f820104, 0x14c20007, 0x2563000c, 0x8ee201a8,
14180x4821, 0x24420001, 0xaee201a8, 0x800260d,
14190x8ee201a8, 0x2c64000c, 0x1441021, 0xaca20000,
14200xaca30004, 0x8ee37264, 0x24e2fff4, 0xa4a2000e,
14210x24020006, 0xaca20018, 0x24630010, 0xaca30008,
14220x8ee204e4, 0xaca2001c, 0x8ee204c8, 0x3c030002,
14230x431025, 0xaca20010, 0xaf860100, 0x92e204ec,
14240x14400037, 0x24090001, 0x8ee24e28, 0x210c0,
14250x24424e38, 0x2e22021, 0x8c830000, 0x24020005,
14260x1462001f, 0x0, 0x8ee34e28, 0x8ee24e2c,
14270x1062001b, 0x24030040, 0x8c820004, 0x24420001,
14280xac820004, 0x8ee24e2c, 0x8ee54e28, 0x24420001,
14290x10430007, 0x0, 0x8ee24e2c, 0x24420001,
14300x10a20005, 0x0, 0x80025f7, 0x0,
14310x14a00005, 0x0, 0x8f820108, 0x24420020,
14320xaf820108, 0x8f820108, 0x8c820004, 0x2c420011,
14330x50400013, 0xac800000, 0x800260d, 0x0,
14340x8ee24e28, 0x24030040, 0x24420001, 0x50430003,
14350x1021, 0x8ee24e28, 0x24420001, 0xaee24e28,
14360x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
14370x24020005, 0xac820000, 0x24020001, 0xac820004,
14380x1520000a, 0x34028100, 0xafab0010, 0x8ee27264,
14390x3c040001, 0x24845730, 0x3c050004, 0xafa20014,
14400x8ee604e4, 0x80028be, 0x34a5f015, 0x8ee37264,
14410xa462000c, 0x8ee37264, 0x9582000e, 0xa462000e,
14420x8002681, 0x24e70004, 0x8f840100, 0x27623000,
14430x24850020, 0xa2102b, 0x50400001, 0x27652800,
14440x8f820108, 0x10a20004, 0x0, 0x8f820104,
14450x14a20007, 0x24020006, 0x8ee201a8, 0x4821,
14460x24420001, 0xaee201a8, 0x8002677, 0x8ee201a8,
14470xac8a0000, 0xac8b0004, 0x8ee37264, 0xa487000e,
14480xac820018, 0xac830008, 0x8ee204e4, 0xac82001c,
14490x8ee204c8, 0x3c030002, 0x431025, 0xac820010,
14500xaf850100, 0x92e204ec, 0x14400037, 0x24090001,
14510x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
14520x8c830000, 0x24020005, 0x1462001f, 0x0,
14530x8ee34e28, 0x8ee24e2c, 0x1062001b, 0x24030040,
14540x8c820004, 0x24420001, 0xac820004, 0x8ee24e2c,
14550x8ee54e28, 0x24420001, 0x10430007, 0x0,
14560x8ee24e2c, 0x24420001, 0x10a20005, 0x0,
14570x8002661, 0x0, 0x14a00005, 0x0,
14580x8f820108, 0x24420020, 0xaf820108, 0x8f820108,
14590x8c820004, 0x2c420011, 0x50400013, 0xac800000,
14600x8002677, 0x0, 0x8ee24e28, 0x24030040,
14610x24420001, 0x50430003, 0x1021, 0x8ee24e28,
14620x24420001, 0xaee24e28, 0x8ee24e28, 0x210c0,
14630x24424e38, 0x2e22021, 0x24020005, 0xac820000,
14640x24020001, 0xac820004, 0x15200009, 0x3c050004,
14650xafab0010, 0x8ee27264, 0x3c040001, 0x24845730,
0xafa20014, 0x8ee604e4, 0x80028be, 0x34a5f004,
0x8ee2725c, 0x30e7ffff, 0x471021, 0xaee2725c,
0x8ee204e4, 0x8ee304fc, 0x8ee47258, 0x21100,
0x431021, 0xac44000c, 0x8ee27258, 0xafa20018,
0x8ee3725c, 0xafa3001c, 0x8ee2725c, 0x2c42003c,
0x10400004, 0x24620001, 0x2403fffe, 0x431024,
0xafa2001c, 0x8ee27264, 0x3c060001, 0x34c63800,
0x8ee3725c, 0x2405fff8, 0x471021, 0x24420007,
0x451024, 0x24630007, 0xaee27258, 0x8ee2726c,
0x8ee47258, 0x651824, 0x431023, 0xaee2726c,
0x3661021, 0x82202b, 0x14800004, 0x3c03ffff,
0x8ee27258, 0x431021, 0xaee27258, 0x8ee27258,
0xaee27264, 0x8f8200f0, 0x24470008, 0x27621800,
0xe2102b, 0x50400001, 0x27671000, 0x8f8200f4,
0x14e20007, 0x0, 0x8ee201b4, 0x4821,
0x24420001, 0xaee201b4, 0x80026c4, 0x8ee201b4,
0x8f8200f0, 0x24090001, 0x8fa30018, 0x8fa4001c,
0xac430000, 0xac440004, 0xaf8700f0, 0x15200012,
0xd1142, 0x8f8200f0, 0xafa20010, 0x8f8200f4,
0x3c040001, 0x2484573c, 0xafa20014, 0x8fa60018,
0x8fa7001c, 0x3c050004, 0xc002403, 0x34a5f005,
0x8ee20088, 0x24420001, 0xaee20088, 0x8ee20088,
0x80028d3, 0xaee0725c, 0x30430003, 0x24020002,
0x10620016, 0x28620003, 0x10400005, 0x24020001,
0x10620008, 0x0, 0x8002703, 0x0,
0x24020003, 0x10620017, 0x0, 0x8002703,
0x0, 0x8ee200e8, 0x8ee300ec, 0x24630001,
0x2c640001, 0x441021, 0xaee200e8, 0xaee300ec,
0x8ee200e8, 0x8002703, 0x8ee300ec, 0x8ee200f0,
0x8ee300f4, 0x24630001, 0x2c640001, 0x441021,
0xaee200f0, 0xaee300f4, 0x8ee200f0, 0x8002703,
0x8ee300f4, 0x8ee200f8, 0x8ee300fc, 0x24630001,
0x2c640001, 0x441021, 0xaee200f8, 0xaee300fc,
0x8ee200f8, 0x8ee300fc, 0x8ee2725c, 0x8ee400e0,
0x8ee500e4, 0x401821, 0x1021, 0xa32821,
0xa3302b, 0x822021, 0x862021, 0xaee400e0,
0xaee500e4, 0x80028d3, 0xaee0725c, 0x30e2ffff,
0x104001c1, 0x31a20200, 0x1040014d, 0x4821,
0x96e2045a, 0x30420010, 0x10400149, 0x0,
0x8f840100, 0x27623000, 0x24850020, 0xa2102b,
0x50400001, 0x27652800, 0x8f820108, 0x10a20004,
0x0, 0x8f820104, 0x14a20006, 0x2402000c,
0x8ee201a8, 0x24420001, 0xaee201a8, 0x800276e,
0x8ee201a8, 0xac8a0000, 0xac8b0004, 0x8ee37264,
0x24060005, 0xa482000e, 0xac860018, 0xac830008,
0x8ee204e4, 0xac82001c, 0x8ee204c8, 0xac820010,
0xaf850100, 0x92e204ec, 0x14400036, 0x24090001,
0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
0x8c820000, 0x1446001f, 0x0, 0x8ee34e28,
0x8ee24e2c, 0x1062001b, 0x24030040, 0x8c820004,
0x24420001, 0xac820004, 0x8ee24e2c, 0x8ee54e28,
0x24420001, 0x10430007, 0x0, 0x8ee24e2c,
0x24420001, 0x10a20005, 0x0, 0x8002758,
0x0, 0x14a00005, 0x0, 0x8f820108,
0x24420020, 0xaf820108, 0x8f820108, 0x8c820004,
0x2c420011, 0x50400013, 0xac800000, 0x800276e,
0x0, 0x8ee24e28, 0x24030040, 0x24420001,
0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
0x2e22021, 0x24020005, 0xac820000, 0x24020001,
0xac820004, 0x1520000a, 0x3c040001, 0xafab0010,
0x8ee27264, 0x3c040001, 0x24845730, 0x3c050004,
0xafa20014, 0x8ee604e4, 0x80028be, 0x34a5f014,
0x8ee27264, 0x34843800, 0x3641821, 0x24420010,
0x43102b, 0x14400073, 0x0, 0x8ee27264,
0x24480010, 0x3641021, 0x102102b, 0x14400002,
0x3c02ffff, 0x1024021, 0x8f850100, 0x27623000,
0x24a60020, 0xc2102b, 0x50400001, 0x27662800,
0x8f820108, 0x10c20004, 0x0, 0x8f820104,
0x14c20007, 0x2563000c, 0x8ee201a8, 0x4821,
0x24420001, 0xaee201a8, 0x80027e2, 0x8ee201a8,
0x2c64000c, 0x1441021, 0xaca20000, 0xaca30004,
0x24e2fff4, 0xa4a2000e, 0x24020006, 0xaca80008,
0xaca20018, 0x8ee204e4, 0xaca2001c, 0x8ee204c8,
0x3c030002, 0x431025, 0xaca20010, 0xaf860100,
0x92e204ec, 0x14400037, 0x24090001, 0x8ee24e28,
0x210c0, 0x24424e38, 0x2e22021, 0x8c830000,
0x24020005, 0x1462001f, 0x0, 0x8ee34e28,
0x8ee24e2c, 0x1062001b, 0x24030040, 0x8c820004,
0x24420001, 0xac820004, 0x8ee24e2c, 0x8ee54e28,
0x24420001, 0x10430007, 0x0, 0x8ee24e2c,
0x24420001, 0x10a20005, 0x0, 0x80027cc,
0x0, 0x14a00005, 0x0, 0x8f820108,
0x24420020, 0xaf820108, 0x8f820108, 0x8c820004,
0x2c420011, 0x50400013, 0xac800000, 0x80027e2,
0x0, 0x8ee24e28, 0x24030040, 0x24420001,
0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
0x2e22021, 0x24020005, 0xac820000, 0x24020001,
0xac820004, 0x1520000a, 0x2508fffc, 0xafab0010,
0x8ee27264, 0x3c040001, 0x24845730, 0x3c050004,
0xafa20014, 0x8ee604e4, 0x80028be, 0x34a5f015,
0x34028100, 0xa5020000, 0x9582000e, 0x800285f,
0xa5020002, 0x8f850100, 0x27623000, 0x24a60020,
0xc2102b, 0x50400001, 0x27662800, 0x8f820108,
0x10c20004, 0x0, 0x8f820104, 0x14c20007,
0x2563000c, 0x8ee201a8, 0x4821, 0x24420001,
0xaee201a8, 0x800284f, 0x8ee201a8, 0x2c64000c,
0x1441021, 0xaca20000, 0xaca30004, 0x8ee37264,
0x24e2fff4, 0xa4a2000e, 0x24020006, 0xaca20018,
0x24630010, 0xaca30008, 0x8ee204e4, 0xaca2001c,
0x8ee204c8, 0x3c030002, 0x431025, 0xaca20010,
0xaf860100, 0x92e204ec, 0x14400037, 0x24090001,
0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
0x8c830000, 0x24020005, 0x1462001f, 0x0,
0x8ee34e28, 0x8ee24e2c, 0x1062001b, 0x24030040,
0x8c820004, 0x24420001, 0xac820004, 0x8ee24e2c,
0x8ee54e28, 0x24420001, 0x10430007, 0x0,
0x8ee24e2c, 0x24420001, 0x10a20005, 0x0,
0x8002839, 0x0, 0x14a00005, 0x0,
0x8f820108, 0x24420020, 0xaf820108, 0x8f820108,
0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
0x800284f, 0x0, 0x8ee24e28, 0x24030040,
0x24420001, 0x50430003, 0x1021, 0x8ee24e28,
0x24420001, 0xaee24e28, 0x8ee24e28, 0x210c0,
0x24424e38, 0x2e22021, 0x24020005, 0xac820000,
0x24020001, 0xac820004, 0x1520000a, 0x34028100,
0xafab0010, 0x8ee27264, 0x3c040001, 0x24845730,
0x3c050004, 0xafa20014, 0x8ee604e4, 0x80028be,
0x34a5f016, 0x8ee37264, 0xa462000c, 0x8ee37264,
0x9582000e, 0xa462000e, 0x80028c2, 0x24e70004,
0x8f830100, 0x27623000, 0x24640020, 0x82102b,
0x50400001, 0x27642800, 0x8f820108, 0x10820004,
0x0, 0x8f820104, 0x14820007, 0x24050005,
0x8ee201a8, 0x4821, 0x24420001, 0xaee201a8,
0x80028b6, 0x8ee201a8, 0xac6a0000, 0xac6b0004,
0x8ee27264, 0xa467000e, 0xac650018, 0xac620008,
0x8ee204e4, 0xac62001c, 0x8ee204c8, 0xac620010,
0xaf840100, 0x92e204ec, 0x14400036, 0x24090001,
0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
0x8c820000, 0x1445001f, 0x0, 0x8ee34e28,
0x8ee24e2c, 0x1062001b, 0x24030040, 0x8c820004,
0x24420001, 0xac820004, 0x8ee24e2c, 0x8ee54e28,
0x24420001, 0x10430007, 0x0, 0x8ee24e2c,
0x24420001, 0x10a20005, 0x0, 0x80028a0,
0x0, 0x14a00005, 0x0, 0x8f820108,
0x24420020, 0xaf820108, 0x8f820108, 0x8c820004,
0x2c420011, 0x50400013, 0xac800000, 0x80028b6,
0x0, 0x8ee24e28, 0x24030040, 0x24420001,
0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
0x2e22021, 0x24020005, 0xac820000, 0x24020001,
0xac820004, 0x1520000b, 0x3c050004, 0x3c040001,
0x24845748, 0xafab0010, 0xafa00014, 0x8ee604e4,
0x34a5f017, 0xc002403, 0x30e7ffff, 0x80028e1,
0x0, 0x8ee27264, 0x3c050001, 0x30e4ffff,
0x441021, 0xaee27264, 0x8ee2725c, 0x8ee37264,
0x34a53800, 0x441021, 0xaee2725c, 0x3651021,
0x62182b, 0x14600004, 0x3c03ffff, 0x8ee27264,
0x431021, 0xaee27264, 0x8ee304e4, 0x96e20458,
0x24630001, 0x2442ffff, 0x621824, 0xaee304e4,
0x8ee304e4, 0x8ee204e0, 0x14620005, 0x0,
0x8f820060, 0x2403fff7, 0x431024, 0xaf820060,
0x8fbf0020, 0x3e00008, 0x27bd0028, 0x27bdffe0,
0xafbf0018, 0x8ee304e8, 0x8ee204e0, 0x10620189,
0x0, 0x8ee204e8, 0x8ee304fc, 0x21100,
0x621821, 0x94670008, 0x92e204ed, 0x8c680000,
0x8c690004, 0x10400023, 0x946a000a, 0x8ee204c8,
0x34460400, 0x31420200, 0x1040001f, 0x0,
0x96e2045a, 0x30420010, 0x1040001b, 0x3c028000,
0x3c010001, 0x370821, 0xac2283d8, 0x8ee27264,
0x9464000e, 0x3c050001, 0x34a53800, 0x24420004,
0xaee27264, 0x8ee37264, 0x42400, 0x3651021,
0x3c010001, 0x370821, 0xac2483dc, 0x62182b,
0x14600005, 0x24e70004, 0x8ee27264, 0x3c03ffff,
0x431021, 0xaee27264, 0x8ee27264, 0x8002917,
0xaee27258, 0x8ee604c8, 0x8ee2726c, 0x30e4ffff,
0x44102a, 0x10400015, 0x0, 0x8f8200d8,
0x8ee37258, 0x431023, 0xaee2726c, 0x8ee2726c,
0x1c400007, 0x44102a, 0x8ee2726c, 0x3c030001,
0x431021, 0xaee2726c, 0x8ee2726c, 0x44102a,
0x10400006, 0x0, 0x8ee201b8, 0x24420001,
0xaee201b8, 0x8002a72, 0x8ee201b8, 0x3c020001,
0x571021, 0x8c4283d8, 0x54400001, 0x24e7fffc,
0x31420004, 0x104000b9, 0x30e2ffff, 0x3c020001,
0x571021, 0x8c4283d8, 0x1040002f, 0x5021,
0x8f840100, 0x27623000, 0x24850020, 0xa2102b,
0x50400001, 0x27652800, 0x8f820108, 0x10a20032,
0x0, 0x8f820104, 0x10a2002f, 0x24020015,
0xac880000, 0xac890004, 0x8ee37264, 0xa487000e,
0xac820018, 0xac830008, 0x8ee204e8, 0x3c030001,
0x771821, 0x8c6383dc, 0xac860010, 0x431025,
0xac82001c, 0xaf850100, 0x92e204ec, 0x14400066,
0x240a0001, 0x8ee24e28, 0x24030040, 0x24420001,
0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
0x2e21821, 0x24020015, 0xac620000, 0x24020001,
0x80029bf, 0xac620004, 0x8f840100, 0x27623000,
0x24850020, 0xa2102b, 0x50400001, 0x27652800,
0x8f820108, 0x10a20004, 0x0, 0x8f820104,
0x14a20006, 0x24020006, 0x8ee201a8, 0x24420001,
0xaee201a8, 0x80029bf, 0x8ee201a8, 0xac880000,
0xac890004, 0x8ee37264, 0xa487000e, 0xac820018,
0xac830008, 0x8ee204e8, 0xac860010, 0xac82001c,
0xaf850100, 0x92e204ec, 0x14400037, 0x240a0001,
0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
0x8c830000, 0x24020005, 0x1462001f, 0x0,
0x8ee34e28, 0x8ee24e2c, 0x1062001b, 0x24030040,
0x8c820004, 0x24420001, 0xac820004, 0x8ee24e2c,
0x8ee54e28, 0x24420001, 0x10430007, 0x0,
0x8ee24e2c, 0x24420001, 0x10a20005, 0x0,
0x80029a9, 0x0, 0x14a00005, 0x0,
0x8f820108, 0x24420020, 0xaf820108, 0x8f820108,
0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
0x80029bf, 0x0, 0x8ee24e28, 0x24030040,
0x24420001, 0x50430003, 0x1021, 0x8ee24e28,
0x24420001, 0xaee24e28, 0x8ee24e28, 0x210c0,
0x24424e38, 0x2e22021, 0x24020005, 0xac820000,
0x24020001, 0xac820004, 0x1540000a, 0x24020001,
0xafa90010, 0x8ee27264, 0x3c040001, 0x24845730,
0x3c050004, 0xafa20014, 0x8ee604e4, 0x8002a4f,
0x34a5f204, 0xa2e204ed, 0x8ee204e8, 0x8ee304fc,
0x8ee47258, 0x3c060001, 0x34c63800, 0x3c010001,
0x370821, 0xac2083d8, 0x3c010001, 0x370821,
0xac2083dc, 0x21100, 0x431021, 0xac44000c,
0x8ee27264, 0x2405fff8, 0x30e3ffff, 0x431021,
0x24420007, 0x451024, 0x24630007, 0xaee27258,
0x8ee2726c, 0x8ee47258, 0x651824, 0x431023,
0xaee2726c, 0x3661021, 0x82202b, 0x14800004,
0x3c03ffff, 0x8ee27258, 0x431021, 0xaee27258,
0x8ee27258, 0x8002a64, 0xaee27264, 0x10400073,
0x0, 0x8f830100, 0x27623000, 0x24640020,
0x82102b, 0x14400002, 0x5021, 0x27642800,
0x8f820108, 0x10820004, 0x0, 0x8f820104,
0x14820006, 0x24050005, 0x8ee201a8, 0x24420001,
0xaee201a8, 0x8002a46, 0x8ee201a8, 0xac680000,
0xac690004, 0x8ee27264, 0xa467000e, 0xac650018,
0xac620008, 0x8ee204e8, 0xac660010, 0xac62001c,
0xaf840100, 0x92e204ec, 0x14400036, 0x240a0001,
0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
0x8c820000, 0x1445001f, 0x0, 0x8ee34e28,
0x8ee24e2c, 0x1062001b, 0x24030040, 0x8c820004,
0x24420001, 0xac820004, 0x8ee24e2c, 0x8ee54e28,
0x24420001, 0x10430007, 0x0, 0x8ee24e2c,
0x24420001, 0x10a20005, 0x0, 0x8002a30,
0x0, 0x14a00005, 0x0, 0x8f820108,
0x24420020, 0xaf820108, 0x8f820108, 0x8c820004,
0x2c420011, 0x50400013, 0xac800000, 0x8002a46,
0x0, 0x8ee24e28, 0x24030040, 0x24420001,
0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
0x2e22021, 0x24020005, 0xac820000, 0x24020001,
0xac820004, 0x1540000c, 0x30e5ffff, 0x3c040001,
0x24845748, 0x3c050004, 0xafa90010, 0xafa00014,
0x8ee604e4, 0x34a5f237, 0xc002403, 0x30e7ffff,
0x8002a72, 0x0, 0x8ee27264, 0x451021,
0xaee27264, 0x8ee2726c, 0x8ee37264, 0x3c040001,
0x34843800, 0xa2e004ed, 0x451023, 0xaee2726c,
0x3641021, 0x62182b, 0x14600004, 0x3c03ffff,
0x8ee27264, 0x431021, 0xaee27264, 0x8ee304e8,
0x96e20458, 0x24630001, 0x2442ffff, 0x621824,
0xaee304e8, 0x8ee304e8, 0x8ee204e0, 0x14620005,
0x0, 0x8f820060, 0x2403fff7, 0x431024,
0xaf820060, 0x8fbf0018, 0x3e00008, 0x27bd0020,
0x27bdffe0, 0xafbf001c, 0xafb00018, 0x8f820100,
0x8ee34e2c, 0x8f820104, 0x8f850108, 0x24020040,
0x24630001, 0x50620003, 0x1021, 0x8ee24e2c,
0x24420001, 0xaee24e2c, 0x8ee24e2c, 0x8ee34e2c,
0x210c0, 0x24424e38, 0x2e22021, 0x8ee24e28,
0x8c870004, 0x14620007, 0xa03021, 0x8f820108,
0x24420020, 0xaf820108, 0x8f820108, 0x8002aa2,
0xac800000, 0x8ee24e2c, 0x24030040, 0x24420001,
0x50430003, 0x1021, 0x8ee24e2c, 0x24420001,
0x210c0, 0x24424e38, 0x2e22021, 0x8c820004,
0x8f830108, 0x21140, 0x621821, 0xaf830108,
0xac800000, 0x8cc20018, 0x2443fffe, 0x2c620013,
0x104000c1, 0x31080, 0x3c010001, 0x220821,
0x8c225770, 0x400008, 0x0, 0x8ee204f0,
0x471021, 0xaee204f0, 0x8ee204f0, 0x8f43023c,
0x43102b, 0x144000be, 0x0, 0x8ee304e4,
0x8ee204f8, 0x506200ba, 0xa2e004f4, 0x8f830120,
0x27623800, 0x24660020, 0xc2102b, 0x50400001,
0x27663000, 0x8f820128, 0x10c20004, 0x0,
0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
0x8021, 0x24420001, 0xaee201a4, 0x8002b12,
0x8ee201a4, 0x8ee204e4, 0xac62001c, 0x8ee404b0,
0x8ee504b4, 0x2462001c, 0xac620008, 0x24020008,
0xa462000e, 0x24020011, 0xac620018, 0xac640000,
0xac650004, 0x8ee204c4, 0xac620010, 0xaf860120,
0x92e24e20, 0x14400037, 0x24100001, 0x8ee24e30,
0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
0x24020012, 0x1462001f, 0x0, 0x8ee34e30,
0x8ee24e34, 0x1062001b, 0x24030040, 0x8c820004,
0x24420001, 0xac820004, 0x8ee24e34, 0x8ee54e30,
0x24420001, 0x10430007, 0x0, 0x8ee24e34,
0x24420001, 0x10a20005, 0x0, 0x8002afc,
0x0, 0x14a00005, 0x0, 0x8f820128,
0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
0x2c420011, 0x50400013, 0xac800000, 0x8002b12,
0x0, 0x8ee24e30, 0x24030040, 0x24420001,
0x50430003, 0x1021, 0x8ee24e30, 0x24420001,
0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
0x2e22021, 0x24020012, 0xac820000, 0x24020001,
0xac820004, 0x5600000b, 0x24100001, 0x8ee204e4,
0x3c040001, 0x24845754, 0xafa00014, 0xafa20010,
0x8ee60608, 0x8f470228, 0x3c050009, 0xc002403,
0x34a5f006, 0x16000003, 0x24020001, 0x8002b71,
0xa2e204f4, 0x8ee20170, 0x24420001, 0xaee20170,
0x8ee20170, 0x8ee204e4, 0xa2e004f4, 0xaee004f0,
0xaee204f8, 0x8f42023c, 0x50400045, 0xaee07274,
0x8ee20184, 0x24420001, 0xaee20184, 0x8ee20184,
0x8002b71, 0xaee07274, 0x8ee20504, 0x24030040,
0x24420001, 0x50430003, 0x1021, 0x8ee20504,
0x24420001, 0xaee20504, 0x8ee20504, 0x8cc30018,
0x21080, 0x571021, 0x8c440508, 0x24020003,
0x1462000f, 0x0, 0x3c020001, 0x571021,
0x904283b1, 0x10400014, 0x0, 0x8ee201d0,
0x8ee35240, 0x441021, 0xaee201d0, 0x8ee201d8,
0x641821, 0x306300ff, 0x8002b59, 0xaee35240,
0x8ee201cc, 0x8ee30e10, 0x441021, 0xaee201cc,
0x8ee201d8, 0x641821, 0x306301ff, 0xaee30e10,
0x441021, 0xaee201d8, 0x8ee20000, 0x34420040,
0x8002b71, 0xaee20000, 0x8ee2014c, 0x3c010001,
0x370821, 0xa02083e0, 0x24420001, 0xaee2014c,
0x8002b71, 0x8ee2014c, 0x94c7000e, 0x8cc2001c,
0x3c040001, 0x24845760, 0xafa60014, 0xafa20010,
0x8cc60018, 0x3c050008, 0xc002403, 0x34a50910,
0x8fbf001c, 0x8fb00018, 0x3e00008, 0x27bd0020,
0x27bdff98, 0xafbf0060, 0xafbe005c, 0xafb60058,
0xafb50054, 0xafb40050, 0xafb3004c, 0xafb20048,
0xafb10044, 0xafb00040, 0x8f830108, 0x8f820104,
0xafa00024, 0x106203e7, 0xafa0002c, 0x3c1e0001,
0x37de3800, 0x3c0bffff, 0x8f930108, 0x8e620018,
0x8f830104, 0x2443fffe, 0x2c620014, 0x104003cf,
0x31080, 0x3c010001, 0x220821, 0x8c2257c0,
0x400008, 0x0, 0x9663000e, 0x8ee2725c,
0x8ee404f0, 0x431021, 0xaee2725c, 0x8e63001c,
0x96e20458, 0x24840001, 0xaee404f0, 0x24630001,
0x2442ffff, 0x621824, 0xaee304e4, 0x8f42023c,
0x82202b, 0x148003b9, 0x0, 0x8f830120,
0x27623800, 0x24660020, 0xc2102b, 0x50400001,
0x27663000, 0x8f820128, 0x10c20004, 0x0,
0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
0x8021, 0x24420001, 0xaee201a4, 0x8002bfe,
0x8ee201a4, 0x8ee204e4, 0xac62001c, 0x8ee404b0,
0x8ee504b4, 0x2462001c, 0xac620008, 0x24020008,
0xa462000e, 0x24020011, 0xac620018, 0xac640000,
0xac650004, 0x8ee204c4, 0xac620010, 0xaf860120,
0x92e24e20, 0x14400037, 0x24100001, 0x8ee24e30,
0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
0x24020012, 0x1462001f, 0x0, 0x8ee34e30,
0x8ee24e34, 0x1062001b, 0x240c0040, 0x8c820004,
0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
0x24420001, 0x104c0007, 0x0, 0x8ee24e34,
0x24420001, 0x10620005, 0x0, 0x8002be8,
0x0, 0x14600005, 0x0, 0x8f820128,
0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
0x2c420011, 0x50400013, 0xac800000, 0x8002bfe,
0x0, 0x8ee24e30, 0x240c0040, 0x24420001,
0x504c0003, 0x1021, 0x8ee24e30, 0x24420001,
0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
0x2e22021, 0x24020012, 0x240c0001, 0xac820000,
0xac8c0004, 0x5600000d, 0x24100001, 0x8ee204e4,
0x3c040001, 0x24845754, 0xafa00014, 0xafa20010,
0x8ee60608, 0x8f470228, 0x3c050009, 0x34a5f006,
0xc002403, 0xafab0038, 0x8fab0038, 0x1200030a,
0x240c0001, 0x8002f19, 0x0, 0x966c001c,
0xafac002c, 0x9662001e, 0x3c0c8000, 0xafac0024,
0xae62001c, 0x8e75001c, 0x8ee204fc, 0x8ee404fc,
0x151900, 0x621021, 0x8c52000c, 0x92e27b98,
0x641821, 0x9476000a, 0x14400003, 0x32c20002,
0xaef27ba4, 0xaef57b9c, 0x1040004b, 0x8021,
0x96e2045a, 0x30420002, 0x10400047, 0x0,
0x8e63001c, 0x8ee204fc, 0x32100, 0x821021,
0x8c42000c, 0x37e1821, 0x24420022, 0x43102b,
0x1440000a, 0x24050014, 0x8ee204fc, 0x821021,
0x8c44000c, 0xafab0038, 0xc002f75, 0x2484000e,
0x8fab0038, 0x8002c52, 0x3050ffff, 0x8ee204fc,
0x821021, 0x8c42000c, 0x9450000e, 0x94430010,
0x94440012, 0x94450014, 0x2038021, 0x2048021,
0x2058021, 0x94430016, 0x94440018, 0x9445001a,
0x2038021, 0x2048021, 0x2058021, 0x9443001c,
0x9444001e, 0x94420020, 0x2038021, 0x2048021,
0x2028021, 0x101c02, 0x3202ffff, 0x628021,
0x8e63001c, 0x8ee204fc, 0x102402, 0x32900,
0xa21021, 0x8c43000c, 0x3202ffff, 0x828021,
0x37e1021, 0x24630018, 0x62182b, 0x14600009,
0x0, 0x8ee204fc, 0xa21021, 0x8c43000c,
0x101027, 0x3c01ffff, 0x230821, 0x8002c6f,
0xa4220018, 0x8ee204fc, 0xa21021, 0x8c43000c,
0x101027, 0xa4620018, 0x96e2045a, 0x8821,
0x30420008, 0x14400063, 0xa021, 0x8e63001c,
0x8ee204fc, 0x33100, 0xc21021, 0x8c42000c,
0x37e1821, 0x24420022, 0x43102b, 0x14400035,
0x0, 0x8ee204fc, 0xc21021, 0x8c42000c,
0x24470010, 0x37e1021, 0xe2102b, 0x50400001,
0xeb3821, 0x8ee204fc, 0x94f10000, 0xc21021,
0x8c42000c, 0x24470016, 0x37e1021, 0xe2102b,
0x14400002, 0x2634ffec, 0xeb3821, 0x8ee204fc,
0x90e30001, 0xc21021, 0x8c42000c, 0x2447001a,
0x37e1021, 0xe2102b, 0x14400002, 0x2838821,
0xeb3821, 0x94e20000, 0x24e70002, 0x2228821,
0x37e1021, 0xe2102b, 0x50400001, 0xeb3821,
0x94e20000, 0x24e70002, 0x2228821, 0x37e1021,
0xe2102b, 0x50400001, 0xeb3821, 0x94e20000,
0x24e70002, 0x2228821, 0x37e1021, 0xe2102b,
0x50400001, 0xeb3821, 0x94e20000, 0x8002cd0,
0x2228821, 0x8ee204fc, 0xc21021, 0x8c43000c,
0x8ee204fc, 0x94710010, 0x8ee304fc, 0xc21021,
0x8c44000c, 0xc31821, 0x8c62000c, 0x2634ffec,
0x90840017, 0x8ee304fc, 0x9442001a, 0x2848821,
0xc31821, 0x8c65000c, 0x8ee304fc, 0x2228821,
0x8ee204fc, 0xc31821, 0xc21021, 0x8c44000c,
0x8c62000c, 0x94a3001c, 0x9484001e, 0x94420020,
0x2238821, 0x2248821, 0x2228821, 0x111c02,
0x3222ffff, 0x628821, 0x111c02, 0x3222ffff,
0x628821, 0x32c20001, 0x104000b2, 0x0,
0x96e2045a, 0x30420001, 0x104000ae, 0x32c20080,
0x10400008, 0x0, 0x92e27b98, 0x14400005,
0x0, 0x240c0001, 0xa2ec7b98, 0xaef57b9c,
0xaef27ba4, 0x8ee304fc, 0x151100, 0x431021,
0x8c47000c, 0x37e1821, 0x24e2000e, 0x43102b,
0x14400008, 0xe02021, 0x2405000e, 0xc002f75,
0xafab0038, 0x3042ffff, 0x8fab0038, 0x8002d09,
0x2028021, 0x94e60000, 0x24e70002, 0x94e50000,
0x24e70002, 0x94e30000, 0x24e70002, 0x94e20000,
0x24e70002, 0x94e40000, 0x24e70002, 0x2068021,
0x2058021, 0x2038021, 0x2028021, 0x94e20000,
0x94e30002, 0x2048021, 0x2028021, 0x2038021,
0x101c02, 0x3202ffff, 0x628021, 0x101c02,
0x3202ffff, 0x8ee47b9c, 0x628021, 0x14950004,
0x3205ffff, 0x96620016, 0x8002d17, 0x512021,
0x96620016, 0x542021, 0x41402, 0x3083ffff,
0x432021, 0x852023, 0x41402, 0x822021,
0x3084ffff, 0x50800001, 0x3404ffff, 0x8ee27ba4,
0x24430017, 0x37e1021, 0x62102b, 0x50400001,
0x6b1821, 0x90630000, 0x24020011, 0x14620031,
0x24020006, 0x8ee27ba4, 0x37e1821, 0x24420028,
0x43102b, 0x14400018, 0x0, 0x8ee27b9c,
0x12a2000a, 0x32c20100, 0x8ee27ba4, 0x3c01ffff,
0x220821, 0x94220028, 0x822021, 0x41c02,
0x3082ffff, 0x622021, 0x32c20100, 0x14400004,
0x41027, 0x92e27b98, 0x14400002, 0x41027,
0x3044ffff, 0x8ee27ba4, 0x3c01ffff, 0x220821,
0x8002d8a, 0xa4240028, 0x8ee27b9c, 0x12a20008,
0x32c20100, 0x8ee27ba4, 0x94420028, 0x822021,
0x41c02, 0x3082ffff, 0x622021, 0x32c20100,
0x14400004, 0x41027, 0x92e27b98, 0x14400002,
0x41027, 0x3044ffff, 0x8ee27ba4, 0x8002d8a,
0xa4440028, 0x1462002f, 0x37e1821, 0x8ee27ba4,
0x24420032, 0x43102b, 0x14400018, 0x0,
0x8ee27b9c, 0x12a2000a, 0x32c20100, 0x8ee27ba4,
0x3c01ffff, 0x220821, 0x94220032, 0x822021,
0x41c02, 0x3082ffff, 0x622021, 0x32c20100,
0x14400004, 0x41027, 0x92e27b98, 0x14400002,
0x41027, 0x3044ffff, 0x8ee27ba4, 0x3c01ffff,
0x220821, 0x8002d8a, 0xa4240032, 0x8ee27b9c,
0x12a20008, 0x32c20100, 0x8ee27ba4, 0x94420032,
0x822021, 0x41c02, 0x3082ffff, 0x622021,
0x32c20100, 0x14400004, 0x41027, 0x92e27b98,
0x14400002, 0x41027, 0x3044ffff, 0x8ee27ba4,
0xa4440032, 0x8fac0024, 0x1180002c, 0x37e1821,
0x8e420000, 0xae42fffc, 0x2642000a, 0x43102b,
0x1440001b, 0x34038100, 0x26430004, 0x37e1021,
0x62102b, 0x14400003, 0x602021, 0x6b1821,
0x602021, 0x8c620000, 0x24630004, 0xae420000,
0x37e1021, 0x62102b, 0x50400001, 0x6b1821,
0x8c620000, 0xac820000, 0x34028100, 0xa4620000,
0x24630002, 0x37e1021, 0x62102b, 0x50400001,
0x6b1821, 0x97ac002e, 0x8002db4, 0xa46c0000,
0x8e420004, 0x8e440008, 0xa6430008, 0x97ac002e,
0xa64c000a, 0xae420000, 0xae440004, 0x9662000e,
0x2652fffc, 0x24420004, 0xa662000e, 0x9662000e,
0x8ee3725c, 0x621821, 0xaee3725c, 0xafb20018,
0x8ee3725c, 0xafa3001c, 0x8ee2725c, 0x2c42003c,
0x10400004, 0x24620001, 0x2403fffe, 0x431024,
0xafa2001c, 0x32c20080, 0x1040000c, 0x32c20100,
0x8ee27ba8, 0x24430001, 0x210c0, 0x571021,
0xaee37ba8, 0x8fa30018, 0x8fa4001c, 0xac437bac,
0xac447bb0, 0x8002ea0, 0xaee0725c, 0x10400072,
0x0, 0x8ee27ba8, 0x24430001, 0x210c0,
0x571021, 0xaee37ba8, 0x8fa30018, 0x8fa4001c,
0xac437bac, 0xac447bb0, 0x8ee27ba8, 0x10400063,
0x4821, 0x5021, 0x8f8200f0, 0x24480008,
0x27621800, 0x102102b, 0x50400001, 0x27681000,
0x8f8200f4, 0x15020007, 0x0, 0x8ee201b4,
0x8021, 0x24420001, 0xaee201b4, 0x8002dfa,
0x8ee201b4, 0x8f8300f0, 0x24100001, 0x1571021,
0x8c447bac, 0x8c457bb0, 0xac640000, 0xac650004,
0xaf8800f0, 0x16000006, 0x2ea1021, 0x8ee20088,
0x24420001, 0xaee20088, 0x8002e3f, 0x8ee20088,
0x8c427bb0, 0x8ee400e0, 0x8ee500e4, 0x8ee67b9c,
0x401821, 0x1021, 0xa32821, 0xa3382b,
0x822021, 0x872021, 0x8ee204fc, 0xc93021,
0x63100, 0xaee400e0, 0xaee500e4, 0xc23021,
0x94c2000a, 0x240c0002, 0x21142, 0x30430003,
0x106c0016, 0x28620003, 0x10400005, 0x240c0001,
0x106c0008, 0x0, 0x8002e3f, 0x0,
0x240c0003, 0x106c0017, 0x0, 0x8002e3f,
0x0, 0x8ee200e8, 0x8ee300ec, 0x24630001,
0x2c640001, 0x441021, 0xaee200e8, 0xaee300ec,
0x8ee200e8, 0x8002e3f, 0x8ee300ec, 0x8ee200f0,
0x8ee300f4, 0x24630001, 0x2c640001, 0x441021,
0xaee200f0, 0xaee300f4, 0x8ee200f0, 0x8002e3f,
0x8ee300f4, 0x8ee200f8, 0x8ee300fc, 0x24630001,
0x2c640001, 0x441021, 0xaee200f8, 0xaee300fc,
0x8ee200f8, 0x8ee300fc, 0x8ee27ba8, 0x25290001,
0x122102b, 0x1440ffa0, 0x254a0008, 0xa2e07b98,
0x8002e9f, 0xaee07ba8, 0x8f8200f0, 0x24470008,
0x27621800, 0xe2102b, 0x50400001, 0x27671000,
0x8f8200f4, 0x14e20007, 0x0, 0x8ee201b4,
0x8021, 0x24420001, 0xaee201b4, 0x8002e5d,
0x8ee201b4, 0x8f8200f0, 0x24100001, 0x8fa30018,
0x8fa4001c, 0xac430000, 0xac440004, 0xaf8700f0,
0x16000007, 0x0, 0x8ee20088, 0x24420001,
0xaee20088, 0x8ee20088, 0x8002ea0, 0xaee0725c,
0x8ee2725c, 0x8ee400e0, 0x8ee500e4, 0x240c0002,
0x401821, 0x1021, 0xa32821, 0xa3302b,
0x822021, 0x862021, 0x161142, 0x30430003,
0xaee400e0, 0xaee500e4, 0x106c0017, 0x2c620003,
0x10400005, 0x240c0001, 0x106c0008, 0x0,
0x8002ea0, 0xaee0725c, 0x240c0003, 0x106c0019,
0x0, 0x8002ea0, 0xaee0725c, 0x8ee200e8,
0x8ee300ec, 0x24630001, 0x2c640001, 0x441021,
0xaee200e8, 0xaee300ec, 0x8ee200e8, 0x8ee300ec,
0x8002ea0, 0xaee0725c, 0x8ee200f0, 0x8ee300f4,
0x24630001, 0x2c640001, 0x441021, 0xaee200f0,
0xaee300f4, 0x8ee200f0, 0x8ee300f4, 0x8002ea0,
0xaee0725c, 0x8ee200f8, 0x8ee300fc, 0x24630001,
0x2c640001, 0x441021, 0xaee200f8, 0xaee300fc,
0x8ee200f8, 0x8ee300fc, 0xaee0725c, 0x8e62001c,
0x96e30458, 0x8ee404f0, 0x24420001, 0x2463ffff,
0x431024, 0x24840001, 0xaee204e4, 0xaee404f0,
0x8f42023c, 0x82202b, 0x148000b0, 0x0,
0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
0x0, 0x8f820124, 0x14c20007, 0x0,
0x8ee201a4, 0x8021, 0x24420001, 0xaee201a4,
0x8002f07, 0x8ee201a4, 0x8ee204e4, 0xac62001c,
0x8ee404b0, 0x8ee504b4, 0x2462001c, 0xac620008,
0x24020008, 0xa462000e, 0x24020011, 0xac620018,
0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
0xaf860120, 0x92e24e20, 0x14400037, 0x24100001,
0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
0x8c830000, 0x24020012, 0x1462001f, 0x0,
0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x240c0040,
0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
0x8ee34e30, 0x24420001, 0x104c0007, 0x0,
0x8ee24e34, 0x24420001, 0x10620005, 0x0,
0x8002ef1, 0x0, 0x14600005, 0x0,
0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
0x8002f07, 0x0, 0x8ee24e30, 0x240c0040,
0x24420001, 0x504c0003, 0x1021, 0x8ee24e30,
0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
0x24425038, 0x2e22021, 0x24020012, 0x240c0001,
0xac820000, 0xac8c0004, 0x5600000d, 0x24100001,
0x8ee204e4, 0x3c040001, 0x24845754, 0xafa00014,
0xafa20010, 0x8ee60608, 0x8f470228, 0x3c050009,
0x34a5f006, 0xc002403, 0xafab0038, 0x8fab0038,
0x16000003, 0x240c0001, 0x8002f5c, 0xa2ec04f4,
0x8ee20170, 0x24420001, 0xaee20170, 0x8ee20170,
0x8ee204e4, 0xa2e004f4, 0xaee004f0, 0xaee07274,
0xaee204f8, 0x8f42023c, 0x10400038, 0x0,
0x8ee20184, 0x24420001, 0xaee20184, 0x8002f5c,
0x8ee20184, 0x8ee20504, 0x240c0040, 0x24420001,
0x504c0003, 0x1021, 0x8ee20504, 0x24420001,
0xaee20504, 0x8ee20504, 0x8e630018, 0x240c0003,
0x21080, 0x571021, 0x146c000f, 0x8c440508,
0x3c020001, 0x571021, 0x904283b1, 0x10400014,
0x0, 0x8ee201d0, 0x8ee35240, 0x441021,
0xaee201d0, 0x8ee201d8, 0x641821, 0x306300ff,
0x8002f4f, 0xaee35240, 0x8ee201cc, 0x8ee30e10,
0x441021, 0xaee201cc, 0x8ee201d8, 0x641821,
0x306301ff, 0xaee30e10, 0x441021, 0xaee201d8,
0x8ee20000, 0x34420040, 0x8002f5c, 0xaee20000,
0x8ee2014c, 0x3c010001, 0x370821, 0xa02083e0,
0x24420001, 0xaee2014c, 0x8ee2014c, 0x8f820108,
0x24420020, 0xaf820108, 0x8f820108, 0x8f820108,
0x27633000, 0x43102b, 0x14400002, 0x27622800,
0xaf820108, 0x8f830108, 0x8f820104, 0x1462fc1e,
0x0, 0x8fbf0060, 0x8fbe005c, 0x8fb60058,
0x8fb50054, 0x8fb40050, 0x8fb3004c, 0x8fb20048,
0x8fb10044, 0x8fb00040, 0x3e00008, 0x27bd0068,
0x52843, 0x10a0000d, 0x3021, 0x3c030001,
0x34633800, 0x3c07ffff, 0x3631021, 0x82102b,
0x50400001, 0x872021, 0x94820000, 0x24840002,
0x24a5ffff, 0x14a0fff8, 0xc23021, 0x61c02,
0x30c2ffff, 0x623021, 0x61c02, 0x30c2ffff,
0x623021, 0x3e00008, 0x30c2ffff, 0x27bdff88,
0x240f0001, 0xafbf0070, 0xafbe006c, 0xafb60068,
0xafb50064, 0xafb40060, 0xafb3005c, 0xafb20058,
0xafb10054, 0xafb00050, 0xa3a00027, 0xafaf002c,
0x8ee204d4, 0x8021, 0x30420001, 0x1440002a,
0xa3a00037, 0x8f8700e0, 0x8f8800c4, 0x8f8200e8,
0xe22023, 0x2c821000, 0x50400001, 0x24841000,
0x420c2, 0x801821, 0x8ee400c8, 0x8ee500cc,
0x1021, 0xa32821, 0xa3302b, 0x822021,
0x862021, 0xaee400c8, 0xaee500cc, 0x8f8300c8,
0x3c02000a, 0x3442efff, 0x1032023, 0x44102b,
0x10400003, 0x3c02000a, 0x3442f000, 0x822021,
0x801821, 0x8ee400c0, 0x8ee500c4, 0x1021,
0xa32821, 0xa3302b, 0x822021, 0x862021,
0xaee400c0, 0xaee500c4, 0xaf8800c8, 0xaf8700e4,
0x80034cc, 0xaf8700e8, 0x3c020001, 0x571021,
0x904283c0, 0x1040000b, 0x0, 0x3c140001,
0x297a021, 0x8e9483c4, 0x3c130001, 0x2779821,
0x8e7383c8, 0x3c120001, 0x2579021, 0x8003193,
0x8e5283cc, 0x8f8300e0, 0x8f8200e4, 0x10430007,
0x8821, 0x8f8200e4, 0x24110001, 0x8c430000,
0x8c440004, 0xafa30018, 0xafa4001c, 0x1620000e,
0x3c02ffff, 0x8f8200c4, 0xafa20010, 0x8f8200c8,
0x3c040001, 0x24845870, 0xafa20014, 0x8f8600e0,
0x8f8700e4, 0x3c050006, 0xc002403, 0x34a5f000,
0x80034cc, 0x0, 0x8fa3001c, 0x8fb20018,
0x3074ffff, 0x2694fffc, 0x621024, 0x10400058,
0x2409821, 0x3c020080, 0x621024, 0x1040000a,
0x3c040040, 0x8ee2007c, 0x24420001, 0xaee2007c,
0x8ee2007c, 0x8ee201fc, 0x24420001, 0xaee201fc,
0x80034c6, 0x8ee201fc, 0x3c060004, 0x3c0b0001,
0x3c0a0002, 0x3c050010, 0x3c090008, 0x8ee20080,
0x3c080020, 0x34078000, 0x24420001, 0xaee20080,
0x8ee20080, 0x8fa2001c, 0x441824, 0x10660021,
0xc3102b, 0x14400007, 0x0, 0x106b0011,
0x0, 0x106a0015, 0x0, 0x8003049,
0x42042, 0x10650023, 0xa3102b, 0x14400005,
0x0, 0x10690019, 0x0, 0x8003049,
0x42042, 0x10680021, 0x0, 0x8003049,
0x42042, 0x8ee20034, 0x24420001, 0xaee20034,
0x8ee20034, 0x8003049, 0x42042, 0x8ee201ec,
0x24420001, 0xaee201ec, 0x8ee201ec, 0x8003049,
0x42042, 0x8ee201f0, 0x24420001, 0xaee201f0,
0x8ee201f0, 0x8003049, 0x42042, 0x8ee201f4,
0x24420001, 0xaee201f4, 0x8ee201f4, 0x8003049,
0x42042, 0x8ee20030, 0x24420001, 0xaee20030,
0x8ee20030, 0x8003049, 0x42042, 0x8ee201f8,
0x24420001, 0xaee201f8, 0x8ee201f8, 0x42042,
0x1087047c, 0x0, 0x800300e, 0x0,
0x3c020001, 0x571021, 0x904283b2, 0x14400084,
0x24020001, 0x3c030001, 0x771821, 0x906383b3,
0x1462007f, 0x3c020100, 0x8e430000, 0x621024,
0x1040006f, 0x2402ffff, 0x14620005, 0x24100001,
0x96430004, 0x3402ffff, 0x10620075, 0x0,
0x92e204d8, 0x14400072, 0x0, 0x3c020001,
0x571021, 0x8c4283b4, 0x28420005, 0x10400020,
0x3821, 0x3c020001, 0x571021, 0x8c4283b4,
0x18400016, 0x2821, 0x96660000, 0x520c0,
0x971021, 0x9442777e, 0x14460009, 0x971021,
0x94437780, 0x96620002, 0x14620005, 0x971021,
0x94437782, 0x96620004, 0x50620008, 0x24070001,
0x3c020001, 0x571021, 0x8c4283b4, 0x24a50001,
0xa2102a, 0x5440ffee, 0x520c0, 0x30e200ff,
0x10400440, 0x0, 0x80030d5, 0x0,
0x2402021, 0xc0022fe, 0x24050006, 0x3044001f,
0x428c0, 0x2e51021, 0x9442727c, 0x30424000,
0x14400434, 0xb71021, 0x9443727e, 0x96620000,
0x1462000b, 0x418c0, 0xb71021, 0x94437280,
0x96620002, 0x14620006, 0x418c0, 0xb71021,
0x94437282, 0x96620004, 0x10620035, 0x418c0,
0x2e31021, 0x9442727c, 0x30428000, 0x14400421,
0x2e31021, 0x944b727c, 0x96670000, 0xb28c0,
0xb71021, 0x9442737e, 0x80030b7, 0x3021,
0x420c0, 0x2e41021, 0x9443737c, 0x2e41021,
0x944b737c, 0x30638000, 0x14600010, 0xb28c0,
0xb71021, 0x9442737e, 0x1447fff5, 0x1602021,
0xb71021, 0x94437380, 0x96620002, 0x5462fff1,
0x420c0, 0xb71021, 0x94437382, 0x96620004,
0x5462ffec, 0x420c0, 0x24060001, 0x30c200ff,
0x10400400, 0x0, 0x80030d5, 0x0,
0x97430202, 0x96420000, 0x146203fa, 0x0,
0x97430204, 0x96420002, 0x146203f6, 0x0,
0x97430206, 0x96420004, 0x146203f2, 0x0,
0x92420000, 0x3a030001, 0x30420001, 0x431024,
0x10400074, 0x2402ffff, 0x8e630000, 0x14620004,
0x3402ffff, 0x96630004, 0x1062006f, 0x240f0002,
0x3c020001, 0x571021, 0x904283b2, 0x1440006a,
0x240f0003, 0x92e204d8, 0x54400068, 0xafaf002c,
0x3c020001, 0x571021, 0x8c4283b4, 0x28420005,
0x10400020, 0x3821, 0x3c020001, 0x571021,
0x8c4283b4, 0x18400016, 0x2821, 0x96660000,
0x520c0, 0x971021, 0x9442777e, 0x14460009,
0x971021, 0x94437780, 0x96620002, 0x14620005,
0x971021, 0x94437782, 0x96620004, 0x50620008,
0x24070001, 0x3c020001, 0x571021, 0x8c4283b4,
0x24a50001, 0xa2102a, 0x5440ffee, 0x520c0,
0x30e200ff, 0x14400044, 0x240f0003, 0x80034c6,
0x0, 0x2402021, 0xc0022fe, 0x24050006,
0x3044001f, 0x428c0, 0x2e51021, 0x9442727c,
0x30424000, 0x144003af, 0xb71021, 0x9443727e,
0x96620000, 0x1462000b, 0x418c0, 0xb71021,
0x94437280, 0x96620002, 0x14620006, 0x418c0,
0xb71021, 0x94437282, 0x96620004, 0x10620027,
0x418c0, 0x2e31021, 0x9442727c, 0x30428000,
0x1440039c, 0x2e31021, 0x944b727c, 0x96670000,
0xb28c0, 0xb71021, 0x9442737e, 0x800313c,
0x3021, 0x420c0, 0x2e41021, 0x9443737c,
0x2e41021, 0x944b737c, 0x30638000, 0x14600010,
0xb28c0, 0xb71021, 0x9442737e, 0x1447fff5,
0x1602021, 0xb71021, 0x94437380, 0x96620002,
0x5462fff1, 0x420c0, 0xb71021, 0x94437382,
0x96620004, 0x5462ffec, 0x420c0, 0x24060001,
0x30c200ff, 0x1040037b, 0x0, 0x800314f,
0x240f0003, 0x240f0001, 0xafaf002c, 0x8f420260,
0x54102b, 0x1040003a, 0x0, 0x8f8300e4,
0x8f8200e0, 0x10620003, 0x24630008, 0xaf8300e4,
0xaf8300e8, 0x8ee400c0, 0x8ee500c4, 0x2801821,
0x1021, 0xa32821, 0xa3302b, 0x822021,
0x862021, 0xaee400c0, 0xaee500c4, 0x8ee20058,
0x24420001, 0xaee20058, 0x8ee20058, 0x8ee2007c,
0x24420001, 0xaee2007c, 0x8ee2007c, 0x8f8200e0,
0xafa20010, 0x8f8200e4, 0x3c040001, 0x24845878,
0xafa20014, 0x8fa60018, 0x8fa7001c, 0x3c050006,
0xc002403, 0x34a5f003, 0x80034cc, 0x0,
0x8ee25240, 0xafa20010, 0x8ee25244, 0x3c040001,
0x24845884, 0xafa20014, 0x8ee60e10, 0x8ee70e18,
0x3c050006, 0xc002403, 0x34a5f002, 0x8ee201c0,
0x24420001, 0xaee201c0, 0x8ee20000, 0x8ee301c0,
0x2403ffbf, 0x431024, 0x8003470, 0xaee20000,
0x96e20468, 0x54102b, 0x10400003, 0x0,
0x240f0001, 0xa3af0027, 0x12800301, 0x24160007,
0x24150040, 0x241e0001, 0x240e0012, 0x8ee2724c,
0x8f430280, 0x24420001, 0x304207ff, 0x106202d3,
0x0, 0x93a20027, 0x10400014, 0x0,
0x8ee35240, 0x8ee25244, 0x10620009, 0x26ed5244,
0x8ee65244, 0x8ee35244, 0x21140, 0x24425248,
0x2e28021, 0x24630001, 0x80031bf, 0x306b00ff,
0x92e27248, 0x1440ffca, 0x0, 0x8ee201e0,
0x24420001, 0xaee201e0, 0x8ee201e0, 0x8ee30e10,
0x8ee20e18, 0x1062ffc2, 0x26ed0e18, 0x8ee60e18,
0x8ee30e18, 0x21140, 0x24420e20, 0x2e28021,
0x24630001, 0x306b01ff, 0x96e2046a, 0x30420010,
0x10400019, 0x0, 0x9642000c, 0x340f8100,
0x144f0015, 0x0, 0x3c020001, 0x571021,
0x904283c0, 0x14400010, 0x0, 0x9642000e,
0xa6020016, 0x8e420008, 0x8e430004, 0x8e440000,
0x2694fffc, 0xae42000c, 0xae430008, 0xae440004,
0x9602000e, 0x26730004, 0x240f0001, 0xa3af0037,
0x34420200, 0xa602000e, 0x8e020000, 0x8e030004,
0x3c040001, 0x34843800, 0x306a0007, 0x26a9823,
0x3641021, 0x262102b, 0x10400005, 0x28aa021,
0x2641023, 0x3621823, 0x3c020020, 0x439823,
0x26820007, 0x2404fff8, 0x9603000a, 0x446024,
0x6a1821, 0x6c102b, 0x10400002, 0x1803821,
0x603821, 0xae130018, 0x8f880120, 0x24e20007,
0x443824, 0x27623800, 0x25090020, 0x122102b,
0x50400001, 0x27693000, 0x8f820128, 0x11220004,
0x0, 0x8f820124, 0x15220007, 0x1401821,
0x8ee201a4, 0x8821, 0x24420001, 0xaee201a4,
0x800324c, 0x8ee201a4, 0x8e040000, 0x8e050004,
0x1021, 0xad130008, 0xa507000e, 0xad160018,
0xad06001c, 0xa3302b, 0xa32823, 0x822023,
0x862023, 0xad040000, 0xad050004, 0x8ee204c0,
0xad020010, 0xaf890120, 0x92e24e20, 0x14400033,
0x24110001, 0x8ee24e30, 0x210c0, 0x24425038,
0x2e22021, 0x8c820000, 0x1456001f, 0x0,
0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
0x8ee34e30, 0x24420001, 0x10550007, 0x0,
0x8ee24e34, 0x24420001, 0x10620005, 0x0,
0x8003239, 0x0, 0x14600005, 0x0,
0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
0x8c820004, 0x2c420011, 0x50400010, 0xac800000,
0x800324c, 0x0, 0x8ee24e30, 0x24420001,
0x50550003, 0x1021, 0x8ee24e30, 0x24420001,
0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
0x2e22021, 0xac960000, 0xac9e0004, 0x16200018,
0x3c050006, 0x8e020018, 0x3c040001, 0x24845890,
0xafa20010, 0x8e020000, 0x8e030004, 0x34a5f009,
0x2003021, 0xc002403, 0xafa30014, 0x93a20037,
0x10400216, 0x340f8100, 0x8e420004, 0x8e430008,
0x8e44000c, 0xa64f000c, 0xae420000, 0xae430004,
0xae440008, 0x96020016, 0x8003470, 0xa642000e,
0x14ec0168, 0x28a1823, 0x960c000a, 0x9603000e,
0x28a1023, 0xa602000a, 0x34620004, 0xa602000e,
0x8f880120, 0x27623800, 0x25090020, 0x122102b,
0x14400002, 0x306affff, 0x27693000, 0x8f820128,
0x11220004, 0x0, 0x8f820124, 0x15220007,
0x24040020, 0x8ee201a4, 0x8821, 0x24420001,
0xaee201a4, 0x80032ca, 0x8ee201a4, 0x8ee5724c,
0x8ee60490, 0x8ee70494, 0xa504000e, 0x24040004,
0xad100008, 0xad040018, 0x52940, 0xa01821,
0x1021, 0xe33821, 0xe3202b, 0xc23021,
0xc43021, 0xad060000, 0xad070004, 0x8ee2724c,
0xad02001c, 0x8ee204c4, 0xad020010, 0xaf890120,
0x92e24e20, 0x14400033, 0x24110001, 0x8ee24e30,
0x210c0, 0x24425038, 0x2e22021, 0x8c820000,
0x1456001f, 0x0, 0x8ee34e30, 0x8ee24e34,
0x1062001b, 0x0, 0x8c820004, 0x24420001,
0xac820004, 0x8ee24e34, 0x8ee34e30, 0x24420001,
0x10550007, 0x0, 0x8ee24e34, 0x24420001,
0x10620005, 0x0, 0x80032b7, 0x0,
0x14600005, 0x0, 0x8f820128, 0x24420020,
0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
0x50400010, 0xac800000, 0x80032ca, 0x0,
0x8ee24e30, 0x24420001, 0x50550003, 0x1021,
0x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
0x210c0, 0x24425038, 0x2e22021, 0xac960000,
0xac9e0004, 0x1620000d, 0x0, 0xa60c000a,
0xa60a000e, 0x8f820100, 0xafa20010, 0x8f820104,
0x3c040001, 0x2484589c, 0x3c050006, 0xafa20014,
0x8ee6724c, 0x800343b, 0x34a5f00b, 0x3c010001,
0x370821, 0xa02083c0, 0xadab0000, 0x8ee201d8,
0x8ee3724c, 0x2442ffff, 0xaee201d8, 0x8ee201d8,
0x24630001, 0x306307ff, 0x26e25244, 0x15a20006,
0xaee3724c, 0x8ee201d0, 0x2442ffff, 0xaee201d0,
0x80032ef, 0x8ee201d0, 0x8ee201cc, 0x2442ffff,
0xaee201cc, 0x8ee201cc, 0x8f420240, 0x10400073,
0x0, 0x8ee20e1c, 0x24420001, 0xaee20e1c,
0x8f430240, 0x43102b, 0x14400176, 0xa021,
0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
0x0, 0x8f820124, 0x14c20007, 0x0,
0x8ee201a4, 0x8821, 0x24420001, 0xaee201a4,
0x800334f, 0x8ee201a4, 0x8ee2724c, 0xac62001c,
0x8ee404a8, 0x8ee504ac, 0x2462001c, 0xac620008,
0x24020008, 0xa462000e, 0x24020011, 0xac620018,
0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
0xaf860120, 0x92e24e20, 0x14400033, 0x24110001,
0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
0x8c820000, 0x144e001f, 0x0, 0x8ee34e30,
0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
0x24420001, 0x10550007, 0x0, 0x8ee24e34,
0x24420001, 0x10620005, 0x0, 0x800333c,
0x0, 0x14600005, 0x0, 0x8f820128,
0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
0x2c420011, 0x50400010, 0xac800000, 0x800334f,
0x0, 0x8ee24e30, 0x24420001, 0x50550003,
0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
0xac8e0000, 0xac9e0004, 0x5620000d, 0x24110001,
0x8ee2724c, 0x3c040001, 0x248458a8, 0xafa00014,
0xafa20010, 0x8ee6724c, 0x8f470280, 0x3c050009,
0x34a5f008, 0xc002403, 0xafae0048, 0x8fae0048,
0x56200001, 0xaee00e1c, 0x8ee20188, 0x24420001,
0xaee20188, 0x80033c8, 0x8ee20188, 0x8f830120,
0x27623800, 0x24660020, 0xc2102b, 0x50400001,
0x27663000, 0x8f820128, 0x10c20004, 0x0,
0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
0x8821, 0x24420001, 0xaee201a4, 0x80033ba,
0x8ee201a4, 0x8ee2724c, 0xac62001c, 0x8ee404a8,
0x8ee504ac, 0x2462001c, 0xac620008, 0x24020008,
0xa462000e, 0x24020011, 0xac620018, 0xac640000,
0xac650004, 0x8ee204c4, 0xac620010, 0xaf860120,
0x92e24e20, 0x14400033, 0x24110001, 0x8ee24e30,
0x210c0, 0x24425038, 0x2e22021, 0x8c820000,
0x144e001f, 0x0, 0x8ee34e30, 0x8ee24e34,
0x1062001b, 0x0, 0x8c820004, 0x24420001,
0xac820004, 0x8ee24e34, 0x8ee34e30, 0x24420001,
0x10550007, 0x0, 0x8ee24e34, 0x24420001,
0x10620005, 0x0, 0x80033a7, 0x0,
0x14600005, 0x0, 0x8f820128, 0x24420020,
0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
0x50400010, 0xac800000, 0x80033ba, 0x0,
0x8ee24e30, 0x24420001, 0x50550003, 0x1021,
0x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
0x210c0, 0x24425038, 0x2e22021, 0xac8e0000,
0xac9e0004, 0x1620000d, 0x0, 0x8ee2724c,
0x3c040001, 0x248458a8, 0xafa00014, 0xafa20010,
0x8ee6724c, 0x8f470280, 0x3c050009, 0x34a5f008,
0xc002403, 0xafae0048, 0x8fae0048, 0x8ee20174,
0x24420001, 0xaee20174, 0x8ee20174, 0x800346e,
0xa021, 0x960c000a, 0x183102b, 0x54400001,
0x1801821, 0xa603000a, 0x8f880120, 0x27623800,
0x25090020, 0x122102b, 0x50400001, 0x27693000,
0x8f820128, 0x11220004, 0x0, 0x8f820124,
0x15220007, 0x24040020, 0x8ee201a4, 0x8821,
0x24420001, 0xaee201a4, 0x800342f, 0x8ee201a4,
0x8ee5724c, 0x8ee60490, 0x8ee70494, 0xa504000e,
0x24040004, 0xad100008, 0xad040018, 0x52940,
0xa01821, 0x1021, 0xe33821, 0xe3202b,
0xc23021, 0xc43021, 0xad060000, 0xad070004,
0x8ee2724c, 0xad02001c, 0x8ee204c4, 0xad020010,
0xaf890120, 0x92e24e20, 0x14400033, 0x24110001,
0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
0x8c820000, 0x1456001f, 0x0, 0x8ee34e30,
0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
0x24420001, 0x10550007, 0x0, 0x8ee24e34,
0x24420001, 0x10620005, 0x0, 0x800341c,
0x0, 0x14600005, 0x0, 0x8f820128,
0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
0x2c420011, 0x50400010, 0xac800000, 0x800342f,
0x0, 0x8ee24e30, 0x24420001, 0x50550003,
0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
0xac960000, 0xac9e0004, 0x1620001d, 0x0,
0xa60c000a, 0x8f820100, 0xafa20010, 0x8f820104,
0x3c040001, 0x2484589c, 0x3c050006, 0xafa20014,
0x8ee6724c, 0x34a5f00d, 0xc002403, 0x2003821,
0x93a20037, 0x10400031, 0x340f8100, 0x8e420004,
0x8e430008, 0x8e44000c, 0xa64f000c, 0xae420000,
0xae430004, 0xae440008, 0x96020016, 0xa642000e,
0x9602000e, 0x3042fdff, 0x8003470, 0xa602000e,
0x8ee201d8, 0x2442ffff, 0xaee201d8, 0x8ee201d8,
0x8ee201cc, 0x3c04001f, 0x3c010001, 0x370821,
0xa03e83c0, 0x2442ffff, 0xaee201cc, 0x9603000a,
0x3484ffff, 0x8ee201cc, 0x6a1821, 0x2639821,
0x93202b, 0x10800003, 0x3c02fff5, 0x34421000,
0x2629821, 0xadab0000, 0x8ee2724c, 0x24420001,
0x304207ff, 0xaee2724c, 0x8f420240, 0x10400004,
0x283a023, 0x8ee20e1c, 0x24420001, 0xaee20e1c,
0xa3a00027, 0x1680fd29, 0x0, 0x12800024,
0x0, 0x3c010001, 0x370821, 0xac3483c4,
0x3c010001, 0x370821, 0xac3383c8, 0x3c010001,
0x370821, 0xac3283cc, 0x93a20037, 0x10400008,
0x0, 0x3c020001, 0x571021, 0x8c4283cc,
0x24420004, 0x3c010001, 0x370821, 0xac2283cc,
0x8ee2724c, 0x8f430280, 0x24420001, 0x304207ff,
0x14620006, 0x0, 0x8ee201c4, 0x24420001,
0xaee201c4, 0x80034cc, 0x8ee201c4, 0x8ee201bc,
0x24420001, 0xaee201bc, 0x80034cc, 0x8ee201bc,
0x97a4001e, 0x2484fffc, 0x801821, 0x8ee400c0,
0x8ee500c4, 0x1021, 0xa32821, 0xa3302b,
0x822021, 0x862021, 0xaee400c0, 0xaee500c4,
0x8faf002c, 0x24020002, 0x11e2000f, 0x29e20003,
0x14400017, 0x24020003, 0x15e20015, 0x0,
0x8ee200d0, 0x8ee300d4, 0x24630001, 0x2c640001,
0x441021, 0xaee200d0, 0xaee300d4, 0x8ee200d0,
0x80034c6, 0x8ee300d4, 0x8ee200d8, 0x8ee300dc,
0x24630001, 0x2c640001, 0x441021, 0xaee200d8,
0xaee300dc, 0x8ee200d8, 0x80034c6, 0x8ee300dc,
0x8ee200c8, 0x8ee300cc, 0x24630001, 0x2c640001,
0x441021, 0xaee200c8, 0xaee300cc, 0x8ee200c8,
0x8ee300cc, 0x8f8300e4, 0x8f8200e0, 0x10620003,
0x24630008, 0xaf8300e4, 0xaf8300e8, 0x8fbf0070,
0x8fbe006c, 0x8fb60068, 0x8fb50064, 0x8fb40060,
0x8fb3005c, 0x8fb20058, 0x8fb10054, 0x8fb00050,
0x3e00008, 0x27bd0078, 0x27bdffb0, 0xafb50044,
0xa821, 0xafb00030, 0x8021, 0xafbf004c,
0xafb60048, 0xafb40040, 0xafb3003c, 0xafb20038,
0xafb10034, 0x8ee204d4, 0x24140001, 0x30420001,
0x1440002a, 0xb021, 0x8f8700e0, 0x8f8800c4,
0x8f8200e8, 0xe22023, 0x2c821000, 0x50400001,
0x24841000, 0x420c2, 0x801821, 0x8ee400c8,
0x8ee500cc, 0x1021, 0xa32821, 0xa3302b,
0x822021, 0x862021, 0xaee400c8, 0xaee500cc,
0x8f8300c8, 0x3c02000a, 0x3442efff, 0x1032023,
0x44102b, 0x10400003, 0x3c02000a, 0x3442f000,
0x822021, 0x801821, 0x8ee400c0, 0x8ee500c4,
0x1021, 0xa32821, 0xa3302b, 0x822021,
0x862021, 0xaee400c0, 0xaee500c4, 0xaf8800c8,
0xaf8700e4, 0x8003850, 0xaf8700e8, 0x3c020001,
0x571021, 0x904283c0, 0x1040000b, 0x0,
0x3c130001, 0x2779821, 0x8e7383c4, 0x3c110001,
0x2378821, 0x8e3183c8, 0x3c120001, 0x2579021,
0x80036e8, 0x8e5283cc, 0x8f8300e0, 0x8f8200e4,
0x10430007, 0x4821, 0x8f8200e4, 0x24090001,
0x8c430000, 0x8c440004, 0xafa30018, 0xafa4001c,
0x1520000e, 0x3c02ffff, 0x8f8200c4, 0xafa20010,
0x8f8200c8, 0x3c040001, 0x24845870, 0xafa20014,
0x8f8600e0, 0x8f8700e4, 0x3c050006, 0xc002403,
0x34a5f000, 0x8003850, 0x0, 0x8fa3001c,
0x8fb20018, 0x3073ffff, 0x2673fffc, 0x621024,
0x10400058, 0x2408821, 0x3c020080, 0x621024,
0x1040000a, 0x3c040040, 0x8ee2007c, 0x24420001,
0xaee2007c, 0x8ee2007c, 0x8ee201fc, 0x24420001,
0xaee201fc, 0x800384a, 0x8ee201fc, 0x3c060004,
0x3c0b0001, 0x3c0a0002, 0x3c050010, 0x3c090008,
0x8ee20080, 0x3c080020, 0x34078000, 0x24420001,
0xaee20080, 0x8ee20080, 0x8fa2001c, 0x441824,
0x10660021, 0xc3102b, 0x14400007, 0x0,
0x106b0011, 0x0, 0x106a0015, 0x0,
0x8003592, 0x42042, 0x10650023, 0xa3102b,
0x14400005, 0x0, 0x10690019, 0x0,
0x8003592, 0x42042, 0x10680021, 0x0,
0x8003592, 0x42042, 0x8ee20034, 0x24420001,
0xaee20034, 0x8ee20034, 0x8003592, 0x42042,
0x8ee201ec, 0x24420001, 0xaee201ec, 0x8ee201ec,
0x8003592, 0x42042, 0x8ee201f0, 0x24420001,
0xaee201f0, 0x8ee201f0, 0x8003592, 0x42042,
0x8ee201f4, 0x24420001, 0xaee201f4, 0x8ee201f4,
0x8003592, 0x42042, 0x8ee20030, 0x24420001,
0xaee20030, 0x8ee20030, 0x8003592, 0x42042,
0x8ee201f8, 0x24420001, 0xaee201f8, 0x8ee201f8,
0x42042, 0x108702b7, 0x0, 0x8003557,
0x0, 0x3c020001, 0x571021, 0x904283b2,
0x14400084, 0x24020001, 0x3c030001, 0x771821,
0x906383b3, 0x1462007f, 0x3c020100, 0x8e430000,
0x621024, 0x1040006f, 0x2402ffff, 0x14620005,
0x24100001, 0x96430004, 0x3402ffff, 0x10620075,
0x0, 0x92e204d8, 0x14400072, 0x0,
0x3c020001, 0x571021, 0x8c4283b4, 0x28420005,
0x10400020, 0x3821, 0x3c020001, 0x571021,
0x8c4283b4, 0x18400016, 0x2821, 0x96260000,
0x520c0, 0x971021, 0x9442777e, 0x14460009,
0x971021, 0x94437780, 0x96220002, 0x14620005,
0x971021, 0x94437782, 0x96220004, 0x50620008,
0x24070001, 0x3c020001, 0x571021, 0x8c4283b4,
0x24a50001, 0xa2102a, 0x5440ffee, 0x520c0,
0x30e200ff, 0x1040027b, 0x0, 0x800361e,
0x0, 0x2402021, 0xc0022fe, 0x24050006,
0x3044001f, 0x428c0, 0x2e51021, 0x9442727c,
0x30424000, 0x1440026f, 0xb71021, 0x9443727e,
0x96220000, 0x1462000b, 0x418c0, 0xb71021,
0x94437280, 0x96220002, 0x14620006, 0x418c0,
0xb71021, 0x94437282, 0x96220004, 0x10620035,
0x418c0, 0x2e31021, 0x9442727c, 0x30428000,
0x1440025c, 0x2e31021, 0x9448727c, 0x96270000,
0x828c0, 0xb71021, 0x9442737e, 0x8003600,
0x3021, 0x420c0, 0x2e41021, 0x9443737c,
0x2e41021, 0x9448737c, 0x30638000, 0x14600010,
0x828c0, 0xb71021, 0x9442737e, 0x1447fff5,
0x1002021, 0xb71021, 0x94437380, 0x96220002,
0x5462fff1, 0x420c0, 0xb71021, 0x94437382,
0x96220004, 0x5462ffec, 0x420c0, 0x24060001,
0x30c200ff, 0x1040023b, 0x0, 0x800361e,
0x0, 0x97430202, 0x96420000, 0x14620235,
0x0, 0x97430204, 0x96420002, 0x14620231,
0x0, 0x97430206, 0x96420004, 0x1462022d,
0x0, 0x92420000, 0x3a030001, 0x30420001,
0x431024, 0x10400074, 0x2402ffff, 0x8e230000,
0x14620004, 0x3402ffff, 0x96230004, 0x1062006f,
0x24140002, 0x3c020001, 0x571021, 0x904283b2,
0x1440006a, 0x24140003, 0x92e204d8, 0x14400067,
0x0, 0x3c020001, 0x571021, 0x8c4283b4,
0x28420005, 0x10400020, 0x3821, 0x3c020001,
0x571021, 0x8c4283b4, 0x18400016, 0x2821,
0x96260000, 0x520c0, 0x971021, 0x9442777e,
0x14460009, 0x971021, 0x94437780, 0x96220002,
0x14620005, 0x971021, 0x94437782, 0x96220004,
0x50620008, 0x24070001, 0x3c020001, 0x571021,
0x8c4283b4, 0x24a50001, 0xa2102a, 0x5440ffee,
0x520c0, 0x30e200ff, 0x14400044, 0x24140003,
0x800384a, 0x0, 0x2402021, 0xc0022fe,
0x24050006, 0x3044001f, 0x428c0, 0x2e51021,
0x9442727c, 0x30424000, 0x144001ea, 0xb71021,
0x9443727e, 0x96220000, 0x1462000b, 0x418c0,
0xb71021, 0x94437280, 0x96220002, 0x14620006,
0x418c0, 0xb71021, 0x94437282, 0x96220004,
0x10620027, 0x418c0, 0x2e31021, 0x9442727c,
0x30428000, 0x144001d7, 0x2e31021, 0x9448727c,
0x96270000, 0x828c0, 0xb71021, 0x9442737e,
0x8003685, 0x3021, 0x420c0, 0x2e41021,
0x9443737c, 0x2e41021, 0x9448737c, 0x30638000,
0x14600010, 0x828c0, 0xb71021, 0x9442737e,
0x1447fff5, 0x1002021, 0xb71021, 0x94437380,
0x96220002, 0x5462fff1, 0x420c0, 0xb71021,
0x94437382, 0x96220004, 0x5462ffec, 0x420c0,
0x24060001, 0x30c200ff, 0x104001b6, 0x0,
0x8003698, 0x24140003, 0x24140001, 0x8f420260,
0x53102b, 0x10400049, 0x0, 0x8f8300e4,
0x8f8200e0, 0x10620003, 0x24630008, 0xaf8300e4,
0xaf8300e8, 0x8ee400c0, 0x8ee500c4, 0x2601821,
0x1021, 0xa32821, 0xa3302b, 0x822021,
0x862021, 0xaee400c0, 0xaee500c4, 0x8ee20058,
0x24420001, 0xaee20058, 0x8ee20058, 0x8ee2007c,
0x24420001, 0xaee2007c, 0x8ee2007c, 0x8f8200e0,
0xafa20010, 0x8f8200e4, 0x3c040001, 0x24845878,
0xafa20014, 0x8fa60018, 0x8fa7001c, 0x3c050006,
0xc002403, 0x34a5f003, 0x8003850, 0x0,
0x8ee25240, 0xafa20010, 0x8ee25244, 0x3c040001,
0x24845884, 0xafa20014, 0x8ee60e10, 0x8ee70e18,
0xc002403, 0x34a5f002, 0x8ee201c0, 0x24420001,
0xaee201c0, 0x8ee20000, 0x8ee301c0, 0x2403ffbf,
0x431024, 0x80037f8, 0xaee20000, 0x8ee25240,
0xafa20010, 0x8ee25244, 0x3c040001, 0x24845884,
0xafa20014, 0x8ee60e10, 0x8ee70e18, 0x3c050006,
0xc002403, 0x34a5f002, 0x8ee201c0, 0x24420001,
0xaee201c0, 0x80037f8, 0x8ee201c0, 0x96e20468,
0x53102b, 0x54400001, 0x3c158000, 0x12600131,
0x3c0c001f, 0x358cffff, 0x8ee2724c, 0x8f430280,
0x24420001, 0x304207ff, 0x10620108, 0x0,
0x12a00014, 0x0, 0x8ee35240, 0x8ee25244,
0x10620009, 0x26ee5244, 0x8eeb5244, 0x8ee35244,
0x21140, 0x24425248, 0x2e28021, 0x24630001,
0x8003712, 0x306800ff, 0x92e27248, 0x1440ffc0,
0x3c050006, 0x8ee201e0, 0x24420001, 0xaee201e0,
0x8ee201e0, 0x8ee30e10, 0x8ee20e18, 0x1062ffcb,
0x26ee0e18, 0x8eeb0e18, 0xa821, 0x8ee30e18,
0x21140, 0x24420e20, 0x2e28021, 0x24630001,
0x306801ff, 0x96e2046a, 0x30420010, 0x10400017,
0x34028100, 0x9643000c, 0x14620014, 0x0,
0x3c020001, 0x571021, 0x904283c0, 0x1440000f,
0x0, 0x9642000e, 0xa6020016, 0x8e420008,
0x8e430004, 0x8e440000, 0x2673fffc, 0xae42000c,
0xae430008, 0xae440004, 0x9602000e, 0x26310004,
0x24160001, 0x34420200, 0xa602000e, 0x9603000a,
0x2605021, 0x73102b, 0x10400002, 0x2606821,
0x605021, 0x2d42003d, 0x1040002a, 0x3821,
0x9623000c, 0x24020800, 0x54620027, 0xae110018,
0x3c020001, 0x571021, 0x904283c0, 0x54400022,
0xae110018, 0x26220017, 0x182102b, 0x10400013,
0x0, 0x3c02fff5, 0x511021, 0x90421017,
0x38430006, 0x2c630001, 0x38420011, 0x2c420001,
0x621825, 0x10600013, 0x26220010, 0x182102b,
0x1040000e, 0x0, 0x3c07fff5, 0xf13821,
0x94e71010, 0x800375e, 0x24e7000e, 0x92220017,
0x38430006, 0x2c630001, 0x38420011, 0x2c420001,
0x621825, 0x50600004, 0xae110018, 0x96270010,
0x24e7000e, 0xae110018, 0x3c020001, 0x571021,
0x904283c0, 0x2102b, 0x14e00002, 0x24ec0,
0x1403821, 0x8f830120, 0x27623800, 0x24660020,
0xc2102b, 0x50400001, 0x27663000, 0x8f820128,
0x10c20004, 0x0, 0x8f820124, 0x14c20007,
0x2402000b, 0x8ee201a4, 0x4821, 0x24420001,
0xaee201a4, 0x80037bf, 0x8ee201a4, 0x8e040000,
0x8e050004, 0xac620018, 0x1751025, 0x491025,
0xac710008, 0xa467000e, 0xac62001c, 0xac640000,
0xac650004, 0x8ee204c0, 0xac620010, 0xaf860120,
0x92e24e20, 0x14400038, 0x24090001, 0x8ee24e30,
0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
0x24020007, 0x14620020, 0x0, 0x8ee34e30,
0x8ee24e34, 0x1062001c, 0x0, 0x8c820004,
0x24420001, 0xac820004, 0x8ee34e34, 0x8ee54e30,
0x24020040, 0x24630001, 0x10620007, 0x0,
0x8ee24e34, 0x24420001, 0x10a20005, 0x0,
0x80037a9, 0x0, 0x14a00005, 0x0,
0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
0x80037bf, 0x0, 0x8ee24e30, 0x24030040,
0x24420001, 0x50430003, 0x1021, 0x8ee24e30,
0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
0x24425038, 0x2e22021, 0x24020007, 0xac820000,
0x24020001, 0xac820004, 0x15200018, 0x3c050006,
0x8e020018, 0x3c040001, 0x24845890, 0xafa20010,
0x8e020000, 0x8e030004, 0x34a5f009, 0x2003021,
0xc002403, 0xafa30014, 0x32c200ff, 0x1040002b,
0x34028100, 0x8e430004, 0x8e440008, 0x8e45000c,
0xa642000c, 0xae430000, 0xae440004, 0xae450008,
0x96020016, 0x80037f8, 0xa642000e, 0x154d000a,
0x0, 0x9602000e, 0xa613000a, 0x34420004,
0xa602000e, 0x3c010001, 0x370821, 0xa02083c0,
0x80037f6, 0x9821, 0x9604000a, 0x93102b,
0x10400002, 0x2601821, 0x801821, 0x24020001,
0xa603000a, 0x3c010001, 0x370821, 0xa02283c0,
0x9604000a, 0x2248821, 0x191102b, 0x10400003,
0x3c02fff5, 0x34421000, 0x2228821, 0x2649823,
0xa821, 0x1660fef4, 0xadc80000, 0x12600021,
0x32c200ff, 0x3c010001, 0x370821, 0xac3383c4,
0x3c010001, 0x370821, 0xac3183c8, 0x3c010001,
0x370821, 0x10400008, 0xac3283cc, 0x3c020001,
0x571021, 0x8c4283cc, 0x24420004, 0x3c010001,
0x370821, 0xac2283cc, 0x8ee2724c, 0x8f430280,
0x24420001, 0x14620006, 0x0, 0x8ee201c4,
0x24420001, 0xaee201c4, 0x8003850, 0x8ee201c4,
0x8ee201bc, 0x24420001, 0xaee201bc, 0x8003850,
0x8ee201bc, 0x97a4001e, 0x2484fffc, 0x801821,
0x8ee400c0, 0x8ee500c4, 0x1021, 0xa32821,
0xa3302b, 0x822021, 0x862021, 0x24020002,
0xaee400c0, 0xaee500c4, 0x1282000f, 0x2a820003,
0x14400017, 0x24020003, 0x16820015, 0x0,
0x8ee200d0, 0x8ee300d4, 0x24630001, 0x2c640001,
0x441021, 0xaee200d0, 0xaee300d4, 0x8ee200d0,
0x800384a, 0x8ee300d4, 0x8ee200d8, 0x8ee300dc,
0x24630001, 0x2c640001, 0x441021, 0xaee200d8,
0xaee300dc, 0x8ee200d8, 0x800384a, 0x8ee300dc,
0x8ee200c8, 0x8ee300cc, 0x24630001, 0x2c640001,
0x441021, 0xaee200c8, 0xaee300cc, 0x8ee200c8,
0x8ee300cc, 0x8f8300e4, 0x8f8200e0, 0x10620003,
0x24630008, 0xaf8300e4, 0xaf8300e8, 0x8fbf004c,
0x8fb60048, 0x8fb50044, 0x8fb40040, 0x8fb3003c,
0x8fb20038, 0x8fb10034, 0x8fb00030, 0x3e00008,
0x27bd0050, 0x27bdff90, 0xafb60060, 0xb021,
0xafbf0068, 0xafbe0064, 0xafb5005c, 0xafb40058,
0xafb30054, 0xafb20050, 0xafb1004c, 0xafb00048,
0x8ee204d4, 0x8821, 0x24150001, 0x30420001,
0x1440002a, 0xa3a0002f, 0x8f8700e0, 0x8f8800c4,
0x8f8200e8, 0xe22023, 0x2c821000, 0x50400001,
0x24841000, 0x420c2, 0x801821, 0x8ee400c8,
0x8ee500cc, 0x1021, 0xa32821, 0xa3302b,
0x822021, 0x862021, 0xaee400c8, 0xaee500cc,
0x8f8300c8, 0x3c02000a, 0x3442efff, 0x1032023,
0x44102b, 0x10400003, 0x3c02000a, 0x3442f000,
0x822021, 0x801821, 0x8ee400c0, 0x8ee500c4,
0x1021, 0xa32821, 0xa3302b, 0x822021,
0x862021, 0xaee400c0, 0xaee500c4, 0xaf8800c8,
0xaf8700e4, 0x8003c5b, 0xaf8700e8, 0x3c020001,
0x571021, 0x904283c0, 0x1040000b, 0x0,
0x3c130001, 0x2779821, 0x8e7383c4, 0x3c100001,
0x2178021, 0x8e1083c8, 0x3c120001, 0x2579021,
0x8003a59, 0x8e5283cc, 0x8f8300e0, 0x8f8200e4,
0x10430007, 0x3821, 0x8f8200e4, 0x24070001,
0x8c430000, 0x8c440004, 0xafa30018, 0xafa4001c,
0x14e0000e, 0x3c02ffff, 0x8f8200c4, 0xafa20010,
0x8f8200c8, 0x3c040001, 0x248458b4, 0xafa20014,
0x8f8600e0, 0x8f8700e4, 0x3c050006, 0xc002403,
0x34a5f200, 0x8003c5b, 0x0, 0x8fa3001c,
0x8fb20018, 0x3073ffff, 0x2673fffc, 0x621024,
0x10400058, 0x2408021, 0x3c020080, 0x621024,
0x1040000a, 0x3c040040, 0x8ee2007c, 0x24420001,
0xaee2007c, 0x8ee2007c, 0x8ee201fc, 0x24420001,
0xaee201fc, 0x8003c55, 0x8ee201fc, 0x3c060004,
0x3c0b0001, 0x3c0a0002, 0x3c050010, 0x3c090008,
0x8ee20080, 0x3c080020, 0x34078000, 0x24420001,
0xaee20080, 0x8ee20080, 0x8fa2001c, 0x441824,
0x10660021, 0xc3102b, 0x14400007, 0x0,
0x106b0011, 0x0, 0x106a0015, 0x0,
0x8003916, 0x42042, 0x10650023, 0xa3102b,
0x14400005, 0x0, 0x10690019, 0x0,
0x8003916, 0x42042, 0x10680021, 0x0,
0x8003916, 0x42042, 0x8ee20034, 0x24420001,
0xaee20034, 0x8ee20034, 0x8003916, 0x42042,
0x8ee201ec, 0x24420001, 0xaee201ec, 0x8ee201ec,
0x8003916, 0x42042, 0x8ee201f0, 0x24420001,
0xaee201f0, 0x8ee201f0, 0x8003916, 0x42042,
0x8ee201f4, 0x24420001, 0xaee201f4, 0x8ee201f4,
0x8003916, 0x42042, 0x8ee20030, 0x24420001,
0xaee20030, 0x8ee20030, 0x8003916, 0x42042,
0x8ee201f8, 0x24420001, 0xaee201f8, 0x8ee201f8,
0x42042, 0x1087033e, 0x0, 0x80038db,
0x0, 0x3c020001, 0x571021, 0x904283b2,
0x14400084, 0x24020001, 0x3c030001, 0x771821,
0x906383b3, 0x1462007f, 0x3c020100, 0x8e430000,
0x621024, 0x1040006f, 0x2402ffff, 0x14620005,
0x24110001, 0x96430004, 0x3402ffff, 0x10620075,
0x0, 0x92e204d8, 0x14400072, 0x0,
0x3c020001, 0x571021, 0x8c4283b4, 0x28420005,
0x10400020, 0x3821, 0x3c020001, 0x571021,
0x8c4283b4, 0x18400016, 0x2821, 0x96060000,
0x520c0, 0x971021, 0x9442777e, 0x14460009,
0x971021, 0x94437780, 0x96020002, 0x14620005,
0x971021, 0x94437782, 0x96020004, 0x50620008,
0x24070001, 0x3c020001, 0x571021, 0x8c4283b4,
0x24a50001, 0xa2102a, 0x5440ffee, 0x520c0,
0x30e200ff, 0x10400302, 0x0, 0x80039a2,
0x0, 0x2402021, 0xc0022fe, 0x24050006,
0x3044001f, 0x428c0, 0x2e51021, 0x9442727c,
0x30424000, 0x144002f6, 0xb71021, 0x9443727e,
0x96020000, 0x1462000b, 0x418c0, 0xb71021,
0x94437280, 0x96020002, 0x14620006, 0x418c0,
0xb71021, 0x94437282, 0x96020004, 0x10620035,
0x418c0, 0x2e31021, 0x9442727c, 0x30428000,
0x144002e3, 0x2e31021, 0x944d727c, 0x96070000,
0xd28c0, 0xb71021, 0x9442737e, 0x8003984,
0x3021, 0x420c0, 0x2e41021, 0x9443737c,
0x2e41021, 0x944d737c, 0x30638000, 0x14600010,
0xd28c0, 0xb71021, 0x9442737e, 0x1447fff5,
0x1a02021, 0xb71021, 0x94437380, 0x96020002,
0x5462fff1, 0x420c0, 0xb71021, 0x94437382,
0x96020004, 0x5462ffec, 0x420c0, 0x24060001,
0x30c200ff, 0x104002c2, 0x0, 0x80039a2,
0x0, 0x97430202, 0x96420000, 0x146202bc,
0x0, 0x97430204, 0x96420002, 0x146202b8,
0x0, 0x97430206, 0x96420004, 0x146202b4,
0x0, 0x92420000, 0x3a230001, 0x30420001,
0x431024, 0x10400074, 0x2402ffff, 0x8e030000,
0x14620004, 0x3402ffff, 0x96030004, 0x1062006f,
0x24150002, 0x3c020001, 0x571021, 0x904283b2,
0x1440006a, 0x24150003, 0x92e204d8, 0x14400067,
0x0, 0x3c020001, 0x571021, 0x8c4283b4,
0x28420005, 0x10400020, 0x3821, 0x3c020001,
0x571021, 0x8c4283b4, 0x18400016, 0x2821,
0x96060000, 0x520c0, 0x971021, 0x9442777e,
0x14460009, 0x971021, 0x94437780, 0x96020002,
0x14620005, 0x971021, 0x94437782, 0x96020004,
0x50620008, 0x24070001, 0x3c020001, 0x571021,
0x8c4283b4, 0x24a50001, 0xa2102a, 0x5440ffee,
0x520c0, 0x30e200ff, 0x14400044, 0x24150003,
0x8003c55, 0x0, 0x2402021, 0xc0022fe,
0x24050006, 0x3044001f, 0x428c0, 0x2e51021,
0x9442727c, 0x30424000, 0x14400271, 0xb71021,
0x9443727e, 0x96020000, 0x1462000b, 0x418c0,
0xb71021, 0x94437280, 0x96020002, 0x14620006,
0x418c0, 0xb71021, 0x94437282, 0x96020004,
0x10620027, 0x418c0, 0x2e31021, 0x9442727c,
0x30428000, 0x1440025e, 0x2e31021, 0x944d727c,
0x96070000, 0xd28c0, 0xb71021, 0x9442737e,
0x8003a09, 0x3021, 0x420c0, 0x2e41021,
0x9443737c, 0x2e41021, 0x944d737c, 0x30638000,
0x14600010, 0xd28c0, 0xb71021, 0x9442737e,
0x1447fff5, 0x1a02021, 0xb71021, 0x94437380,
0x96020002, 0x5462fff1, 0x420c0, 0xb71021,
0x94437382, 0x96020004, 0x5462ffec, 0x420c0,
0x24060001, 0x30c200ff, 0x1040023d, 0x0,
0x8003a1c, 0x24150003, 0x24150001, 0x8f420260,
0x53102b, 0x10400036, 0x0, 0x8f8300e4,
0x8f8200e0, 0x10620003, 0x24630008, 0xaf8300e4,
0xaf8300e8, 0x8ee400c0, 0x8ee500c4, 0x2601821,
0x1021, 0xa32821, 0xa3302b, 0x822021,
0x862021, 0xaee400c0, 0xaee500c4, 0x8ee20058,
0x24420001, 0xaee20058, 0x8ee20058, 0x8ee2007c,
0x24420001, 0xaee2007c, 0x8ee2007c, 0x8f8200e0,
0xafa20010, 0x8f8200e4, 0x3c040001, 0x248458c0,
0xafa20014, 0x8fa60018, 0x8fa7001c, 0x3c050006,
0xc002403, 0x34a5f203, 0x8003c5b, 0x0,
0x8ee25240, 0xafa20010, 0x8ee25244, 0x3c040001,
0x248458cc, 0xafa20014, 0x8ee60e10, 0x8ee70e18,
0x3c050006, 0xc002403, 0x34a5f202, 0x8ee201c0,
0x24420001, 0xaee201c0, 0x8003c02, 0x8ee201c0,
0x96e20468, 0x53102b, 0x54400001, 0x3c168000,
0x126001cb, 0x3c0e001f, 0x35ceffff, 0x3c0ffff5,
0x35ef1000, 0x241e0040, 0x8ee2724c, 0x8f430280,
0x24420001, 0x304207ff, 0x1062019e, 0x0,
0x12c00012, 0x0, 0x8ee35240, 0x8ee25244,
0x1062000a, 0x26f85244, 0x8ef45244, 0xafb80024,
0x8ee35244, 0x21140, 0x24425248, 0x2e28821,
0x24630001, 0x8003a85, 0x306d00ff, 0x8ee201e0,
0x24420001, 0xaee201e0, 0x8ee201e0, 0x8ee30e10,
0x8ee20e18, 0x1062ffca, 0x26f80e18, 0x8ef40e18,
0xb021, 0xafb80024, 0x8ee30e18, 0x21140,
0x24420e20, 0x2e28821, 0x24630001, 0x306d01ff,
0x96e2046a, 0x30420010, 0x10400018, 0x34028100,
0x9643000c, 0x14620015, 0x0, 0x3c020001,
0x571021, 0x904283c0, 0x14400010, 0x0,
0x9642000e, 0xa6220016, 0x8e420008, 0x8e430004,
0x8e440000, 0x2673fffc, 0xae42000c, 0xae430008,
0xae440004, 0x9622000e, 0x26100004, 0x24180001,
0xa3b8002f, 0x34420200, 0xa622000e, 0x8e220000,
0x8e230004, 0x3c040001, 0x34843800, 0x2003021,
0x306a0007, 0x20a8023, 0x3641021, 0x202102b,
0x10400005, 0x26a9821, 0x2041023, 0x3621823,
0x3c020020, 0x438023, 0x26620007, 0x9623000a,
0x2418fff8, 0x58c824, 0x6a1821, 0x79102b,
0x10400002, 0x3206021, 0x606021, 0x1801821,
0x24620007, 0x2418fff8, 0x586024, 0x26c102b,
0x14400004, 0x1932823, 0x1832823, 0x8003ac3,
0xc31021, 0xd31021, 0x4a2023, 0x1c4102b,
0x54400001, 0x8f2021, 0x25420040, 0x4c102b,
0x14400035, 0x5821, 0x94c3000c, 0x24020800,
0x54620032, 0xae260018, 0x3c020001, 0x571021,
0x904283c0, 0x5440002d, 0xae260018, 0x24c20017,
0x1c2102b, 0x10400013, 0x0, 0x3c02fff5,
0x461021, 0x90421017, 0x38430006, 0x2c630001,
0x38420011, 0x2c420001, 0x621825, 0x10600014,
0x24c20010, 0x1c2102b, 0x1040000e, 0x0,
0x3c0bfff5, 0x1665821, 0x956b1010, 0x8003af4,
0x2562000e, 0x90c20017, 0x38430006, 0x2c630001,
0x38420011, 0x2c420001, 0x621825, 0x10600005,
0x1601821, 0x94cb0010, 0x2562000e, 0x4a5821,
0x1601821, 0x24620007, 0x2418fff8, 0x585824,
0xc31021, 0x4a2023, 0x1c4102b, 0x10400002,
27780x1632823, 0x8f2021, 0xae260018, 0x3c020001,
27790x571021, 0x904283c0, 0x2102b, 0x216c0,
27800x15600002, 0xafa20044, 0x1805821, 0x30820001,
27810x10400007, 0x4021, 0x90880000, 0x24840001,
27820x1c4102b, 0x10400002, 0x24a5ffff, 0x8f2021,
27830x50a00012, 0x81c02, 0x2ca20002, 0x54400009,
27840x24a5ffff, 0x94820000, 0x24840002, 0x1024021,
27850x1c4102b, 0x10400006, 0x24a5fffe, 0x8003b21,
27860x8f2021, 0x90820000, 0x21200, 0x1024021,
27870x14a0fff2, 0x2ca20002, 0x81c02, 0x3102ffff,
27880x624021, 0x3108ffff, 0x1402821, 0x11400011,
27890x2002021, 0x2ca20002, 0x54400009, 0x24a5ffff,
27900x94820000, 0x24840002, 0x1024021, 0x1c4102b,
27910x10400006, 0x24a5fffe, 0x8003b38, 0x8f2021,
27920x90820000, 0x21200, 0x1024021, 0x14a0fff2,
27930x2ca20002, 0x81c02, 0x3102ffff, 0x624021,
27940x81c02, 0x3102ffff, 0x8f890120, 0x624021,
27950x27623800, 0x25230020, 0x62102b, 0x14400002,
27960x3108ffff, 0x27633000, 0x8f820128, 0x10620004,
27970x0, 0x8f820124, 0x14620007, 0x1402821,
27980x8ee201a4, 0x3821, 0x24420001, 0xaee201a4,
27990x8003bc9, 0x8ee201a4, 0x8e260000, 0x8e270004,
28000x81400, 0x3448000b, 0xad300008, 0xa52b000e,
28010xad280018, 0x8fb80044, 0x2021, 0x2961025,
28020x581025, 0xad22001c, 0xe5102b, 0xe53823,
28030xc43023, 0xc23023, 0xad260000, 0xad270004,
28040x8ee204c0, 0xad220010, 0xaf830120, 0x92e24e20,
28050x1440005f, 0x24070001, 0x2502ffee, 0x2c420002,
28060x14400003, 0x24020011, 0x15020024, 0x0,
28070x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
28080x8c830000, 0x24020012, 0x1462000f, 0x0,
28090x8ee34e30, 0x8ee24e34, 0x1062000b, 0x0,
28100x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
28110x8ee34e30, 0x24420001, 0x105e002a, 0x0,
28120x8003ba8, 0x0, 0x8ee24e30, 0x24420001,
28130x505e0003, 0x1021, 0x8ee24e30, 0x24420001,
28140xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
28150x2e22021, 0x8003bc6, 0x24020012, 0x8ee24e30,
28160x210c0, 0x24425038, 0x2e22021, 0x8c830000,
28170x24020007, 0x1462001f, 0x0, 0x8ee34e30,
28180x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
28190x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
28200x24420001, 0x105e0007, 0x0, 0x8ee24e34,
28210x24420001, 0x10620005, 0x0, 0x8003bb4,
28220x0, 0x14600005, 0x0, 0x8f820128,
28230x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
28240x2c420011, 0x50400012, 0xac800000, 0x8003bc9,
28250x0, 0x8ee24e30, 0x24420001, 0x505e0003,
28260x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
28270x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
28280x24020007, 0xac820000, 0x24020001, 0xac820004,
28290x14e00019, 0x3c050006, 0x3c040001, 0x24845890,
28300x8e220018, 0x34a5f209, 0xafa20010, 0x8e220000,
28310x8e230004, 0x2203021, 0x1603821, 0xc002403,
28320xafa30014, 0x93a2002f, 0x1040002a, 0x34028100,
28330x8e430004, 0x8e440008, 0x8e45000c, 0xa642000c,
28340xae430000, 0xae440004, 0xae450008, 0x96220016,
28350x8003c02, 0xa642000e, 0x1599000a, 0x26a1823,
28360x9622000e, 0xa623000a, 0x34420004, 0xa622000e,
28370x3c010001, 0x370821, 0xa02083c0, 0x8003bff,
28380x9821, 0x9624000a, 0x83102b, 0x54400001,
28390x801821, 0x24020001, 0xa623000a, 0x3c010001,
28400x370821, 0xa02283c0, 0x9622000a, 0x4a1821,
28410x2038021, 0x1d0102b, 0x54400001, 0x20f8021,
28420x2639823, 0xb021, 0x8fb80024, 0x1660fe5e,
28430xaf0d0000, 0x12600022, 0x0, 0x3c010001,
28440x370821, 0xac3383c4, 0x3c010001, 0x370821,
28450xac3083c8, 0x3c010001, 0x370821, 0xac3283cc,
28460x93a2002f, 0x10400008, 0x0, 0x3c020001,
28470x571021, 0x8c4283cc, 0x24420004, 0x3c010001,
28480x370821, 0xac2283cc, 0x8f430280, 0x8ee2724c,
28490x14620006, 0x0, 0x8ee201c4, 0x24420001,
28500xaee201c4, 0x8003c5b, 0x8ee201c4, 0x8ee201bc,
28510x24420001, 0xaee201bc, 0x8003c5b, 0x8ee201bc,
28520x97a4001e, 0x2484fffc, 0x801821, 0x8ee400c0,
28530x8ee500c4, 0x1021, 0xa32821, 0xa3302b,
28540x822021, 0x862021, 0x24020002, 0xaee400c0,
28550xaee500c4, 0x12a2000f, 0x2aa20003, 0x14400017,
28560x24020003, 0x16a20015, 0x0, 0x8ee200d0,
28570x8ee300d4, 0x24630001, 0x2c640001, 0x441021,
28580xaee200d0, 0xaee300d4, 0x8ee200d0, 0x8003c55,
28590x8ee300d4, 0x8ee200d8, 0x8ee300dc, 0x24630001,
28600x2c640001, 0x441021, 0xaee200d8, 0xaee300dc,
28610x8ee200d8, 0x8003c55, 0x8ee300dc, 0x8ee200c8,
28620x8ee300cc, 0x24630001, 0x2c640001, 0x441021,
28630xaee200c8, 0xaee300cc, 0x8ee200c8, 0x8ee300cc,
28640x8f8300e4, 0x8f8200e0, 0x10620003, 0x24630008,
28650xaf8300e4, 0xaf8300e8, 0x8fbf0068, 0x8fbe0064,
28660x8fb60060, 0x8fb5005c, 0x8fb40058, 0x8fb30054,
28670x8fb20050, 0x8fb1004c, 0x8fb00048, 0x3e00008,
28680x27bd0070, 0x27bdffe0, 0xafbf0018, 0x8ee30e14,
28690x8ee20e0c, 0x10620074, 0x0, 0x8ee30e0c,
28700x8ee20e14, 0x622023, 0x4820001, 0x24840200,
28710x8ee30e18, 0x8ee20e14, 0x43102b, 0x14400004,
28720x24020200, 0x8ee30e14, 0x8003c7d, 0x431823,
28730x8ee20e18, 0x8ee30e14, 0x431023, 0x2443ffff,
28740x804821, 0x69102a, 0x54400001, 0x604821,
28750x8f870100, 0x27623000, 0x24e80020, 0x102102b,
28760x50400001, 0x27682800, 0x8f820108, 0x11020004,
28770x0, 0x8f820104, 0x15020007, 0x1021,
28780x8ee201a8, 0x2021, 0x24420001, 0xaee201a8,
28790x8003cbf, 0x8ee201a8, 0x8ee40e14, 0x42140,
28800x801821, 0x8ee40460, 0x8ee50464, 0xa32821,
28810xa3302b, 0x822021, 0x862021, 0xace40000,
28820xace50004, 0x8ee30e14, 0x91140, 0xa4e2000e,
28830x24020002, 0xace20018, 0x31940, 0x24630e20,
28840x2e31021, 0xace20008, 0x8ee20e14, 0xace2001c,
28850x8ee204cc, 0xace20010, 0xaf880100, 0x92e204ec,
28860x14400011, 0x24040001, 0x8ee24e28, 0x24030040,
28870x24420001, 0x50430003, 0x1021, 0x8ee24e28,
28880x24420001, 0xaee24e28, 0x8ee24e28, 0x210c0,
28890x24424e38, 0x2e21821, 0x24020002, 0xac620000,
28900x24020001, 0xac620004, 0x1480000e, 0x24030040,
28910x8ee20e14, 0xafa20010, 0x8ee20e18, 0x3c050007,
28920xafa20014, 0x8ee60e0c, 0x8ee70e10, 0x3c040001,
28930x248458d4, 0xc002403, 0x34a5f001, 0x8003cdd,
28940x0, 0x8ee20500, 0x24420001, 0x50430003,
28950x1021, 0x8ee20500, 0x24420001, 0xaee20500,
28960x8ee20500, 0x21080, 0x571021, 0xac490508,
28970x8ee20e14, 0x491021, 0x304201ff, 0xaee20e14,
28980x8ee30e14, 0x8ee20e0c, 0x14620005, 0x0,
28990x8f820060, 0x2403fdff, 0x431024, 0xaf820060,
29000x8fbf0018, 0x3e00008, 0x27bd0020, 0x27bdffe0,
29010xafbf0018, 0x8ee3523c, 0x8ee25238, 0x10620074,
29020x0, 0x8ee35238, 0x8ee2523c, 0x622023,
29030x4820001, 0x24840100, 0x8ee35244, 0x8ee2523c,
29040x43102b, 0x14400004, 0x24020100, 0x8ee3523c,
29050x8003cff, 0x431823, 0x8ee25244, 0x8ee3523c,
29060x431023, 0x2443ffff, 0x804821, 0x69102a,
29070x54400001, 0x604821, 0x8f870100, 0x27623000,
29080x24e80020, 0x102102b, 0x50400001, 0x27682800,
29090x8f820108, 0x11020004, 0x0, 0x8f820104,
29100x15020007, 0x1021, 0x8ee201a8, 0x2021,
29110x24420001, 0xaee201a8, 0x8003d41, 0x8ee201a8,
29120x8ee4523c, 0x42140, 0x801821, 0x8ee40470,
29130x8ee50474, 0xa32821, 0xa3302b, 0x822021,
29140x862021, 0xace40000, 0xace50004, 0x8ee3523c,
29150x91140, 0xa4e2000e, 0x24020003, 0xace20018,
29160x31940, 0x24635248, 0x2e31021, 0xace20008,
29170x8ee2523c, 0xace2001c, 0x8ee204cc, 0xace20010,
29180xaf880100, 0x92e204ec, 0x14400011, 0x24040001,
29190x8ee24e28, 0x24030040, 0x24420001, 0x50430003,
29200x1021, 0x8ee24e28, 0x24420001, 0xaee24e28,
29210x8ee24e28, 0x210c0, 0x24424e38, 0x2e21821,
29220x24020003, 0xac620000, 0x24020001, 0xac620004,
29230x1480000e, 0x24030040, 0x8ee2523c, 0xafa20010,
29240x8ee25244, 0x3c050007, 0xafa20014, 0x8ee65238,
29250x8ee75240, 0x3c040001, 0x248458e0, 0xc002403,
29260x34a5f010, 0x8003d5f, 0x0, 0x8ee20500,
29270x24420001, 0x50430003, 0x1021, 0x8ee20500,
29280x24420001, 0xaee20500, 0x8ee20500, 0x21080,
29290x571021, 0xac490508, 0x8ee2523c, 0x491021,
29300x304200ff, 0xaee2523c, 0x8ee3523c, 0x8ee25238,
29310x14620005, 0x0, 0x8f820060, 0x2403feff,
29320x431024, 0xaf820060, 0x8fbf0018, 0x3e00008,
29330x27bd0020, 0x8f820120, 0x8ee34e34, 0x8f820124,
29340x8f860128, 0x24020040, 0x24630001, 0x50620003,
29350x1021, 0x8ee24e34, 0x24420001, 0xaee24e34,
29360x8ee24e34, 0x8ee44e34, 0x8ee34e30, 0x210c0,
29370x24425038, 0x14830007, 0x2e22821, 0x8f820128,
29380x24420020, 0xaf820128, 0x8f820128, 0x8003d92,
29390xaca00000, 0x8ee24e34, 0x24030040, 0x24420001,
29400x50430003, 0x1021, 0x8ee24e34, 0x24420001,
29410x210c0, 0x24425038, 0x2e22821, 0x8ca20004,
29420x8f830128, 0x21140, 0x621821, 0xaf830128,
29430xaca00000, 0x8cc20018, 0x2443fffe, 0x2c620012,
29440x10400008, 0x31080, 0x3c010001, 0x220821,
29450x8c2258f0, 0x400008, 0x0, 0x24020001,
29460xaee24e24, 0x3e00008, 0x0, 0x27bdffc8,
29470xafbf0030, 0xafb5002c, 0xafb40028, 0xafb30024,
29480xafb20020, 0xafb1001c, 0xafb00018, 0x8f830128,
29490x8f820124, 0x106202b0, 0x9821, 0x3c11001f,
29500x3631ffff, 0x3c12fff5, 0x36521000, 0x24150012,
29510x24140040, 0x8f8c0128, 0x8f820128, 0x24420020,
29520xaf820128, 0x9182001b, 0x8f830128, 0x2443fffe,
29530x2c620012, 0x1040029c, 0x31080, 0x3c010001,
29540x220821, 0x8c225948, 0x400008, 0x0,
29550x8f420218, 0x30420100, 0x10400007, 0x0,
29560x95830016, 0x95820018, 0x621823, 0x31402,
29570x431021, 0xa5820016, 0x8d82001c, 0x3c038000,
29580x3044ffff, 0x436824, 0x3c030800, 0x431824,
29590x11a00004, 0xad84001c, 0x41140, 0x8003dd8,
29600x24425248, 0x41140, 0x24420e20, 0x2e25821,
29610x9562000e, 0x3042fffc, 0x10600004, 0xa562000e,
29620x95840016, 0x8003ec0, 0x0, 0x8d690018,
29630x4021, 0x952a0000, 0x25290002, 0x95270000,
29640x25290002, 0x95260000, 0x25290002, 0x95250000,
29650x25290002, 0x95240000, 0x25290002, 0x95230000,
29660x25290002, 0x95220000, 0x25290002, 0x1475021,
29670x1465021, 0x1455021, 0x1445021, 0x1435021,
29680x1425021, 0xa1c02, 0x3142ffff, 0x625021,
29690xa1c02, 0x3142ffff, 0x625021, 0x96e2046a,
29700x314effff, 0x30420002, 0x10400044, 0x5021,
29710x25220014, 0x222102b, 0x10400014, 0x1201821,
29720x2405000a, 0x2021, 0x223102b, 0x54400001,
29730x721821, 0x94620000, 0x24630002, 0x24a5ffff,
29740x14a0fff9, 0x822021, 0x41c02, 0x3082ffff,
29750x622021, 0x41402, 0x3083ffff, 0x431021,
29760x3042ffff, 0x8003e33, 0x1425021, 0x952a0000,
29770x25290002, 0x95280000, 0x25290002, 0x95270000,
29780x25290002, 0x95260000, 0x25290002, 0x95250000,
29790x25290002, 0x95230000, 0x25290002, 0x95220000,
29800x25290002, 0x95240000, 0x25290002, 0x1485021,
29810x1475021, 0x1465021, 0x1455021, 0x1435021,
29820x1425021, 0x95220000, 0x95230002, 0x1445021,
29830x1425021, 0x1435021, 0xa1c02, 0x3142ffff,
29840x625021, 0xa1c02, 0x3142ffff, 0x625021,
29850x3148ffff, 0x51000001, 0x3408ffff, 0x8d620018,
29860x9443000c, 0x24020800, 0x54620005, 0xa5680010,
29870x9562000e, 0x34420002, 0xa562000e, 0xa5680010,
29880x96e2046a, 0x2821, 0x30420008, 0x14400056,
29890x3021, 0x8d630018, 0x24620024, 0x222102b,
29900x10400034, 0x24690010, 0x229102b, 0x54400001,
29910x1324821, 0x95250000, 0x24690014, 0x229102b,
29920x10400002, 0x24a5ffec, 0x1324821, 0x95220000,
29930x30420fff, 0x14400003, 0x25290002, 0x8003e60,
29940x24130001, 0x9821, 0xa03021, 0x229102b,
29950x54400001, 0x1324821, 0x91220001, 0x25290002,
29960xa22821, 0x229102b, 0x54400001, 0x1324821,
29970x25290002, 0x229102b, 0x54400001, 0x1324821,
29980x95220000, 0x25290002, 0xa22821, 0x229102b,
29990x54400001, 0x1324821, 0x95220000, 0x25290002,
30000xa22821, 0x229102b, 0x54400001, 0x1324821,
30010x95220000, 0x25290002, 0xa22821, 0x229102b,
30020x54400001, 0x1324821, 0x95220000, 0x8003e99,
30030xa22821, 0x94650010, 0x94620014, 0x24690016,
30040x30420fff, 0x14400003, 0x24a5ffec, 0x8003e8c,
30050x24130001, 0x9821, 0xa03021, 0x91230001,
30060x25290004, 0x95220000, 0x25290002, 0x95240000,
30070x25290002, 0xa32821, 0xa22821, 0x95220000,
30080x95230002, 0xa42821, 0xa22821, 0xa32821,
30090x51c02, 0x30a2ffff, 0x622821, 0x51c02,
30100x30a2ffff, 0x622821, 0x96e2046a, 0x30420001,
30110x1040001e, 0x2021, 0x95820016, 0x4e2023,
30120x41402, 0x822021, 0x326200ff, 0x50400002,
30130x862021, 0x852021, 0x41402, 0x822021,
30140x3084ffff, 0x50800001, 0x3404ffff, 0x8d620018,
30150x24430017, 0x223102b, 0x54400001, 0x721821,
30160x90620000, 0x38430011, 0x2c630001, 0x38420006,
30170x2c420001, 0x621825, 0x10600004, 0x0,
30180x9562000e, 0x34420001, 0xa562000e, 0x9562000e,
30190x240a0002, 0x30420004, 0x10400002, 0xa5640012,
30200x240a0004, 0x8f880120, 0x27623800, 0x25090020,
30210x122102b, 0x50400001, 0x27693000, 0x8f820128,
30220x11220004, 0x0, 0x8f820124, 0x15220007,
30230x24040020, 0x8ee201a4, 0x8021, 0x24420001,
30240xaee201a4, 0x8003f4f, 0x8ee201a4, 0x8ee5724c,
30250x8ee60490, 0x8ee70494, 0xad0b0008, 0xa504000e,
30260xad0a0018, 0x52940, 0xa01821, 0x1021,
30270xe33821, 0xe3202b, 0xc23021, 0xc43021,
30280xad060000, 0xad070004, 0x8ee2724c, 0x4d1025,
30290xad02001c, 0x8ee204c4, 0xad020010, 0xaf890120,
30300x92e24e20, 0x14400060, 0x24100001, 0x2543ffee,
30310x2c630002, 0x39420011, 0x2c420001, 0x621825,
30320x10600024, 0x0, 0x8ee24e30, 0x210c0,
30330x24425038, 0x2e22021, 0x8c820000, 0x1455000f,
30340x0, 0x8ee34e30, 0x8ee24e34, 0x1062000b,
30350x0, 0x8c820004, 0x24420001, 0xac820004,
30360x8ee24e34, 0x8ee34e30, 0x24420001, 0x1054002b,
30370x0, 0x8003f2e, 0x0, 0x8ee24e30,
30380x24420001, 0x50540003, 0x1021, 0x8ee24e30,
30390x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
30400x24425038, 0x2e22021, 0x24020001, 0x8003f4e,
30410xac950000, 0x8ee24e30, 0x210c0, 0x24425038,
30420x2e22021, 0x8c830000, 0x24020007, 0x1462001f,
30430x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
30440x0, 0x8c820004, 0x24420001, 0xac820004,
30450x8ee24e34, 0x8ee34e30, 0x24420001, 0x10540007,
30460x0, 0x8ee24e34, 0x24420001, 0x10620005,
30470x0, 0x8003f3a, 0x0, 0x14600005,
30480x0, 0x8f820128, 0x24420020, 0xaf820128,
30490x8f820128, 0x8c820004, 0x2c420011, 0x50400012,
30500xac800000, 0x8003f4f, 0x0, 0x8ee24e30,
30510x24420001, 0x50540003, 0x1021, 0x8ee24e30,
30520x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
30530x24425038, 0x2e22021, 0x24020007, 0xac820000,
30540x24020001, 0xac820004, 0x1600000d, 0x0,
30550x8f820120, 0x3c040001, 0x24845938, 0xafa00014,
30560xafa20010, 0x8d86001c, 0x8f870124, 0x3c050008,
30570xc002403, 0x34a50001, 0x8004057, 0x0,
30580x8ee2724c, 0x24420001, 0x304207ff, 0x11a00006,
30590xaee2724c, 0x8ee201d0, 0x2442ffff, 0xaee201d0,
30600x8003f6b, 0x8ee201d0, 0x8ee201cc, 0x2442ffff,
30610xaee201cc, 0x8ee201cc, 0x8ee201d8, 0x2442ffff,
30620xaee201d8, 0x8004057, 0x8ee201d8, 0x8f420240,
30630x104000e5, 0x0, 0x8ee20e1c, 0x24420001,
30640x8004057, 0xaee20e1c, 0x9582001e, 0xad82001c,
30650x8f420240, 0x10400072, 0x0, 0x8ee20e1c,
30660x24420001, 0xaee20e1c, 0x8f430240, 0x43102b,
30670x144000d5, 0x0, 0x8f830120, 0x27623800,
30680x24660020, 0xc2102b, 0x50400001, 0x27663000,
30690x8f820128, 0x10c20004, 0x0, 0x8f820124,
30700x14c20007, 0x0, 0x8ee201a4, 0x8021,
30710x24420001, 0xaee201a4, 0x8003fda, 0x8ee201a4,
30720x8ee2724c, 0xac62001c, 0x8ee404a8, 0x8ee504ac,
30730x2462001c, 0xac620008, 0x24020008, 0xa462000e,
30740x24020011, 0xac620018, 0xac640000, 0xac650004,
30750x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
30760x14400034, 0x24100001, 0x8ee24e30, 0x210c0,
30770x24425038, 0x2e22021, 0x8c820000, 0x1455001f,
30780x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
30790x0, 0x8c820004, 0x24420001, 0xac820004,
30800x8ee24e34, 0x8ee34e30, 0x24420001, 0x10540007,
30810x0, 0x8ee24e34, 0x24420001, 0x10620005,
30820x0, 0x8003fc6, 0x0, 0x14600005,
30830x0, 0x8f820128, 0x24420020, 0xaf820128,
30840x8f820128, 0x8c820004, 0x2c420011, 0x50400011,
30850xac800000, 0x8003fda, 0x0, 0x8ee24e30,
30860x24420001, 0x50540003, 0x1021, 0x8ee24e30,
30870x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
30880x24425038, 0x2e22021, 0x24020001, 0xac950000,
30890xac820004, 0x5600000b, 0x24100001, 0x8ee2724c,
30900x3c040001, 0x248458a8, 0xafa00014, 0xafa20010,
30910x8ee6724c, 0x8f470280, 0x3c050009, 0xc002403,
30920x34a5f008, 0x56000001, 0xaee00e1c, 0x8ee20188,
30930x24420001, 0xaee20188, 0x8004050, 0x8ee20188,
30940x8f830120, 0x27623800, 0x24660020, 0xc2102b,
30950x50400001, 0x27663000, 0x8f820128, 0x10c20004,
30960x0, 0x8f820124, 0x14c20007, 0x0,
30970x8ee201a4, 0x8021, 0x24420001, 0xaee201a4,
30980x8004044, 0x8ee201a4, 0x8ee2724c, 0xac62001c,
30990x8ee404a8, 0x8ee504ac, 0x2462001c, 0xac620008,
31000x24020008, 0xa462000e, 0x24020011, 0xac620018,
31010xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
31020xaf860120, 0x92e24e20, 0x14400034, 0x24100001,
31030x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
31040x8c820000, 0x1455001f, 0x0, 0x8ee34e30,
31050x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
31060x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
31070x24420001, 0x10540007, 0x0, 0x8ee24e34,
31080x24420001, 0x10620005, 0x0, 0x8004030,
31090x0, 0x14600005, 0x0, 0x8f820128,
31100x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
31110x2c420011, 0x50400011, 0xac800000, 0x8004044,
31120x0, 0x8ee24e30, 0x24420001, 0x50540003,
31130x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
31140x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
31150x24020001, 0xac950000, 0xac820004, 0x1600000b,
31160x0, 0x8ee2724c, 0x3c040001, 0x248458a8,
31170xafa00014, 0xafa20010, 0x8ee6724c, 0x8f470280,
31180x3c050009, 0xc002403, 0x34a5f008, 0x8ee20174,
31190x24420001, 0xaee20174, 0x8004057, 0x8ee20174,
31200x24020001, 0xaee24e24, 0x8f830128, 0x8f820124,
31210x1462fd58, 0x0, 0x8fbf0030, 0x8fb5002c,
31220x8fb40028, 0x8fb30024, 0x8fb20020, 0x8fb1001c,
31230x8fb00018, 0x3e00008, 0x27bd0038, 0x27bdffe8,
31240x27840208, 0x27450200, 0x24060008, 0xafbf0014,
31250xc00249a, 0xafb00010, 0x2021, 0x24100001,
31260x2402241f, 0xaf900210, 0xaf900200, 0xaf800204,
31270xaf820214, 0x8f460248, 0x24030004, 0x3c020040,
31280x3c010001, 0xac235cc4, 0x3c010001, 0xac235cc8,
31290x3c010001, 0xac205d9c, 0x3c010001, 0xac225cc0,
31300x3c010001, 0xac235cc8, 0xc005108, 0x24050004,
31310xc004822, 0x0, 0x8ee20000, 0x3c03feff,
31320x3463fffd, 0x431024, 0xaee20000, 0x3c023c00,
31330xaf82021c, 0x3c010001, 0x370821, 0xac3083ac,
31340x8fbf0014, 0x8fb00010, 0x3e00008, 0x27bd0018,
31350x27bdffe0, 0x3c050008, 0x34a50400, 0xafbf0018,
31360xafa00010, 0xafa00014, 0x8f860200, 0x3c040001,
31370x248459f0, 0xc002403, 0x3821, 0x8ee20280,
31380x24420001, 0xaee20280, 0x8ee20280, 0x8f830200,
31390x3c023f00, 0x621824, 0x8fbf0018, 0x3c020400,
31400x3e00008, 0x27bd0020, 0x27bdffd8, 0xafbf0020,
31410xafb1001c, 0xafb00018, 0x8f900220, 0x8ee20214,
31420x3821, 0x24420001, 0xaee20214, 0x8ee20214,
31430x3c020300, 0x2021024, 0x10400027, 0x3c110400,
31440xc00429b, 0x0, 0x3c020100, 0x2021024,
31450x10400007, 0x0, 0x8ee20218, 0x24420001,
31460xaee20218, 0x8ee20218, 0x80040c6, 0x3c03fdff,
31470x8ee2021c, 0x24420001, 0xaee2021c, 0x8ee2021c,
31480x3c03fdff, 0x3463ffff, 0x3c0808ff, 0x3508ffff,
31490x8ee20000, 0x3c040001, 0x248459fc, 0x3c050008,
31500x2003021, 0x431024, 0xaee20000, 0x8f820220,
31510x3821, 0x3c030300, 0x481024, 0x431025,
31520xaf820220, 0xafa00010, 0xc002403, 0xafa00014,
31530x8004296, 0x0, 0x2111024, 0x1040001f,
31540x3c024000, 0x8f830224, 0x24021402, 0x1462000b,
31550x3c03fdff, 0x3c040001, 0x24845a08, 0x3c050008,
31560xafa00010, 0xafa00014, 0x8f860224, 0x34a5ffff,
31570xc002403, 0x3821, 0x3c03fdff, 0x8ee20000,
31580x3463ffff, 0x2002021, 0x431024, 0xc004e54,
31590xaee20000, 0x8ee20220, 0x24420001, 0xaee20220,
31600x8ee20220, 0x8f820220, 0x3c0308ff, 0x3463ffff,
31610x431024, 0x8004295, 0x511025, 0x2021024,
31620x10400142, 0x0, 0x8ee2022c, 0x24420001,
31630xaee2022c, 0x8ee2022c, 0x8f820220, 0x3c0308ff,
31640x3463ffff, 0x431024, 0x34420004, 0xaf820220,
31650x8f830054, 0x8f820054, 0x800410e, 0x24630002,
31660x8f820054, 0x621023, 0x2c420003, 0x1440fffc,
31670x0, 0x8f8600e0, 0x8f8400e4, 0x30c20007,
31680x10400012, 0x0, 0x8f8300e4, 0x2402fff8,
31690xc21024, 0x1043000d, 0x0, 0x8f820054,
31700x8f8300e0, 0x14c30009, 0x24440050, 0x8f820054,
31710x821023, 0x2c420051, 0x10400004, 0x0,
31720x8f8200e0, 0x10c2fff9, 0x0, 0x8f820220,
31730x3c0308ff, 0x3463fffd, 0x431024, 0xaf820220,
31740x8f8600e0, 0x30c20007, 0x10400003, 0x2402fff8,
31750xc23024, 0xaf8600e0, 0x8f8300c4, 0x3c02001f,
31760x3442ffff, 0x24680008, 0x48102b, 0x10400003,
31770x3c02fff5, 0x34421000, 0x1024021, 0x8f8b00c8,
31780x8f850120, 0x8f840124, 0x8004145, 0x6021,
31790x27623800, 0x82102b, 0x50400001, 0x27643000,
31800x10a40010, 0x318200ff, 0x8c820018, 0x38430007,
31810x2c630001, 0x3842000b, 0x2c420001, 0x621825,
31820x5060fff3, 0x24840020, 0x8ee20240, 0x240c0001,
31830x24420001, 0xaee20240, 0x8ee20240, 0x8c8b0008,
31840x318200ff, 0x14400065, 0x0, 0x3c020001,
31850x571021, 0x904283c0, 0x14400060, 0x0,
31860x8f8400e4, 0xc41023, 0x218c3, 0x4620001,
31870x24630200, 0x8f8900c4, 0x10600005, 0x24020001,
31880x10620009, 0x0, 0x8004187, 0x0,
31890x8ee20230, 0x1205821, 0x24420001, 0xaee20230,
31900x80041bc, 0x8ee20230, 0x8ee20234, 0x3c05000a,
31910x24420001, 0xaee20234, 0x8c8b0000, 0x34a5f000,
31920x8ee20234, 0x12b1823, 0xa3102b, 0x54400001,
31930x651821, 0x2c62233f, 0x14400040, 0x0,
31940x8f8200e8, 0x24420008, 0xaf8200e8, 0x8f8200e8,
31950x8f8200e4, 0x1205821, 0x24420008, 0xaf8200e4,
31960x80041bc, 0x8f8200e4, 0x8ee20238, 0x3c03000a,
31970x24420001, 0xaee20238, 0x8c840000, 0x3463f000,
31980x8ee20238, 0x883823, 0x67102b, 0x54400001,
31990xe33821, 0x3c020003, 0x34420d40, 0x47102b,
32000x10400003, 0x0, 0x80041bc, 0x805821,
32010x8f8200e4, 0x24440008, 0xaf8400e4, 0x8f8400e4,
32020x10860018, 0x3c05000a, 0x34a5f000, 0x3c0a0003,
32030x354a0d40, 0x8ee2007c, 0x24420001, 0xaee2007c,
32040x8c830000, 0x8ee2007c, 0x683823, 0xa7102b,
32050x54400001, 0xe53821, 0x147102b, 0x54400007,
32060x605821, 0x8f8200e4, 0x24440008, 0xaf8400e4,
32070x8f8400e4, 0x1486ffef, 0x0, 0x14860005,
32080x0, 0x1205821, 0xaf8600e4, 0x80041bc,
32090xaf8600e8, 0xaf8400e4, 0xaf8400e8, 0x8f8200c8,
32100x3c03000a, 0x3463f000, 0x483823, 0x67102b,
32110x54400001, 0xe33821, 0x3c020003, 0x34420d3f,
32120x47102b, 0x54400007, 0x6021, 0x1683823,
32130x67102b, 0x54400003, 0xe33821, 0x80041cf,
32140x3c020003, 0x3c020003, 0x34420d3f, 0x47102b,
32150x14400016, 0x318200ff, 0x14400006, 0x0,
32160x3c020001, 0x571021, 0x904283c0, 0x1040000f,
32170x0, 0x8ee2023c, 0x3c04fdff, 0x8ee30000,
32180x3484ffff, 0x24420001, 0xaee2023c, 0x8ee2023c,
32190x24020001, 0x641824, 0x3c010001, 0x370821,
32200xa02283b8, 0x800422c, 0xaee30000, 0xaf8b00c8,
32210x8f8300c8, 0x8f8200c4, 0x3c04000a, 0x3484f000,
32220x623823, 0x87102b, 0x54400001, 0xe43821,
32230x3c020003, 0x34420d40, 0x47102b, 0x2ce30001,
32240x431025, 0x10400008, 0x0, 0x8f820220,
32250x3c0308ff, 0x3463ffff, 0x431024, 0x3c034000,
32260x431025, 0xaf820220, 0x8f8600e0, 0x8f8400e4,
32270x10c4002a, 0x0, 0x8ee2007c, 0x24420001,
32280xaee2007c, 0x8ee2007c, 0x24c2fff8, 0xaf8200e0,
32290x3c020001, 0x8c427e30, 0x3c030008, 0x8f8600e0,
32300x431024, 0x1040001d, 0x0, 0x10c4001b,
32310x240dfff8, 0x3c0a000a, 0x354af000, 0x3c0c0080,
32320x24850008, 0x27622800, 0x50a20001, 0x27651800,
32330x8c880004, 0x8c820000, 0x8ca90000, 0x3103ffff,
32340x431021, 0x4d1024, 0x24430010, 0x6b102b,
32350x54400001, 0x6a1821, 0x12b102b, 0x54400001,
32360x12a4821, 0x10690002, 0x10c1025, 0xac820004,
32370xa02021, 0x14c4ffeb, 0x24850008, 0x8f820220,
32380x3c0308ff, 0x3463ffff, 0x431024, 0x34420002,
32390xaf820220, 0x8f830054, 0x8f820054, 0x8004237,
32400x24630001, 0x8f820054, 0x621023, 0x2c420002,
32410x1440fffc, 0x0, 0x8f820220, 0x3c0308ff,
32420x3463fffb, 0x431024, 0xaf820220, 0x6010055,
32430x0, 0x8ee20228, 0x24420001, 0xaee20228,
32440x8ee20228, 0x8f820220, 0x3c0308ff, 0x3463ffff,
32450x431024, 0x34420004, 0xaf820220, 0x8f830054,
32460x8f820054, 0x8004251, 0x24630002, 0x8f820054,
32470x621023, 0x2c420003, 0x1440fffc, 0x0,
32480x8f8600e0, 0x30c20007, 0x10400012, 0x0,
32490x8f8300e4, 0x2402fff8, 0xc21024, 0x1043000d,
32500x0, 0x8f820054, 0x8f8300e0, 0x14c30009,
32510x24440032, 0x8f820054, 0x821023, 0x2c420033,
32520x10400004, 0x0, 0x8f8200e0, 0x10c2fff9,
32530x0, 0x8f820220, 0x3c0308ff, 0x3463fffd,
32540x431024, 0xaf820220, 0x8f8600e0, 0x30c20007,
32550x10400003, 0x2402fff8, 0xc23024, 0xaf8600e0,
32560x240301f5, 0x8f8200e8, 0x673823, 0x718c0,
32570x431021, 0xaf8200e8, 0x8f8200e8, 0xaf8200e4,
32580x8ee2007c, 0x3c0408ff, 0x3484ffff, 0x471021,
32590xaee2007c, 0x8f820220, 0x3c038000, 0x34630002,
32600x441024, 0x431025, 0xaf820220, 0x8f830054,
32610x8f820054, 0x800428d, 0x24630001, 0x8f820054,
32620x621023, 0x2c420002, 0x1440fffc, 0x0,
32630x8f820220, 0x3c0308ff, 0x3463fffb, 0x431024,
32640xaf820220, 0x8fbf0020, 0x8fb1001c, 0x8fb00018,
32650x3e00008, 0x27bd0028, 0x3c020001, 0x8c425cd8,
32660x27bdffd8, 0x10400012, 0xafbf0020, 0x3c040001,
32670x24845a14, 0x3c050008, 0x24020001, 0x3c010001,
32680x370821, 0xac2283ac, 0xafa00010, 0xafa00014,
32690x8f860220, 0x34a50498, 0x3c010001, 0xac205cd8,
32700x3c010001, 0xac225ccc, 0xc002403, 0x3821,
32710x8f420268, 0x3c037fff, 0x3463ffff, 0x431024,
32720xaf420268, 0x8ee204d0, 0x8ee404d4, 0x2403fffe,
32730x431024, 0x30840002, 0x1080011e, 0xaee204d0,
32740x8ee204d4, 0x2403fffd, 0x431024, 0xaee204d4,
32750x8f820044, 0x3c030600, 0x34632000, 0x34420020,
32760xaf820044, 0xafa30018, 0x8ee20608, 0x8f430228,
32770x24420001, 0x304a00ff, 0x514300fe, 0xafa00010,
32780x8ee20608, 0x210c0, 0x571021, 0x8fa30018,
32790x8fa4001c, 0xac43060c, 0xac440610, 0x8f830054,
32800x8f820054, 0x24690032, 0x1221023, 0x2c420033,
32810x1040006a, 0x5821, 0x24180008, 0x240f000d,
32820x240d0007, 0x240c0040, 0x240e0001, 0x8f870120,
32830x27623800, 0x24e80020, 0x102102b, 0x50400001,
32840x27683000, 0x8f820128, 0x11020004, 0x0,
32850x8f820124, 0x15020007, 0x1021, 0x8ee201a4,
32860x2821, 0x24420001, 0xaee201a4, 0x800433d,
32870x8ee201a4, 0x8ee40608, 0x420c0, 0x801821,
32880x8ee40430, 0x8ee50434, 0xa32821, 0xa3302b,
32890x822021, 0x862021, 0xace40000, 0xace50004,
32900x8ee20608, 0xa4f8000e, 0xacef0018, 0xacea001c,
32910x210c0, 0x2442060c, 0x2e21021, 0xace20008,
32920x8ee204c4, 0xace20010, 0xaf880120, 0x92e24e20,
32930x14400033, 0x24050001, 0x8ee24e30, 0x210c0,
32940x24425038, 0x2e22021, 0x8c820000, 0x144d001f,
32950x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
32960x0, 0x8c820004, 0x24420001, 0xac820004,
32970x8ee24e34, 0x8ee34e30, 0x24420001, 0x104c0007,
32980x0, 0x8ee24e34, 0x24420001, 0x10620005,
32990x0, 0x800432a, 0x0, 0x14600005,
33000x0, 0x8f820128, 0x24420020, 0xaf820128,
33010x8f820128, 0x8c820004, 0x2c420011, 0x50400010,
33020xac800000, 0x800433d, 0x0, 0x8ee24e30,
33030x24420001, 0x504c0003, 0x1021, 0x8ee24e30,
33040x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
33050x24425038, 0x2e22021, 0xac8d0000, 0xac8e0004,
33060x54a00006, 0x240b0001, 0x8f820054, 0x1221023,
33070x2c420033, 0x1440ff9d, 0x0, 0x316300ff,
33080x24020001, 0x54620079, 0xafa00010, 0xaeea0608,
33090x8f830054, 0x8f820054, 0x24690032, 0x1221023,
33100x2c420033, 0x10400061, 0x5821, 0x240d0008,
33110x240c0011, 0x24080012, 0x24070040, 0x240a0001,
33120x8f830120, 0x27623800, 0x24660020, 0xc2102b,
33130x50400001, 0x27663000, 0x8f820128, 0x10c20004,
33140x0, 0x8f820124, 0x14c20007, 0x0,
33150x8ee201a4, 0x2821, 0x24420001, 0xaee201a4,
33160x80043a9, 0x8ee201a4, 0x8ee20608, 0xac62001c,
33170x8ee404a0, 0x8ee504a4, 0x2462001c, 0xac620008,
33180xa46d000e, 0xac6c0018, 0xac640000, 0xac650004,
33190x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
33200x14400033, 0x24050001, 0x8ee24e30, 0x210c0,
33210x24425038, 0x2e22021, 0x8c820000, 0x1448001f,
33220x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
33230x0, 0x8c820004, 0x24420001, 0xac820004,
33240x8ee24e34, 0x8ee34e30, 0x24420001, 0x10470007,
33250x0, 0x8ee24e34, 0x24420001, 0x10620005,
33260x0, 0x8004396, 0x0, 0x14600005,
33270x0, 0x8f820128, 0x24420020, 0xaf820128,
33280x8f820128, 0x8c820004, 0x2c420011, 0x50400010,
33290xac800000, 0x80043a9, 0x0, 0x8ee24e30,
33300x24420001, 0x50470003, 0x1021, 0x8ee24e30,
33310x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
33320x24425038, 0x2e22021, 0xac880000, 0xac8a0004,
33330x54a00006, 0x240b0001, 0x8f820054, 0x1221023,
33340x2c420033, 0x1440ffa6, 0x0, 0x316300ff,
33350x24020001, 0x54620003, 0xafa00010, 0x80043d6,
33360x0, 0x3c040001, 0x24845a20, 0xafa00014,
33370x8f860120, 0x8f870124, 0x3c050009, 0xc002403,
33380x34a5f011, 0x80043d6, 0x0, 0x3c040001,
33390x24845a2c, 0xafa00014, 0x8f860120, 0x8f870124,
33400x3c050009, 0xc002403, 0x34a5f010, 0x80043d6,
33410x0, 0x3c040001, 0x24845a38, 0xafa00014,
33420x8ee60608, 0x8f470228, 0x3c050009, 0xc002403,
33430x34a5f00f, 0x8ee201ac, 0x24420001, 0xaee201ac,
33440x8ee201ac, 0x8ee2015c, 0x24420001, 0xaee2015c,
33450x8ee2015c, 0x8fbf0020, 0x3e00008, 0x27bd0028,
33460x3c020001, 0x8c425cd8, 0x27bdffe0, 0x1440000d,
33470xafbf0018, 0x3c040001, 0x24845a44, 0x3c050008,
33480xafa00010, 0xafa00014, 0x8f860220, 0x34a50499,
33490x24020001, 0x3c010001, 0xac225cd8, 0xc002403,
33500x3821, 0x8ee204d0, 0x3c030001, 0x771821,
33510x946383b2, 0x34420001, 0x10600007, 0xaee204d0,
33520x8f820220, 0x3c0308ff, 0x3463ffff, 0x431024,
33530x34420008, 0xaf820220, 0x2021, 0xc0052a2,
33540x24050004, 0xaf420268, 0x8fbf0018, 0x3e00008,
33550x27bd0020, 0x0, 0x0, 0x0,
33560x0, 0x0, 0x0, 0x0,
33570x0, 0x0, 0x0, 0x0,
33580x0, 0x0, 0x0, 0x0,
33590x0, 0x0, 0x0, 0x0,
33600x0, 0x0, 0x0, 0x0,
33610x0, 0x0, 0x0, 0x0,
33620x0, 0x0, 0x0, 0x0,
33630x0, 0x0, 0x0, 0x0,
33640x0, 0x0, 0x0, 0x0,
33650x0, 0x0, 0x0, 0x0,
33660x0, 0x0, 0x0, 0x0,
33670x0, 0x0, 0x0, 0x0,
33680x0, 0x0, 0x0, 0x0,
33690x0, 0x0, 0x0, 0x0,
33700x0, 0x0, 0x0, 0x3c120001,
33710x26521200, 0x3c140001, 0x8e945c50, 0x3c100001,
33720x26101120, 0x3c15c000, 0x36b50060, 0x8e8a0000,
33730x8eb30000, 0x26a400b, 0x248000a, 0x200f821,
33740x0, 0xd, 0x0, 0x0,
33750x0, 0x0, 0x0, 0x0,
33760x0, 0x0, 0x0, 0x0,
33770x0, 0x0, 0x0, 0x0,
33780x0, 0x0, 0x0, 0x0,
33790x0, 0x0, 0x0, 0x0,
33800x0, 0x0, 0x0, 0x0,
33810x0, 0x0, 0x0, 0x0,
33820x0, 0x0, 0x0, 0x0,
33830x0, 0x0, 0x0, 0x0,
33840x0, 0x0, 0x0, 0x0,
33850x0, 0x0, 0x0, 0x0,
33860x0, 0x0, 0x0, 0x80014d6,
33870x0, 0x80014d8, 0x3c0a0001, 0x80014d8,
33880x3c0a0002, 0x80014d8, 0x0, 0x80024a6,
33890x0, 0x80014d8, 0x3c0a0003, 0x80014d8,
33900x3c0a0004, 0x8002f8c, 0x0, 0x80014d8,
33910x3c0a0005, 0x8003ce8, 0x0, 0x8003c66,
33920x0, 0x80014d8, 0x3c0a0006, 0x80014d8,
33930x3c0a0007, 0x80014d8, 0x0, 0x80014d8,
33940x0, 0x80014d8, 0x0, 0x8002a75,
33950x0, 0x80014d8, 0x3c0a000b, 0x80014d8,
33960x3c0a000c, 0x80014d8, 0x3c0a000d, 0x800237a,
33970x0, 0x8002339, 0x0, 0x80014d8,
33980x3c0a000e, 0x8001b3c, 0x0, 0x80024a4,
33990x0, 0x80014d8, 0x3c0a000f, 0x80040a7,
34000x0, 0x8004091, 0x0, 0x80014d8,
34010x3c0a0010, 0x80014ee, 0x0, 0x80014d8,
34020x3c0a0011, 0x80014d8, 0x3c0a0012, 0x80014d8,
34030x3c0a0013, 0x0, 0x0, 0x0,
34040x0, 0x0, 0x0, 0x0,
34050x0, 0x0, 0x0, 0x0,
34060x0, 0x0, 0x0, 0x0,
34070x0, 0x0, 0x0, 0x0,
34080x0, 0x0, 0x0, 0x0,
34090x0, 0x0, 0x0, 0x0,
34100x0, 0x0, 0x0, 0x0,
34110x0, 0x0, 0x0, 0x0,
34120x0, 0x0, 0x0, 0x0,
34130x0, 0x0, 0x0, 0x0,
34140x0, 0x0, 0x0, 0x0,
34150x0, 0x0, 0x0, 0x0,
34160x0, 0x0, 0x0, 0x0,
34170x0, 0x0, 0x0, 0x0,
34180x0, 0x0, 0x0, 0x3c030001,
34190x34633800, 0x24050080, 0x2404001f, 0x2406ffff,
34200x24020001, 0xaf80021c, 0xaf820200, 0xaf820220,
34210x3631021, 0xaf8200c0, 0x3631021, 0xaf8200c4,
34220x3631021, 0xaf8200c8, 0x27623800, 0xaf8200d0,
34230x27623800, 0xaf8200d4, 0x27623800, 0xaf8200d8,
34240x27621800, 0xaf8200e0, 0x27621800, 0xaf8200e4,
34250x27621800, 0xaf8200e8, 0x27621000, 0xaf8200f0,
34260x27621000, 0xaf8200f4, 0x27621000, 0xaf8200f8,
34270xaca00000, 0x2484ffff, 0x1486fffd, 0x24a50004,
34280x8f830040, 0x3c02f000, 0x621824, 0x3c025000,
34290x1062000c, 0x43102b, 0x14400006, 0x3c026000,
34300x3c024000, 0x10620008, 0x24020800, 0x8004539,
34310x0, 0x10620004, 0x24020800, 0x8004539,
34320x0, 0x24020700, 0x3c010001, 0xac225cdc,
34330x3e00008, 0x0, 0x27bdffd8, 0xafbf0024,
34340xafb00020, 0x8f830054, 0x8f820054, 0x3c010001,
34350xac205cc4, 0x8004545, 0x24630064, 0x8f820054,
34360x621023, 0x2c420065, 0x1440fffc, 0x0,
34370xc004d71, 0x0, 0x24040001, 0x2821,
34380x27a60018, 0x34028000, 0xc00498e, 0xa7a20018,
34390x8f830054, 0x8f820054, 0x8004556, 0x24630064,
34400x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
34410x24040001, 0x24050001, 0xc00494c, 0x27a60018,
34420x8f830054, 0x8f820054, 0x8004562, 0x24630064,
34430x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
34440x24040001, 0x24050001, 0xc00494c, 0x27a60018,
34450x8f830054, 0x8f820054, 0x800456e, 0x24630064,
34460x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
34470x24040001, 0x3c060001, 0x24c65da0, 0xc00494c,
34480x24050002, 0x8f830054, 0x8f820054, 0x800457b,
34490x24630064, 0x8f820054, 0x621023, 0x2c420065,
34500x1440fffc, 0x24040001, 0x24050003, 0x3c100001,
34510x26105da2, 0xc00494c, 0x2003021, 0x97a60018,
34520x3c070001, 0x94e75da0, 0x3c040001, 0x24845ab0,
34530xafa00014, 0x96020000, 0x3c05000d, 0x34a50100,
34540xc002403, 0xafa20010, 0x97a20018, 0x1040004c,
34550x24036040, 0x96020000, 0x3042fff0, 0x1443000a,
34560x24020020, 0x3c030001, 0x94635da0, 0x54620009,
34570x24027830, 0x24020003, 0x3c010001, 0xac225cc4,
34580x80045ac, 0x24020005, 0x3c030001, 0x94635da0,
34590x24027830, 0x1462000f, 0x24030010, 0x3c020001,
34600x94425da2, 0x3042fff0, 0x1443000a, 0x24020003,
34610x3c010001, 0xac225cc4, 0x24020006, 0x3c010001,
34620xac225db0, 0x3c010001, 0xac225dbc, 0x80045e6,
34630x3c09fff0, 0x3c020001, 0x8c425cc4, 0x3c030001,
34640x94635da0, 0x34420001, 0x3c010001, 0xac225cc4,
34650x24020015, 0x1462000f, 0x0, 0x3c020001,
34660x94425da2, 0x3042fff0, 0x3843f420, 0x2c630001,
34670x3842f430, 0x2c420001, 0x621825, 0x10600005,
34680x24020003, 0x3c010001, 0xac225dbc, 0x80045e6,
34690x3c09fff0, 0x3c030001, 0x94635da0, 0x24027810,
34700x1462000b, 0x24020002, 0x3c020001, 0x94425da2,
34710x3042fff0, 0x14400006, 0x24020002, 0x24020004,
34720x3c010001, 0xac225dbc, 0x80045e6, 0x3c09fff0,
34730x3c010001, 0xac225dbc, 0x80045e6, 0x3c09fff0,
34740x3c020001, 0x8c425cc4, 0x24030001, 0x3c010001,
34750xac235dbc, 0x34420004, 0x3c010001, 0xac225cc4,
34760x3c09fff0, 0x3529bdc0, 0x3c060001, 0x8cc65cc4,
34770x3c040001, 0x24845ab0, 0x24020001, 0x3c010001,
34780xac225ccc, 0x8f820054, 0x3c070001, 0x8ce75dbc,
34790x3c030001, 0x94635da0, 0x3c080001, 0x95085da2,
34800x3c05000d, 0x34a50100, 0x3c010001, 0xac205cc8,
34810x491021, 0x3c010001, 0xac225dac, 0xafa30010,
34820xc002403, 0xafa80014, 0x8fbf0024, 0x8fb00020,
34830x3e00008, 0x27bd0028, 0x27bdffe8, 0x3c050001,
34840x8ca55cc8, 0x24060004, 0x24020001, 0x14a20014,
34850xafbf0010, 0x3c020001, 0x8c427e3c, 0x30428000,
34860x10400005, 0x3c04000f, 0x3c030001, 0x8c635dbc,
34870x8004617, 0x34844240, 0x3c040004, 0x3c030001,
34880x8c635dbc, 0x348493e0, 0x24020005, 0x14620016,
34890x0, 0x3c04003d, 0x800462f, 0x34840900,
34900x3c020001, 0x8c427e38, 0x30428000, 0x10400005,
34910x3c04001e, 0x3c030001, 0x8c635dbc, 0x800462a,
34920x34848480, 0x3c04000f, 0x3c030001, 0x8c635dbc,
34930x34844240, 0x24020005, 0x14620003, 0x0,
34940x3c04007a, 0x34841200, 0x3c020001, 0x8c425dac,
34950x8f830054, 0x441021, 0x431023, 0x44102b,
34960x14400037, 0x0, 0x3c020001, 0x8c425cd0,
34970x14400033, 0x0, 0x3c010001, 0x10c00025,
34980xac205ce0, 0x3c090001, 0x8d295cc4, 0x24070001,
34990x3c044000, 0x3c080001, 0x25087e3c, 0x250afffc,
35000x52842, 0x14a00002, 0x24c6ffff, 0x24050008,
35010xa91024, 0x10400010, 0x0, 0x14a70008,
35020x0, 0x8d020000, 0x441024, 0x1040000a,
35030x0, 0x3c010001, 0x800465b, 0xac255ce0,
35040x8d420000, 0x441024, 0x10400003, 0x0,
35050x3c010001, 0xac275ce0, 0x3c020001, 0x8c425ce0,
35060x6182b, 0x2c420001, 0x431024, 0x5440ffe5,
35070x52842, 0x8f820054, 0x3c030001, 0x8c635ce0,
35080x3c010001, 0xac225dac, 0x1060002a, 0x24020001,
35090x3c010001, 0xac255cc8, 0x3c010001, 0xac225ccc,
35100x3c020001, 0x8c425ce0, 0x10400022, 0x0,
35110x3c020001, 0x8c425ccc, 0x1040000a, 0x24020001,
35120x3c010001, 0xac205ccc, 0x3c010001, 0x370821,
35130xac2283ac, 0x3c010001, 0xac205d4c, 0x3c010001,
35140xac225d04, 0x3c030001, 0x771821, 0x8c6383ac,
35150x24020008, 0x10620005, 0x24020001, 0xc004695,
35160x0, 0x8004692, 0x0, 0x3c030001,
35170x8c635cc8, 0x10620007, 0x2402000e, 0x3c030001,
35180x8c637dd0, 0x10620003, 0x0, 0xc004e54,
35190x8f840220, 0x8fbf0010, 0x3e00008, 0x27bd0018,
35200x27bdffe0, 0x3c02fdff, 0xafbf0018, 0x8ee30000,
35210x3c050001, 0x8ca55cc8, 0x3c040001, 0x8c845cf0,
35220x3442ffff, 0x621824, 0x14a40008, 0xaee30000,
35230x3c030001, 0x771821, 0x8c6383ac, 0x3c020001,
35240x8c425cf4, 0x10620008, 0x0, 0x3c020001,
35250x571021, 0x8c4283ac, 0x3c010001, 0xac255cf0,
35260x3c010001, 0xac225cf4, 0x3c030001, 0x8c635cc8,
35270x24020002, 0x10620169, 0x2c620003, 0x10400005,
35280x24020001, 0x10620008, 0x0, 0x800481c,
35290x0, 0x24020004, 0x106200b1, 0x24020001,
35300x800481d, 0x0, 0x3c020001, 0x571021,
35310x8c4283ac, 0x2443ffff, 0x2c620008, 0x1040015a,
35320x31080, 0x3c010001, 0x220821, 0x8c225ac8,
35330x400008, 0x0, 0x3c030001, 0x8c635dbc,
35340x24020005, 0x14620014, 0x0, 0x3c020001,
35350x8c425cd4, 0x1040000a, 0x24020003, 0xc004822,
35360x0, 0x24020002, 0x3c010001, 0x370821,
35370xac2283ac, 0x3c010001, 0x80046e0, 0xac205cd4,
35380x3c010001, 0x370821, 0xac2283ac, 0x3c010001,
35390x800481f, 0xac205c60, 0xc004822, 0x0,
35400x3c020001, 0x8c425cd4, 0x3c010001, 0xac205c60,
35410x104000dd, 0x24020002, 0x3c010001, 0x370821,
35420xac2283ac, 0x3c010001, 0x800481f, 0xac205cd4,
35430x3c030001, 0x8c635dbc, 0x24020005, 0x14620003,
35440x24020001, 0x3c010001, 0xac225d00, 0xc0049cf,
35450x0, 0x3c030001, 0x8c635d00, 0x800478e,
35460x24020011, 0x3c050001, 0x8ca55cc8, 0x3c060001,
35470x8cc67e3c, 0xc005108, 0x2021, 0x24020005,
35480x3c010001, 0xac205cd4, 0x3c010001, 0x370821,
35490x800481f, 0xac2283ac, 0x3c040001, 0x24845abc,
35500x3c05000f, 0x34a50100, 0x3021, 0x3821,
35510xafa00010, 0xc002403, 0xafa00014, 0x800481f,
35520x0, 0x8f820220, 0x3c03f700, 0x431025,
35530x80047b7, 0xaf820220, 0x8f820220, 0x3c030004,
35540x431024, 0x144000a9, 0x24020007, 0x8f830054,
35550x3c020001, 0x8c425da4, 0x2463d8f0, 0x431023,
35560x2c422710, 0x144000f8, 0x24020001, 0x800481d,
35570x0, 0x3c050001, 0x8ca55cc8, 0xc0052a2,
35580x2021, 0xc005386, 0x2021, 0x3c030001,
35590x8c637e34, 0x46100ea, 0x24020001, 0x3c020008,
35600x621024, 0x10400006, 0x0, 0x8f820214,
35610x3c03ffff, 0x431024, 0x8004741, 0x3442251f,
35620x8f820214, 0x3c03ffff, 0x431024, 0x3442241f,
35630xaf820214, 0x8ee20000, 0x3c030200, 0x431025,
35640xaee20000, 0x8f820220, 0x2403fffb, 0x431024,
35650xaf820220, 0x8f820220, 0x34420002, 0xaf820220,
35660x24020008, 0x3c010001, 0x370821, 0xac2283ac,
35670x8f820220, 0x3c030004, 0x431024, 0x14400005,
35680x0, 0x8f820220, 0x3c03f700, 0x431025,
35690xaf820220, 0x3c030001, 0x8c635dbc, 0x24020005,
35700x1462000a, 0x0, 0x3c020001, 0x94425da2,
35710x24429fbc, 0x2c420004, 0x10400004, 0x24040018,
35720x24050002, 0xc004d93, 0x24060020, 0xc0043dd,
35730x0, 0x3c010001, 0x800481f, 0xac205d50,
35740x3c020001, 0x571021, 0x8c4283ac, 0x2443ffff,
35750x2c620008, 0x104000ac, 0x31080, 0x3c010001,
35760x220821, 0x8c225ae8, 0x400008, 0x0,
35770xc00429b, 0x0, 0x3c010001, 0xac205ccc,
35780xaf800204, 0x3c010001, 0xc004822, 0xac207e20,
35790x24020001, 0x3c010001, 0xac225ce4, 0x24020002,
35800x3c010001, 0x370821, 0x800481f, 0xac2283ac,
35810xc00489f, 0x0, 0x3c030001, 0x8c635ce4,
35820x24020009, 0x14620090, 0x24020003, 0x3c010001,
35830x370821, 0x800481f, 0xac2283ac, 0x3c020001,
35840x8c427e38, 0x30424000, 0x10400005, 0x0,
35850x8f820044, 0x3c03ffff, 0x800479f, 0x34637fff,
35860x8f820044, 0x2403ff7f, 0x431024, 0xaf820044,
35870x8f830054, 0x80047b9, 0x24020004, 0x8f830054,
35880x3c020001, 0x8c425da4, 0x2463d8f0, 0x431023,
35890x2c422710, 0x14400074, 0x24020005, 0x3c010001,
35900x370821, 0x800481f, 0xac2283ac, 0x8f820220,
35910x3c03f700, 0x431025, 0xaf820220, 0xaf800204,
35920x3c010001, 0xac207e20, 0x8f830054, 0x24020006,
35930x3c010001, 0x370821, 0xac2283ac, 0x3c010001,
35940x800481f, 0xac235da4, 0x8f830054, 0x3c020001,
35950x8c425da4, 0x2463fff6, 0x431023, 0x2c42000a,
35960x14400059, 0x0, 0x24020007, 0x3c010001,
35970x370821, 0x800481f, 0xac2283ac, 0x8f820220,
35980x3c04f700, 0x441025, 0xaf820220, 0x8f820220,
35990x3c030300, 0x431024, 0x14400005, 0x1821,
36000x8f820220, 0x24030001, 0x441025, 0xaf820220,
36010x10600043, 0x24020001, 0x8f820214, 0x3c03ffff,
36020x3c040001, 0x8c845d98, 0x431024, 0x3442251f,
36030xaf820214, 0x24020008, 0x3c010001, 0x370821,
36040x1080000b, 0xac2283ac, 0x3c020001, 0x8c425d74,
36050x14400007, 0x24020001, 0x3c010001, 0xac227dd0,
36060xc004e54, 0x8f840220, 0x800480c, 0x0,
36070x8f820220, 0x3c030008, 0x431024, 0x14400017,
36080x2402000e, 0x3c010001, 0xac227dd0, 0x8ee20000,
36090x2021, 0x3c030200, 0x431025, 0xc005386,
36100xaee20000, 0x8f820220, 0x2403fffb, 0x431024,
36110xaf820220, 0x8f820220, 0x34420002, 0xc0043dd,
36120xaf820220, 0x3c050001, 0x8ca55cc8, 0xc0052a2,
36130x2021, 0x800481f, 0x0, 0x3c020001,
36140x8c425d74, 0x10400010, 0x0, 0x3c020001,
36150x8c425d70, 0x2442ffff, 0x3c010001, 0xac225d70,
36160x14400009, 0x24020002, 0x3c010001, 0xac205d74,
36170x3c010001, 0x800481f, 0xac225d70, 0x24020001,
36180x3c010001, 0xac225ccc, 0x8fbf0018, 0x3e00008,
36190x27bd0020, 0x8f820200, 0x8f820220, 0x8f820220,
36200x34420004, 0xaf820220, 0x8f820200, 0x3c060001,
36210x8cc65cc8, 0x34420004, 0xaf820200, 0x24020002,
36220x10c2003a, 0x2cc20003, 0x10400005, 0x24020001,
36230x10c20008, 0x0, 0x8004868, 0x0,
36240x24020004, 0x10c20013, 0x24020001, 0x8004868,
36250x0, 0x3c030001, 0x8c635cb8, 0x3c020001,
36260x8c425cc0, 0x3c040001, 0x8c845cdc, 0x3c050001,
36270x8ca55cbc, 0xaf860200, 0xaf860220, 0x34630022,
36280x441025, 0x451025, 0x34420002, 0x8004867,
36290xaf830200, 0x3c030001, 0x8c635d98, 0xaf820200,
36300x10600009, 0xaf820220, 0x3c020001, 0x8c425d74,
36310x14400005, 0x3c033f00, 0x3c020001, 0x8c425cb0,
36320x800485b, 0x346300e0, 0x3c020001, 0x8c425cb0,
36330x3c033f00, 0x346300e2, 0x431025, 0xaf820200,
36340x3c030001, 0x8c635cb4, 0x3c04f700, 0x3c020001,
36350x8c425cc0, 0x3c050001, 0x8ca55cdc, 0x641825,
36360x431025, 0x451025, 0xaf820220, 0x3e00008,
36370x0, 0x8f820220, 0x3c030001, 0x8c635cc8,
36380x34420004, 0xaf820220, 0x24020001, 0x1062000f,
36390x0, 0x8f830054, 0x8f820054, 0x24630002,
36400x621023, 0x2c420003, 0x10400011, 0x0,
36410x8f820054, 0x621023, 0x2c420003, 0x1040000c,
36420x0, 0x8004879, 0x0, 0x8f830054,
36430x8f820054, 0x8004885, 0x24630007, 0x8f820054,
36440x621023, 0x2c420008, 0x1440fffc, 0x0,
36450x8f8400e0, 0x30820007, 0x1040000d, 0x0,
36460x8f820054, 0x8f8300e0, 0x14830009, 0x24450032,
36470x8f820054, 0xa21023, 0x2c420033, 0x10400004,
36480x0, 0x8f8200e0, 0x1082fff9, 0x0,
36490x8f820220, 0x2403fffd, 0x431024, 0xaf820220,
36500x3e00008, 0x0, 0x3c030001, 0x8c635ce4,
36510x3c020001, 0x8c425ce8, 0x50620004, 0x2463ffff,
36520x3c010001, 0xac235ce8, 0x2463ffff, 0x2c620009,
36530x1040009d, 0x31080, 0x3c010001, 0x220821,
36540x8c225b08, 0x400008, 0x0, 0x8f820044,
36550x34428080, 0xaf820044, 0x8f830054, 0x8004938,
36560x24020002, 0x8f830054, 0x3c020001, 0x8c425da8,
36570x2463d8f0, 0x431023, 0x2c422710, 0x1440008a,
36580x24020003, 0x8004945, 0x0, 0x8f820044,
36590x3c03ffff, 0x34637fff, 0x431024, 0xaf820044,
36600x8f830054, 0x8004938, 0x24020004, 0x8f830054,
36610x3c020001, 0x8c425da8, 0x2463fff6, 0x431023,
36620x2c42000a, 0x14400078, 0x24020005, 0x8004945,
36630x0, 0x8f820220, 0x3c03f700, 0x431025,
36640xaf820220, 0x8f820220, 0x2403fffb, 0x431024,
36650xaf820220, 0x8f820220, 0x34420002, 0xaf820220,
36660x3c023f00, 0x344200e0, 0xaf820200, 0x8f820200,
36670x2403fffd, 0x431024, 0xaf820200, 0x24040001,
36680x3405ffff, 0xaf840204, 0x8f830054, 0x8f820054,
36690x80048ec, 0x24630001, 0x8f820054, 0x621023,
36700x2c420002, 0x1440fffc, 0x0, 0x8f820224,
36710x42040, 0xa4102b, 0x1040fff2, 0x0,
36720x8f820220, 0x3c03f700, 0x431025, 0xaf820220,
36730x8f820214, 0x3c03ffff, 0x431024, 0x3442251f,
36740xaf820214, 0x8f820220, 0x2403fffb, 0x431024,
36750xaf820220, 0x8f820220, 0x3c04f700, 0x34840008,
36760x34420002, 0xaf820220, 0x8f820220, 0x3c033f00,
36770x346300e2, 0x441025, 0xaf820220, 0xaf830200,
36780x8f8400f0, 0x276217f8, 0x14820002, 0x24850008,
36790x27651000, 0x8f8200f4, 0x10a20007, 0x3c038000,
36800x34630040, 0x3c020001, 0x24425c70, 0xac820000,
36810xac830004, 0xaf8500f0, 0x8f830054, 0x8004938,
36820x24020006, 0x8f830054, 0x3c020001, 0x8c425da8,
36830x2463fff6, 0x431023, 0x2c42000a, 0x14400022,
36840x24020007, 0x8004945, 0x0, 0x8f8200e0,
36850xaf8200e4, 0x8f8200e0, 0xaf8200e8, 0x8f820220,
36860x34420004, 0xaf820220, 0x8f820220, 0x2403fff7,
36870x431024, 0xaf820220, 0x8f820044, 0x34428080,
36880xaf820044, 0x8f830054, 0x24020008, 0x3c010001,
36890xac225ce4, 0x3c010001, 0x8004947, 0xac235da8,
36900x8f830054, 0x3c020001, 0x8c425da8, 0x2463d8f0,
36910x431023, 0x2c422710, 0x14400003, 0x24020009,
36920x3c010001, 0xac225ce4, 0x3e00008, 0x0,
36930x0, 0x0, 0x0, 0x27bdffd8,
36940xafb20018, 0x809021, 0xafb3001c, 0xa09821,
36950xafb10014, 0xc08821, 0xafb00010, 0x8021,
36960xafbf0020, 0xa6200000, 0xc004d4b, 0x24040001,
36970x26100001, 0x2e020020, 0x1440fffb, 0x0,
36980xc004d4b, 0x2021, 0xc004d4b, 0x24040001,
36990xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
37000x24100010, 0x2501024, 0x10400002, 0x2021,
37010x24040001, 0xc004d4b, 0x108042, 0x1600fffa,
37020x2501024, 0x24100010, 0x2701024, 0x10400002,
37030x2021, 0x24040001, 0xc004d4b, 0x108042,
37040x1600fffa, 0x2701024, 0xc004d71, 0x34108000,
37050xc004d71, 0x0, 0xc004d2b, 0x0,
37060x50400005, 0x108042, 0x96220000, 0x501025,
37070xa6220000, 0x108042, 0x1600fff7, 0x0,
37080xc004d71, 0x0, 0x8fbf0020, 0x8fb3001c,
37090x8fb20018, 0x8fb10014, 0x8fb00010, 0x3e00008,
37100x27bd0028, 0x27bdffd8, 0xafb10014, 0x808821,
37110xafb20018, 0xa09021, 0xafb3001c, 0xc09821,
37120xafb00010, 0x8021, 0xafbf0020, 0xc004d4b,
37130x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
37140x0, 0xc004d4b, 0x2021, 0xc004d4b,
37150x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
37160x24040001, 0x24100010, 0x2301024, 0x10400002,
37170x2021, 0x24040001, 0xc004d4b, 0x108042,
37180x1600fffa, 0x2301024, 0x24100010, 0x2501024,
37190x10400002, 0x2021, 0x24040001, 0xc004d4b,
37200x108042, 0x1600fffa, 0x2501024, 0xc004d4b,
37210x24040001, 0xc004d4b, 0x2021, 0x34108000,
37220x96620000, 0x501024, 0x10400002, 0x2021,
37230x24040001, 0xc004d4b, 0x108042, 0x1600fff8,
37240x0, 0xc004d71, 0x0, 0x8fbf0020,
37250x8fb3001c, 0x8fb20018, 0x8fb10014, 0x8fb00010,
37260x3e00008, 0x27bd0028, 0x3c030001, 0x8c635d00,
37270x3c020001, 0x8c425d48, 0x27bdffd8, 0xafbf0020,
37280xafb1001c, 0x10620003, 0xafb00018, 0x3c010001,
37290xac235d48, 0x2463ffff, 0x2c620013, 0x10400349,
37300x31080, 0x3c010001, 0x220821, 0x8c225b30,
37310x400008, 0x0, 0xc004d71, 0x8021,
37320x34028000, 0xa7a20010, 0x27b10010, 0xc004d4b,
37330x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
37340x0, 0xc004d4b, 0x2021, 0xc004d4b,
37350x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
37360x24040001, 0x24100010, 0x32020001, 0x10400002,
37370x2021, 0x24040001, 0xc004d4b, 0x108042,
37380x1600fffa, 0x32020001, 0x24100010, 0xc004d4b,
37390x2021, 0x108042, 0x1600fffc, 0x0,
37400xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
37410x34108000, 0x96220000, 0x501024, 0x10400002,
37420x2021, 0x24040001, 0xc004d4b, 0x108042,
37430x1600fff8, 0x0, 0xc004d71, 0x0,
37440x8004d24, 0x24020002, 0x27b10010, 0xa7a00010,
37450x8021, 0xc004d4b, 0x24040001, 0x26100001,
37460x2e020020, 0x1440fffb, 0x0, 0xc004d4b,
37470x2021, 0xc004d4b, 0x24040001, 0xc004d4b,
37480x24040001, 0xc004d4b, 0x2021, 0x24100010,
37490x32020001, 0x10400002, 0x2021, 0x24040001,
37500xc004d4b, 0x108042, 0x1600fffa, 0x32020001,
37510x24100010, 0xc004d4b, 0x2021, 0x108042,
37520x1600fffc, 0x0, 0xc004d71, 0x34108000,
37530xc004d71, 0x0, 0xc004d2b, 0x0,
37540x50400005, 0x108042, 0x96220000, 0x501025,
37550xa6220000, 0x108042, 0x1600fff7, 0x0,
37560xc004d71, 0x0, 0x97a20010, 0x30428000,
37570x144002dc, 0x24020003, 0x8004d24, 0x0,
37580x24021200, 0xa7a20010, 0x27b10010, 0x8021,
37590xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
37600x1440fffb, 0x0, 0xc004d4b, 0x2021,
37610xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
37620xc004d4b, 0x24040001, 0x24100010, 0x32020001,
37630x10400002, 0x2021, 0x24040001, 0xc004d4b,
37640x108042, 0x1600fffa, 0x32020001, 0x24100010,
37650xc004d4b, 0x2021, 0x108042, 0x1600fffc,
37660x0, 0xc004d4b, 0x24040001, 0xc004d4b,
37670x2021, 0x34108000, 0x96220000, 0x501024,
37680x10400002, 0x2021, 0x24040001, 0xc004d4b,
37690x108042, 0x1600fff8, 0x0, 0xc004d71,
37700x0, 0x8f830054, 0x8004d16, 0x24020004,
37710x8f830054, 0x3c020001, 0x8c425db8, 0x2463ff9c,
37720x431023, 0x2c420064, 0x1440029e, 0x24020002,
37730x3c030001, 0x8c635dbc, 0x10620297, 0x2c620003,
37740x14400296, 0x24020011, 0x24020003, 0x10620005,
37750x24020004, 0x10620291, 0x2402000f, 0x8004d24,
37760x24020011, 0x8004d24, 0x24020005, 0x24020014,
37770xa7a20010, 0x27b10010, 0x8021, 0xc004d4b,
37780x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
37790x0, 0xc004d4b, 0x2021, 0xc004d4b,
37800x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
37810x24040001, 0x24100010, 0x32020001, 0x10400002,
37820x2021, 0x24040001, 0xc004d4b, 0x108042,
37830x1600fffa, 0x32020001, 0x24100010, 0x32020012,
37840x10400002, 0x2021, 0x24040001, 0xc004d4b,
37850x108042, 0x1600fffa, 0x32020012, 0xc004d4b,
37860x24040001, 0xc004d4b, 0x2021, 0x34108000,
37870x96220000, 0x501024, 0x10400002, 0x2021,
37880x24040001, 0xc004d4b, 0x108042, 0x1600fff8,
37890x0, 0xc004d71, 0x0, 0x8f830054,
37900x8004d16, 0x24020006, 0x8f830054, 0x3c020001,
37910x8c425db8, 0x2463ff9c, 0x431023, 0x2c420064,
37920x14400250, 0x24020007, 0x8004d24, 0x0,
37930x24020006, 0xa7a20010, 0x27b10010, 0x8021,
37940xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
37950x1440fffb, 0x0, 0xc004d4b, 0x2021,
37960xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
37970xc004d4b, 0x24040001, 0x24100010, 0x32020001,
37980x10400002, 0x2021, 0x24040001, 0xc004d4b,
37990x108042, 0x1600fffa, 0x32020001, 0x24100010,
38000x32020013, 0x10400002, 0x2021, 0x24040001,
38010xc004d4b, 0x108042, 0x1600fffa, 0x32020013,
38020xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
38030x34108000, 0x96220000, 0x501024, 0x10400002,
38040x2021, 0x24040001, 0xc004d4b, 0x108042,
38050x1600fff8, 0x0, 0xc004d71, 0x0,
38060x8f830054, 0x8004d16, 0x24020008, 0x8f830054,
38070x3c020001, 0x8c425db8, 0x2463ff9c, 0x431023,
38080x2c420064, 0x1440020f, 0x24020009, 0x8004d24,
38090x0, 0x27b10010, 0xa7a00010, 0x8021,
38100xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
38110x1440fffb, 0x0, 0xc004d4b, 0x2021,
38120xc004d4b, 0x24040001, 0xc004d4b, 0x24040001,
38130xc004d4b, 0x2021, 0x24100010, 0x32020001,
38140x10400002, 0x2021, 0x24040001, 0xc004d4b,
38150x108042, 0x1600fffa, 0x32020001, 0x24100010,
38160x32020018, 0x10400002, 0x2021, 0x24040001,
38170xc004d4b, 0x108042, 0x1600fffa, 0x32020018,
38180xc004d71, 0x34108000, 0xc004d71, 0x0,
38190xc004d2b, 0x0, 0x50400005, 0x108042,
38200x96220000, 0x501025, 0xa6220000, 0x108042,
38210x1600fff7, 0x0, 0xc004d71, 0x8021,
38220x97a20010, 0x27b10010, 0x34420001, 0xa7a20010,
38230xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
38240x1440fffb, 0x0, 0xc004d4b, 0x2021,
38250xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
38260xc004d4b, 0x24040001, 0x24100010, 0x32020001,
38270x10400002, 0x2021, 0x24040001, 0xc004d4b,
38280x108042, 0x1600fffa, 0x32020001, 0x24100010,
38290x32020018, 0x10400002, 0x2021, 0x24040001,
38300xc004d4b, 0x108042, 0x1600fffa, 0x32020018,
38310xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
38320x34108000, 0x96220000, 0x501024, 0x10400002,
38330x2021, 0x24040001, 0xc004d4b, 0x108042,
38340x1600fff8, 0x0, 0xc004d71, 0x0,
38350x8f830054, 0x8004d16, 0x2402000a, 0x8f830054,
38360x3c020001, 0x8c425db8, 0x2463ff9c, 0x431023,
38370x2c420064, 0x1440019b, 0x2402000b, 0x8004d24,
38380x0, 0x27b10010, 0xa7a00010, 0x8021,
38390xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
38400x1440fffb, 0x0, 0xc004d4b, 0x2021,
38410xc004d4b, 0x24040001, 0xc004d4b, 0x24040001,
38420xc004d4b, 0x2021, 0x24100010, 0x32020001,
38430x10400002, 0x2021, 0x24040001, 0xc004d4b,
38440x108042, 0x1600fffa, 0x32020001, 0x24100010,
38450x32020017, 0x10400002, 0x2021, 0x24040001,
38460xc004d4b, 0x108042, 0x1600fffa, 0x32020017,
38470xc004d71, 0x34108000, 0xc004d71, 0x0,
38480xc004d2b, 0x0, 0x50400005, 0x108042,
38490x96220000, 0x501025, 0xa6220000, 0x108042,
38500x1600fff7, 0x0, 0xc004d71, 0x8021,
38510x97a20010, 0x27b10010, 0x34420700, 0xa7a20010,
38520xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
38530x1440fffb, 0x0, 0xc004d4b, 0x2021,
38540xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
38550xc004d4b, 0x24040001, 0x24100010, 0x32020001,
38560x10400002, 0x2021, 0x24040001, 0xc004d4b,
38570x108042, 0x1600fffa, 0x32020001, 0x24100010,
38580x32020017, 0x10400002, 0x2021, 0x24040001,
38590xc004d4b, 0x108042, 0x1600fffa, 0x32020017,
38600xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
38610x34108000, 0x96220000, 0x501024, 0x10400002,
38620x2021, 0x24040001, 0xc004d4b, 0x108042,
38630x1600fff8, 0x0, 0xc004d71, 0x0,
38640x8f830054, 0x8004d16, 0x2402000c, 0x8f830054,
38650x3c020001, 0x8c425db8, 0x2463ff9c, 0x431023,
38660x2c420064, 0x14400127, 0x24020012, 0x8004d24,
38670x0, 0x27b10010, 0xa7a00010, 0x8021,
38680xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
38690x1440fffb, 0x0, 0xc004d4b, 0x2021,
38700xc004d4b, 0x24040001, 0xc004d4b, 0x24040001,
38710xc004d4b, 0x2021, 0x24100010, 0x32020001,
38720x10400002, 0x2021, 0x24040001, 0xc004d4b,
38730x108042, 0x1600fffa, 0x32020001, 0x24100010,
38740x32020014, 0x10400002, 0x2021, 0x24040001,
38750xc004d4b, 0x108042, 0x1600fffa, 0x32020014,
38760xc004d71, 0x34108000, 0xc004d71, 0x0,
38770xc004d2b, 0x0, 0x50400005, 0x108042,
38780x96220000, 0x501025, 0xa6220000, 0x108042,
38790x1600fff7, 0x0, 0xc004d71, 0x8021,
38800x97a20010, 0x27b10010, 0x34420010, 0xa7a20010,
38810xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
38820x1440fffb, 0x0, 0xc004d4b, 0x2021,
38830xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
38840xc004d4b, 0x24040001, 0x24100010, 0x32020001,
38850x10400002, 0x2021, 0x24040001, 0xc004d4b,
38860x108042, 0x1600fffa, 0x32020001, 0x24100010,
38870x32020014, 0x10400002, 0x2021, 0x24040001,
38880xc004d4b, 0x108042, 0x1600fffa, 0x32020014,
38890xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
38900x34108000, 0x96220000, 0x501024, 0x10400002,
38910x2021, 0x24040001, 0xc004d4b, 0x108042,
38920x1600fff8, 0x0, 0xc004d71, 0x0,
38930x8f830054, 0x8004d16, 0x24020013, 0x8f830054,
38940x3c020001, 0x8c425db8, 0x2463ff9c, 0x431023,
38950x2c420064, 0x144000b3, 0x2402000d, 0x8004d24,
38960x0, 0x27b10010, 0xa7a00010, 0x8021,
38970xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
38980x1440fffb, 0x0, 0xc004d4b, 0x2021,
38990xc004d4b, 0x24040001, 0xc004d4b, 0x24040001,
39000xc004d4b, 0x2021, 0x24100010, 0x32020001,
39010x10400002, 0x2021, 0x24040001, 0xc004d4b,
39020x108042, 0x1600fffa, 0x32020001, 0x24100010,
39030x32020018, 0x10400002, 0x2021, 0x24040001,
39040xc004d4b, 0x108042, 0x1600fffa, 0x32020018,
39050xc004d71, 0x34108000, 0xc004d71, 0x0,
39060xc004d2b, 0x0, 0x50400005, 0x108042,
39070x96220000, 0x501025, 0xa6220000, 0x108042,
39080x1600fff7, 0x0, 0xc004d71, 0x8021,
39090x97a20010, 0x27b10010, 0x3042fffe, 0xa7a20010,
39100xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
39110x1440fffb, 0x0, 0xc004d4b, 0x2021,
39120xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
39130xc004d4b, 0x24040001, 0x24100010, 0x32020001,
39140x10400002, 0x2021, 0x24040001, 0xc004d4b,
39150x108042, 0x1600fffa, 0x32020001, 0x24100010,
39160x32020018, 0x10400002, 0x2021, 0x24040001,
39170xc004d4b, 0x108042, 0x1600fffa, 0x32020018,
39180xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
39190x34108000, 0x96220000, 0x501024, 0x10400002,
39200x2021, 0x24040001, 0xc004d4b, 0x108042,
39210x1600fff8, 0x0, 0xc004d71, 0x0,
39220x8f830054, 0x8004d16, 0x2402000e, 0x24020840,
39230xa7a20010, 0x27b10010, 0x8021, 0xc004d4b,
39240x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
39250x0, 0xc004d4b, 0x2021, 0xc004d4b,
39260x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
39270x24040001, 0x24100010, 0x32020001, 0x10400002,
39280x2021, 0x24040001, 0xc004d4b, 0x108042,
39290x1600fffa, 0x32020001, 0x24100010, 0x32020013,
39300x10400002, 0x2021, 0x24040001, 0xc004d4b,
39310x108042, 0x1600fffa, 0x32020013, 0xc004d4b,
39320x24040001, 0xc004d4b, 0x2021, 0x34108000,
39330x96220000, 0x501024, 0x10400002, 0x2021,
39340x24040001, 0xc004d4b, 0x108042, 0x1600fff8,
39350x0, 0xc004d71, 0x0, 0x8f830054,
39360x24020010, 0x3c010001, 0xac225d00, 0x3c010001,
39370x8004d26, 0xac235db8, 0x8f830054, 0x3c020001,
39380x8c425db8, 0x2463ff9c, 0x431023, 0x2c420064,
39390x14400004, 0x0, 0x24020011, 0x3c010001,
39400xac225d00, 0x8fbf0020, 0x8fb1001c, 0x8fb00018,
39410x3e00008, 0x27bd0028, 0x8f850044, 0x8f820044,
39420x3c030001, 0x431025, 0x3c030008, 0xaf820044,
39430x8f840054, 0x8f820054, 0xa32824, 0x8004d37,
39440x24840001, 0x8f820054, 0x821023, 0x2c420002,
39450x1440fffc, 0x0, 0x8f820044, 0x3c03fffe,
39460x3463ffff, 0x431024, 0xaf820044, 0x8f830054,
39470x8f820054, 0x8004d45, 0x24630001, 0x8f820054,
39480x621023, 0x2c420002, 0x1440fffc, 0x0,
39490x3e00008, 0xa01021, 0x8f830044, 0x3c02fff0,
39500x3442ffff, 0x42480, 0x621824, 0x3c020002,
39510x822025, 0x641825, 0xaf830044, 0x8f820044,
39520x3c03fffe, 0x3463ffff, 0x431024, 0xaf820044,
39530x8f830054, 0x8f820054, 0x8004d5e, 0x24630001,
39540x8f820054, 0x621023, 0x2c420002, 0x1440fffc,
39550x0, 0x8f820044, 0x3c030001, 0x431025,
39560xaf820044, 0x8f830054, 0x8f820054, 0x8004d6b,
39570x24630001, 0x8f820054, 0x621023, 0x2c420002,
39580x1440fffc, 0x0, 0x3e00008, 0x0,
39590x8f820044, 0x3c03fff0, 0x3463ffff, 0x431024,
39600xaf820044, 0x8f820044, 0x3c030001, 0x431025,
39610xaf820044, 0x8f830054, 0x8f820054, 0x8004d7f,
39620x24630001, 0x8f820054, 0x621023, 0x2c420002,
39630x1440fffc, 0x0, 0x8f820044, 0x3c03fffe,
39640x3463ffff, 0x431024, 0xaf820044, 0x8f830054,
39650x8f820054, 0x8004d8d, 0x24630001, 0x8f820054,
39660x621023, 0x2c420002, 0x1440fffc, 0x0,
39670x3e00008, 0x0, 0x27bdffc8, 0xafb30024,
39680x809821, 0xafb5002c, 0xa0a821, 0xafb20020,
39690xc09021, 0x32a2ffff, 0xafbf0030, 0xafb40028,
39700xafb1001c, 0xafb00018, 0x14400034, 0xa7b20010,
39710x3271ffff, 0x27b20010, 0x8021, 0xc004d4b,
39720x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
39730x0, 0xc004d4b, 0x2021, 0xc004d4b,
39740x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
39750x24040001, 0x24100010, 0x32020001, 0x10400002,
39760x2021, 0x24040001, 0xc004d4b, 0x108042,
39770x1600fffa, 0x32020001, 0x24100010, 0x2301024,
39780x10400002, 0x2021, 0x24040001, 0xc004d4b,
39790x108042, 0x1600fffa, 0x2301024, 0xc004d4b,
39800x24040001, 0xc004d4b, 0x2021, 0x34108000,
39810x96420000, 0x501024, 0x10400002, 0x2021,
39820x24040001, 0xc004d4b, 0x108042, 0x12000075,
39830x0, 0x8004dc9, 0x0, 0x3274ffff,
39840x27b10010, 0xa7a00010, 0x8021, 0xc004d4b,
39850x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
39860x0, 0xc004d4b, 0x2021, 0xc004d4b,
39870x24040001, 0xc004d4b, 0x24040001, 0xc004d4b,
39880x2021, 0x24100010, 0x32020001, 0x10400002,
39890x2021, 0x24040001, 0xc004d4b, 0x108042,
39900x1600fffa, 0x32020001, 0x24100010, 0x2901024,
39910x10400002, 0x2021, 0x24040001, 0xc004d4b,
39920x108042, 0x1600fffa, 0x2901024, 0xc004d71,
39930x34108000, 0xc004d71, 0x0, 0xc004d2b,
39940x0, 0x50400005, 0x108042, 0x96220000,
39950x501025, 0xa6220000, 0x108042, 0x1600fff7,
39960x0, 0xc004d71, 0x0, 0x32a5ffff,
39970x24020001, 0x54a20004, 0x24020002, 0x97a20010,
39980x8004e14, 0x521025, 0x14a20006, 0x3271ffff,
39990x97a20010, 0x121827, 0x431024, 0xa7a20010,
40000x3271ffff, 0x27b20010, 0x8021, 0xc004d4b,
40010x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
40020x0, 0xc004d4b, 0x2021, 0xc004d4b,
40030x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
40040x24040001, 0x24100010, 0x32020001, 0x10400002,
40050x2021, 0x24040001, 0xc004d4b, 0x108042,
40060x1600fffa, 0x32020001, 0x24100010, 0x2301024,
40070x10400002, 0x2021, 0x24040001, 0xc004d4b,
40080x108042, 0x1600fffa, 0x2301024, 0xc004d4b,
40090x24040001, 0xc004d4b, 0x2021, 0x34108000,
40100x96420000, 0x501024, 0x10400002, 0x2021,
40110x24040001, 0xc004d4b, 0x108042, 0x1600fff8,
40120x0, 0xc004d71, 0x0, 0x8fbf0030,
40130x8fb5002c, 0x8fb40028, 0x8fb30024, 0x8fb20020,
40140x8fb1001c, 0x8fb00018, 0x3e00008, 0x27bd0038,
40150x0, 0x0, 0x0, 0x27bdffe8,
40160xafbf0010, 0x3c030001, 0x771821, 0x8c6383ac,
40170x24020008, 0x1462022c, 0x803021, 0x3c020001,
40180x8c425d98, 0x14400033, 0x0, 0x8f850224,
40190x38a30020, 0x2c630001, 0x38a20010, 0x2c420001,
40200x621825, 0x1460000d, 0x38a30030, 0x2c630001,
40210x38a20400, 0x2c420001, 0x621825, 0x14600007,
40220x38a30402, 0x2c630001, 0x38a20404, 0x2c420001,
40230x621825, 0x10600005, 0x0, 0xc00429b,
40240x0, 0x8004e8d, 0x2402000e, 0xc0043dd,
40250x0, 0x3c050001, 0x8ca55cc8, 0xc0052a2,
40260x2021, 0x3c030001, 0x8c635cc8, 0x24020004,
40270x14620005, 0x2403fffb, 0x3c020001, 0x8c425cc4,
40280x8004e89, 0x2403fff7, 0x3c020001, 0x8c425cc4,
40290x431024, 0x3c010001, 0xac225cc4, 0x2402000e,
40300x3c010001, 0xc00429b, 0xac227dd0, 0x8005087,
40310x0, 0x8f820220, 0x3c030400, 0x431024,
40320x10400027, 0x2403ffbf, 0x8f850224, 0x3c020001,
40330x8c427ddc, 0xa32024, 0x431024, 0x1482000c,
40340x0, 0x3c020001, 0x8c427de0, 0x24420001,
40350x3c010001, 0xac227de0, 0x2c420002, 0x14400008,
40360x24020001, 0x3c010001, 0x8004ead, 0xac227e00,
40370x3c010001, 0xac207de0, 0x3c010001, 0xac207e00,
40380x3c020001, 0x8c427e00, 0x10400006, 0x30a20040,
40390x10400004, 0x24020001, 0x3c010001, 0x8004eb8,
40400xac227e04, 0x3c010001, 0xac207e04, 0x3c010001,
40410xac257ddc, 0x3c010001, 0x8004ec8, 0xac207e10,
40420x24020001, 0x3c010001, 0xac227e10, 0x3c010001,
40430xac207e00, 0x3c010001, 0xac207de0, 0x3c010001,
40440xac207e04, 0x3c010001, 0xac207ddc, 0x3c030001,
40450x8c637dd0, 0x3c020001, 0x8c427dd4, 0x10620003,
40460x3c020200, 0x3c010001, 0xac237dd4, 0xc21024,
40470x10400007, 0x2463ffff, 0x8f820220, 0x24030001,
40480x3c010001, 0xac235ccc, 0x8005085, 0x3c03f700,
40490x2c62000e, 0x104001a8, 0x31080, 0x3c010001,
40500x220821, 0x8c225b80, 0x400008, 0x0,
40510x3c010001, 0xac207e00, 0x3c010001, 0xac207de0,
40520x3c010001, 0xac207ddc, 0x3c010001, 0xac207e04,
40530x3c010001, 0xac207df8, 0x3c010001, 0xac207df0,
40540xc00486a, 0xaf800224, 0x24020002, 0x3c010001,
40550xac227dd0, 0x3c020001, 0x8c427e10, 0x14400056,
40560x3c03fdff, 0x8ee20000, 0x3463ffff, 0x431024,
40570xc00429b, 0xaee20000, 0xaf800204, 0x8f820200,
40580x2403fffd, 0x431024, 0xaf820200, 0x3c010001,
40590xac207e20, 0x8f830054, 0x3c020001, 0x8c427df8,
40600x24040001, 0x3c010001, 0xac247e0c, 0x24420001,
40610x3c010001, 0xac227df8, 0x2c420004, 0x3c010001,
40620xac237df4, 0x14400006, 0x24020003, 0x3c010001,
40630xac245ccc, 0x3c010001, 0x8005083, 0xac207df8,
40640x3c010001, 0x8005083, 0xac227dd0, 0x8f830054,
40650x3c020001, 0x8c427df4, 0x2463d8f0, 0x431023,
40660x2c422710, 0x14400003, 0x24020004, 0x3c010001,
40670xac227dd0, 0x3c020001, 0x8c427e10, 0x14400026,
40680x3c03fdff, 0x8ee20000, 0x3463ffff, 0x431024,
40690x8005083, 0xaee20000, 0x3c040001, 0x8c845d9c,
40700x3c010001, 0xc00508a, 0xac207de8, 0x3c020001,
40710x8c427e1c, 0xaf820204, 0x3c020001, 0x8c427e10,
40720x14400015, 0x3c03fdff, 0x8ee20000, 0x3463ffff,
40730x431024, 0xaee20000, 0x8f820204, 0x30420030,
40740x1440013c, 0x24020002, 0x3c030001, 0x8c637e1c,
40750x24020005, 0x3c010001, 0xac227dd0, 0x3c010001,
40760x8005083, 0xac237e20, 0x3c020001, 0x8c427e10,
40770x10400010, 0x3c03fdff, 0x3c020001, 0x8c425d6c,
40780x24420001, 0x3c010001, 0xac225d6c, 0x2c420002,
40790x14400131, 0x24020001, 0x3c010001, 0xac225d74,
40800x3c010001, 0xac205d6c, 0x3c010001, 0x8005083,
40810xac225ccc, 0x8ee20000, 0x3463ffff, 0x431024,
40820xaee20000, 0x3c020001, 0x8c427e00, 0x10400122,
40830x0, 0x3c020001, 0x8c427ddc, 0x1040011e,
40840x0, 0x3c010001, 0xac227e08, 0x24020003,
40850x3c010001, 0xac227de0, 0x8005024, 0x24020006,
40860x3c010001, 0xac207de8, 0x8f820204, 0x34420040,
40870xaf820204, 0x3c020001, 0x8c427e20, 0x24030007,
40880x3c010001, 0xac237dd0, 0x34420040, 0x3c010001,
40890xac227e20, 0x3c020001, 0x8c427e00, 0x10400005,
40900x0, 0x3c020001, 0x8c427ddc, 0x104000f9,
40910x24020002, 0x3c050001, 0x24a57de0, 0x8ca20000,
40920x2c424e21, 0x104000f3, 0x24020002, 0x3c020001,
40930x8c427e04, 0x104000f8, 0x2404ffbf, 0x3c020001,
40940x8c427ddc, 0x3c030001, 0x8c637e08, 0x441024,
40950x641824, 0x10430004, 0x24020001, 0x3c010001,
40960x8005083, 0xac227dd0, 0x24020003, 0xaca20000,
40970x24020008, 0x3c010001, 0xac227dd0, 0x3c020001,
40980x8c427e0c, 0x1040000c, 0x24020001, 0x3c040001,
40990xc005097, 0x8c847ddc, 0x3c020001, 0x8c427e28,
41000x14400005, 0x24020001, 0x3c020001, 0x8c427e24,
41010x10400006, 0x24020001, 0x3c010001, 0xac225ccc,
41020x3c010001, 0x8005083, 0xac207df8, 0x3c020001,
41030x8c427df0, 0x3c030001, 0x8c637ddc, 0x2c420001,
41040x210c0, 0x30630008, 0x3c010001, 0xac227df0,
41050x3c010001, 0xac237dec, 0x8f830054, 0x24020009,
41060x3c010001, 0xac227dd0, 0x3c010001, 0x8005083,
41070xac237df4, 0x8f830054, 0x3c020001, 0x8c427df4,
41080x2463d8f0, 0x431023, 0x2c422710, 0x144000a8,
41090x0, 0x3c020001, 0x8c427e00, 0x10400005,
41100x0, 0x3c020001, 0x8c427ddc, 0x104000a9,
41110x24020002, 0x3c030001, 0x24637de0, 0x8c620000,
41120x2c424e21, 0x104000a3, 0x24020002, 0x3c020001,
41130x8c427e0c, 0x1040000e, 0x0, 0x3c020001,
41140x8c427ddc, 0x3c010001, 0xac207e0c, 0x30420080,
41150x1040002f, 0x2402000c, 0x8f820204, 0x30420080,
41160x1440000c, 0x24020003, 0x8005011, 0x2402000c,
41170x3c020001, 0x8c427ddc, 0x30420080, 0x14400005,
41180x24020003, 0x8f820204, 0x30420080, 0x1040001f,
41190x24020003, 0xac620000, 0x2402000a, 0x3c010001,
41200xac227dd0, 0x3c040001, 0x24847e18, 0x8c820000,
41210x3c030001, 0x8c637df0, 0x431025, 0xaf820204,
41220x8c830000, 0x3c040001, 0x8c847df0, 0x2402000b,
41230x3c010001, 0xac227dd0, 0x641825, 0x3c010001,
41240xac237e20, 0x3c050001, 0x24a57de0, 0x8ca20000,
41250x2c424e21, 0x1040006f, 0x24020002, 0x3c020001,
41260x8c427e10, 0x10400005, 0x0, 0x2402000c,
41270x3c010001, 0x8005083, 0xac227dd0, 0x3c020001,
41280x8c427e00, 0x1040006c, 0x0, 0x3c040001,
41290x8c847ddc, 0x1080005e, 0x30820008, 0x3c030001,
41300x8c637dec, 0x10620064, 0x24020003, 0x3c010001,
41310xac247e08, 0xaca20000, 0x24020006, 0x3c010001,
41320x8005083, 0xac227dd0, 0x8f820200, 0x34420002,
41330xaf820200, 0x8f830054, 0x2402000d, 0x3c010001,
41340xac227dd0, 0x3c010001, 0xac237df4, 0x8f830054,
41350x3c020001, 0x8c427df4, 0x2463d8f0, 0x431023,
41360x2c422710, 0x1440003a, 0x0, 0x3c020001,
41370x8c427e10, 0x10400029, 0x2402000e, 0x3c030001,
41380x8c637e24, 0x3c010001, 0x14600015, 0xac227dd0,
41390xc0043dd, 0x0, 0x3c050001, 0x8ca55cc8,
41400xc0052a2, 0x2021, 0x3c030001, 0x8c635cc8,
41410x24020004, 0x14620005, 0x2403fffb, 0x3c020001,
41420x8c425cc4, 0x8005052, 0x2403fff7, 0x3c020001,
41430x8c425cc4, 0x431024, 0x3c010001, 0xac225cc4,
41440x8ee20000, 0x3c030200, 0x431025, 0xaee20000,
41450x8f820224, 0x3c010001, 0xac227e2c, 0x8f820220,
41460x2403fffb, 0x431024, 0xaf820220, 0x8f820220,
41470x34420002, 0x8005083, 0xaf820220, 0x3c020001,
41480x8c427e00, 0x10400005, 0x0, 0x3c020001,
41490x8c427ddc, 0x1040000f, 0x24020002, 0x3c020001,
41500x8c427de0, 0x2c424e21, 0x1040000a, 0x24020002,
41510x3c020001, 0x8c427e00, 0x1040000f, 0x0,
41520x3c020001, 0x8c427ddc, 0x1440000b, 0x0,
41530x24020002, 0x3c010001, 0x8005083, 0xac227dd0,
41540x3c020001, 0x8c427e00, 0x10400003, 0x0,
41550xc00429b, 0x0, 0x8f820220, 0x3c03f700,
41560x431025, 0xaf820220, 0x8fbf0010, 0x3e00008,
41570x27bd0018, 0x3c030001, 0x24637e28, 0x8c620000,
41580x10400005, 0x34422000, 0x3c010001, 0xac227e1c,
41590x8005095, 0xac600000, 0x3c010001, 0xac247e1c,
41600x3e00008, 0x0, 0x27bdffe0, 0x30820030,
41610xafbf0018, 0x3c010001, 0xac227e24, 0x14400067,
41620x3c02ffff, 0x34421f0e, 0x821024, 0x14400061,
41630x24020030, 0x30822000, 0x1040005d, 0x30838000,
41640x31a02, 0x30820001, 0x21200, 0x3c040001,
41650x8c845d9c, 0x621825, 0x331c2, 0x3c030001,
41660x24635d78, 0x30828000, 0x21202, 0x30840001,
41670x42200, 0x441025, 0x239c2, 0x61080,
41680x431021, 0x471021, 0x90430000, 0x24020001,
41690x10620025, 0x0, 0x10600007, 0x24020002,
41700x10620013, 0x24020003, 0x1062002c, 0x3c05000f,
41710x80050f9, 0x0, 0x8f820200, 0x2403feff,
41720x431024, 0xaf820200, 0x8f820220, 0x3c03fffe,
41730x3463ffff, 0x431024, 0xaf820220, 0x3c010001,
41740xac207e44, 0x3c010001, 0x8005104, 0xac207e4c,
41750x8f820200, 0x34420100, 0xaf820200, 0x8f820220,
41760x3c03fffe, 0x3463ffff, 0x431024, 0xaf820220,
41770x24020100, 0x3c010001, 0xac227e44, 0x3c010001,
41780x8005104, 0xac207e4c, 0x8f820200, 0x2403feff,
41790x431024, 0xaf820200, 0x8f820220, 0x3c030001,
41800x431025, 0xaf820220, 0x3c010001, 0xac207e44,
41810x3c010001, 0x8005104, 0xac237e4c, 0x8f820200,
41820x34420100, 0xaf820200, 0x8f820220, 0x3c030001,
41830x431025, 0xaf820220, 0x24020100, 0x3c010001,
41840xac227e44, 0x3c010001, 0x8005104, 0xac237e4c,
41850x34a5ffff, 0x3c040001, 0x24845bb8, 0xafa30010,
41860xc002403, 0xafa00014, 0x8005104, 0x0,
41870x24020030, 0x3c010001, 0xac227e28, 0x8fbf0018,
41880x3e00008, 0x27bd0020, 0x0, 0x27bdffc8,
41890xafb20028, 0x809021, 0xafb3002c, 0xa09821,
41900xafb00020, 0xc08021, 0x3c040001, 0x24845bd0,
41910x3c050009, 0x3c020001, 0x8c425cc8, 0x34a59001,
41920x2403021, 0x2603821, 0xafbf0030, 0xafb10024,
41930xa7a0001a, 0xafb00014, 0xc002403, 0xafa20010,
41940x24020002, 0x12620083, 0x2e620003, 0x10400005,
41950x24020001, 0x1262000a, 0x0, 0x800529b,
41960x0, 0x24020004, 0x126200fa, 0x24020008,
41970x126200f9, 0x3c02ffec, 0x800529b, 0x0,
41980x3c020001, 0x8c425cc4, 0x30420002, 0x14400004,
41990x128940, 0x3c02fffb, 0x3442ffff, 0x2028024,
42000x3c010001, 0x310821, 0xac307e3c, 0x3c024000,
42010x2021024, 0x1040004e, 0x1023c2, 0x30840030,
42020x101382, 0x3042001c, 0x3c030001, 0x24635d08,
42030x431021, 0x823821, 0x3c020020, 0x2021024,
42040x10400006, 0x24020100, 0x3c010001, 0x310821,
42050xac227e40, 0x8005150, 0x3c020080, 0x3c010001,
42060x310821, 0xac207e40, 0x3c020080, 0x2021024,
42070x10400006, 0x121940, 0x3c020001, 0x3c010001,
42080x230821, 0x800515c, 0xac227e48, 0x121140,
42090x3c010001, 0x220821, 0xac207e48, 0x94e40000,
42100x3c030001, 0x8c635dbc, 0x24020005, 0x10620010,
42110xa7a40018, 0x32024000, 0x10400002, 0x34824000,
42120xa7a20018, 0x24040001, 0x94e20002, 0x24050004,
42130x24e60002, 0x34420001, 0xc00498e, 0xa4e20002,
42140x24040001, 0x2821, 0xc00498e, 0x27a60018,
42150x3c020001, 0x8c425cc8, 0x24110001, 0x3c010001,
42160xac315cd4, 0x14530004, 0x32028000, 0xc00429b,
42170x0, 0x32028000, 0x1040011f, 0x0,
42180xc00429b, 0x0, 0x3c030001, 0x8c635dbc,
42190x24020005, 0x10620118, 0x24020002, 0x3c010001,
42200xac315ccc, 0x3c010001, 0x800529b, 0xac225cc8,
42210x24040001, 0x24050004, 0x27b0001a, 0xc00498e,
42220x2003021, 0x24040001, 0x2821, 0xc00498e,
42230x2003021, 0x3c020001, 0x511021, 0x8c427e34,
42240x3c040001, 0x8c845cc8, 0x3c03bfff, 0x3463ffff,
42250x3c010001, 0xac335cd4, 0x431024, 0x3c010001,
42260x310821, 0x109300fa, 0xac227e34, 0x800529b,
42270x0, 0x3c022000, 0x2021024, 0x10400005,
42280x24020001, 0x3c010001, 0xac225d98, 0x80051ad,
42290x128940, 0x3c010001, 0xac205d98, 0x128940,
42300x3c010001, 0x310821, 0xac307e38, 0x3c024000,
42310x2021024, 0x14400016, 0x0, 0x3c020001,
42320x8c425d98, 0x10400008, 0x24040004, 0x24050001,
42330xc004d93, 0x24062000, 0x24020001, 0x3c010001,
42340x370821, 0xac2283ac, 0x3c020001, 0x511021,
42350x8c427e30, 0x3c03bfff, 0x3463ffff, 0x431024,
42360x3c010001, 0x310821, 0x8005299, 0xac227e30,
42370x3c020001, 0x8c425d98, 0x10400028, 0x3c0300a0,
42380x2031024, 0x5443000d, 0x3c020020, 0x3c020001,
42390x8c425d9c, 0x24030100, 0x3c010001, 0x310821,
42400xac237e44, 0x3c030001, 0x3c010001, 0x310821,
42410xac237e4c, 0x80051f0, 0x34420400, 0x2021024,
42420x10400008, 0x24030100, 0x3c020001, 0x8c425d9c,
42430x3c010001, 0x310821, 0xac237e44, 0x80051f0,
42440x34420800, 0x3c020080, 0x2021024, 0x1040002e,
42450x3c030001, 0x3c020001, 0x8c425d9c, 0x3c010001,
42460x310821, 0xac237e4c, 0x34420c00, 0x3c010001,
42470xac225d9c, 0x8005218, 0x24040001, 0x3c020020,
42480x2021024, 0x10400006, 0x24020100, 0x3c010001,
42490x310821, 0xac227e44, 0x8005201, 0x3c020080,
42500x3c010001, 0x310821, 0xac207e44, 0x3c020080,
42510x2021024, 0x10400007, 0x121940, 0x3c020001,
42520x3c010001, 0x230821, 0xac227e4c, 0x800520f,
42530x24040001, 0x121140, 0x3c010001, 0x220821,
42540xac207e4c, 0x24040001, 0x2821, 0x27b0001e,
42550xc00494c, 0x2003021, 0x24040001, 0x2821,
42560xc00494c, 0x2003021, 0x24040001, 0x24050001,
42570x27b0001c, 0xc00494c, 0x2003021, 0x24040001,
42580x24050001, 0xc00494c, 0x2003021, 0x8005299,
42590x0, 0x3c02ffec, 0x3442ffff, 0x2028024,
42600x3c020008, 0x2028025, 0x121140, 0x3c010001,
42610x220821, 0xac307e38, 0x3c022000, 0x2021024,
42620x10400009, 0x0, 0x3c020001, 0x8c425d74,
42630x14400005, 0x24020001, 0x3c010001, 0xac225d98,
42640x800523a, 0x3c024000, 0x3c010001, 0xac205d98,
42650x3c024000, 0x2021024, 0x1440001e, 0x0,
42660x3c020001, 0x8c425d98, 0x3c010001, 0xac205ce0,
42670x10400007, 0x24022020, 0x3c010001, 0xac225d9c,
42680x24020001, 0x3c010001, 0x370821, 0xac2283ac,
42690x3c04bfff, 0x121940, 0x3c020001, 0x431021,
42700x8c427e30, 0x3c050001, 0x8ca55cc8, 0x3484ffff,
42710x441024, 0x3c010001, 0x230821, 0xac227e30,
42720x24020001, 0x10a20044, 0x0, 0x8005299,
42730x0, 0x3c020001, 0x8c425d98, 0x1040001c,
42740x24022000, 0x3c010001, 0xac225d9c, 0x3c0300a0,
42750x2031024, 0x14430005, 0x121140, 0x3402a000,
42760x3c010001, 0x8005294, 0xac225d9c, 0x3c030001,
42770x621821, 0x8c637e38, 0x3c020020, 0x621024,
42780x10400004, 0x24022001, 0x3c010001, 0x8005294,
42790xac225d9c, 0x3c020080, 0x621024, 0x1040001f,
42800x3402a001, 0x3c010001, 0x8005294, 0xac225d9c,
42810x3c020020, 0x2021024, 0x10400007, 0x121940,
42820x24020100, 0x3c010001, 0x230821, 0xac227e44,
42830x8005288, 0x3c020080, 0x121140, 0x3c010001,
42840x220821, 0xac207e44, 0x3c020080, 0x2021024,
42850x10400006, 0x121940, 0x3c020001, 0x3c010001,
42860x230821, 0x8005294, 0xac227e4c, 0x121140,
42870x3c010001, 0x220821, 0xac207e4c, 0x3c030001,
42880x8c635cc8, 0x24020001, 0x10620003, 0x0,
42890xc00429b, 0x0, 0x8fbf0030, 0x8fb3002c,
42900x8fb20028, 0x8fb10024, 0x8fb00020, 0x3e00008,
42910x27bd0038, 0x27bdffd8, 0xafb20020, 0x809021,
42920xafb1001c, 0x8821, 0x24020002, 0xafbf0024,
42930xafb00018, 0xa7a00012, 0x10a200d3, 0xa7a00010,
42940x2ca20003, 0x10400005, 0x24020001, 0x10a2000a,
42950x128140, 0x8005380, 0x2201021, 0x24020004,
42960x10a2007d, 0x24020008, 0x10a2007c, 0x122940,
42970x8005380, 0x2201021, 0x3c030001, 0x701821,
42980x8c637e3c, 0x3c024000, 0x621024, 0x14400009,
42990x24040001, 0x3c027fff, 0x3442ffff, 0x628824,
43000x3c010001, 0x300821, 0xac317e34, 0x8005380,
43010x2201021, 0x24050001, 0xc00494c, 0x27a60010,
43020x24040001, 0x24050001, 0xc00494c, 0x27a60010,
43030x97a20010, 0x30420004, 0x10400034, 0x3c114000,
43040x3c020001, 0x8c425dbc, 0x2443ffff, 0x2c620006,
43050x10400034, 0x31080, 0x3c010001, 0x220821,
43060x8c225be0, 0x400008, 0x0, 0x24040001,
43070x24050011, 0x27b00012, 0xc00494c, 0x2003021,
43080x24040001, 0x24050011, 0xc00494c, 0x2003021,
43090x97a50012, 0x30a24000, 0x10400002, 0x3c040010,
43100x3c040008, 0x3c030001, 0x8005301, 0x30a28000,
43110x24040001, 0x24050014, 0x27b00012, 0xc00494c,
43120x2003021, 0x24040001, 0x24050014, 0xc00494c,
43130x2003021, 0x97a50012, 0x30a21000, 0x10400002,
43140x3c040010, 0x3c040008, 0x3c030001, 0x30a20800,
43150x54400001, 0x3c030002, 0x3c028000, 0x2221025,
43160x641825, 0x800530e, 0x438825, 0x3c110001,
43170x2308821, 0x8e317e3c, 0x3c027fff, 0x3442ffff,
43180x2228824, 0x3c020001, 0x8c425cd8, 0x1040001d,
43190x121140, 0x3c020001, 0x8c425d98, 0x10400002,
43200x3c022000, 0x2228825, 0x121140, 0x3c010001,
43210x220821, 0x8c227e40, 0x10400003, 0x3c020020,
43220x8005322, 0x2228825, 0x3c02ffdf, 0x3442ffff,
43230x2228824, 0x121140, 0x3c010001, 0x220821,
43240x8c227e48, 0x10400003, 0x3c020080, 0x800532d,
43250x2228825, 0x3c02ff7f, 0x3442ffff, 0x2228824,
43260x121140, 0x3c010001, 0x220821, 0xac317e34,
43270x8005380, 0x2201021, 0x122940, 0x3c030001,
43280x651821, 0x8c637e38, 0x3c024000, 0x621024,
43290x14400008, 0x3c027fff, 0x3442ffff, 0x628824,
43300x3c010001, 0x250821, 0xac317e30, 0x8005380,
43310x2201021, 0x3c020001, 0x8c425cd8, 0x10400033,
43320x3c11c00c, 0x3c020001, 0x8c425d74, 0x3c04c00c,
43330x34842000, 0x3c030001, 0x8c635d98, 0x2102b,
43340x21023, 0x441024, 0x10600003, 0x518825,
43350x3c022000, 0x2228825, 0x3c020001, 0x451021,
43360x8c427e44, 0x10400003, 0x3c020020, 0x800535d,
43370x2228825, 0x3c02ffdf, 0x3442ffff, 0x2228824,
43380x121140, 0x3c010001, 0x220821, 0x8c227e4c,
43390x10400003, 0x3c020080, 0x8005368, 0x2228825,
43400x3c02ff7f, 0x3442ffff, 0x2228824, 0x3c020001,
43410x8c425d60, 0x10400002, 0x3c020800, 0x2228825,
43420x3c020001, 0x8c425d64, 0x10400002, 0x3c020400,
43430x2228825, 0x3c020001, 0x8c425d68, 0x10400006,
43440x3c020100, 0x800537b, 0x2228825, 0x3c027fff,
43450x3442ffff, 0x628824, 0x121140, 0x3c010001,
43460x220821, 0xac317e30, 0x2201021, 0x8fbf0024,
43470x8fb20020, 0x8fb1001c, 0x8fb00018, 0x3e00008,
43480x27bd0028, 0x27bdffd8, 0xafb40020, 0x80a021,
43490xafbf0024, 0xafb3001c, 0xafb20018, 0xafb10014,
43500xafb00010, 0x8f900200, 0x3c030001, 0x8c635cc8,
43510x8f930220, 0x24020002, 0x10620063, 0x2c620003,
43520x10400005, 0x24020001, 0x1062000a, 0x141940,
43530x8005448, 0x0, 0x24020004, 0x1062005a,
43540x24020008, 0x10620059, 0x149140, 0x8005448,
43550x0, 0x3c040001, 0x832021, 0x8c847e3c,
43560x3c110001, 0x2238821, 0x8e317e34, 0x3c024000,
43570x821024, 0x1040003e, 0x3c020008, 0x2221024,
43580x10400020, 0x36100002, 0x3c020001, 0x431021,
43590x8c427e40, 0x10400005, 0x36100020, 0x36100100,
43600x3c020020, 0x80053bd, 0x2228825, 0x2402feff,
43610x2028024, 0x3c02ffdf, 0x3442ffff, 0x2228824,
43620x141140, 0x3c010001, 0x220821, 0x8c227e48,
43630x10400005, 0x3c020001, 0x2629825, 0x3c020080,
43640x80053dc, 0x2228825, 0x3c02fffe, 0x3442ffff,
43650x2629824, 0x3c02ff7f, 0x3442ffff, 0x80053dc,
43660x2228824, 0x2402fedf, 0x2028024, 0x3c02fffe,
43670x3442ffff, 0x2629824, 0x3c02ff5f, 0x3442ffff,
43680x2228824, 0x3c010001, 0x230821, 0xac207e40,
43690x3c010001, 0x230821, 0xac207e48, 0xc00486a,
43700x0, 0xaf900200, 0xaf930220, 0x8f820220,
43710x2403fffb, 0x431024, 0xaf820220, 0x8f820220,
43720x34420002, 0xaf820220, 0x80053f3, 0x141140,
43730x8f820200, 0x2403fffd, 0x431024, 0xc00486a,
43740xaf820200, 0x3c02bfff, 0x3442ffff, 0xc00429b,
43750x2228824, 0x141140, 0x3c010001, 0x220821,
43760x8005448, 0xac317e34, 0x149140, 0x3c040001,
43770x922021, 0x8c847e38, 0x3c110001, 0x2328821,
43780x8e317e30, 0x3c024000, 0x821024, 0x14400011,
43790x0, 0x3c020001, 0x8c425d98, 0x14400006,
43800x3c02bfff, 0x8f820200, 0x34420002, 0xc00486a,
43810xaf820200, 0x3c02bfff, 0x3442ffff, 0xc00429b,
43820x2228824, 0x3c010001, 0x320821, 0x8005448,
43830xac317e30, 0x3c020001, 0x8c425d98, 0x10400005,
43840x3c020020, 0x3c020001, 0x8c425d74, 0x1040002b,
43850x3c020020, 0x821024, 0x10400007, 0x36100020,
43860x24020100, 0x3c010001, 0x320821, 0xac227e44,
43870x8005428, 0x36100100, 0x3c010001, 0x320821,
43880xac207e44, 0x2402feff, 0x2028024, 0x3c020080,
43890x821024, 0x10400007, 0x141940, 0x3c020001,
43900x3c010001, 0x230821, 0xac227e4c, 0x8005439,
43910x2629825, 0x141140, 0x3c010001, 0x220821,
43920xac207e4c, 0x3c02fffe, 0x3442ffff, 0x2629824,
43930xc00486a, 0x0, 0xaf900200, 0xaf930220,
43940x8f820220, 0x2403fffb, 0x431024, 0xaf820220,
43950x8f820220, 0x34420002, 0xaf820220, 0x141140,
43960x3c010001, 0x220821, 0xac317e30, 0x8fbf0024,
43970x8fb40020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
43980x8fb00010, 0x3e00008, 0x27bd0028, 0x0 };
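/*
 * End of the tigonFwText image.  Judging purely from the bit patterns,
 * these words appear to be big-endian MIPS instruction encodings for the
 * NIC's on-board MIPS core: the final sequence 0x8fb00010, 0x3e00008,
 * 0x27bd0028 (leading zeroes dropped by the generator) decodes to the
 * usual function epilogue "lw s0,16(sp); jr ra; addiu sp,sp,0x28".
 */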
static u32 tigonFwRodata[(MAX_RODATA_LEN/4) + 1] __devinitdata = {
0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
0x2f66776d, 0x61696e2e, 0x632c7620, 0x312e312e,
0x322e3131, 0x20313939, 0x382f3034, 0x2f323720,
0x32323a31, 0x333a3432, 0x20736875, 0x616e6720,
0x45787020, 0x24000000, 0x7468655f, 0x4441574e,
0x0, 0x53544143, 0x4b5f3120, 0x0,
0x42616453, 0x6e64526e, 0x67000000, 0x3f456e71,
0x45767400, 0x3f6e6f51, 0x64457650, 0x0,
0x6576526e, 0x6746756c, 0x6c000000, 0x496c6c43,
0x6f6e6652, 0x78000000, 0x53656e64, 0x436b5375,
0x6d000000, 0x52656376, 0x566c616e, 0x0,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
0x2f74696d, 0x65722e63, 0x2c762031, 0x2e312e32,
0x2e382031, 0x3939382f, 0x30372f33, 0x31203137,
0x3a35383a, 0x34352073, 0x6875616e, 0x67204578,
0x70202400, 0x542d446d, 0x61526431, 0x0,
0x542d446d, 0x61424200, 0x542d446d, 0x61320000,
0x3f6e6f51, 0x64547845, 0x0, 0x3f6e6f51,
0x64527845, 0x0, 0x656e714d, 0x45765046,
0x61696c00, 0x656e714d, 0x45764661, 0x696c0000,
0x6661696c, 0x456e454d, 0x0, 0x3f456e71,
0x45767400, 0x3f6e6f51, 0x64457650, 0x0,
0x6576526e, 0x6746756c, 0x6c000000, 0x0,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
0x2f636f6d, 0x6d616e64, 0x2e632c76, 0x20312e31,
0x2e322e31, 0x30203139, 0x39382f31, 0x312f3138,
0x2031373a, 0x31313a31, 0x38207368, 0x75616e67,
0x20457870, 0x20240000, 0x3f4d626f, 0x78457674,
0x0, 0x4e4f636f, 0x6d616e64, 0x0,
0x68737465, 0x5f455252, 0x0, 0x412d4572,
0x72427563, 0x0, 0x4552524f, 0x522d4164,
0x64000000, 0x656e714d, 0x45765046, 0x61696c00,
0x656e714d, 0x45764661, 0x696c0000, 0x6661696c,
0x456e454d, 0x0, 0x442d4572, 0x724c6173,
0x74000000, 0x442d4572, 0x72320000, 0x6d437374,
0x4d644552, 0x52000000, 0x70726f6d, 0x4d644552,
0x52000000, 0x46696c74, 0x4d644552, 0x52000000,
0x636d645f, 0x45525200, 0x3f456e71, 0x45767400,
0x3f6e6f51, 0x64457650, 0x0, 0x6576526e,
0x6746756c, 0x6c000000, 0x0, 0x6ea0,
0x7fbc, 0x6e38, 0x8734, 0x82b0,
0x8780, 0x8780, 0x6f54, 0x7694,
0x7f0c, 0x80a8, 0x8074, 0x8780,
0x7e70, 0x80cc, 0x6e64, 0x81cc,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
0x2f646d61, 0x2e632c76, 0x20312e31, 0x2e322e33,
0x20313939, 0x382f3034, 0x2f323720, 0x32323a31,
0x333a3431, 0x20736875, 0x616e6720, 0x45787020,
0x24000000, 0x646d6172, 0x6441544e, 0x0,
0x646d6177, 0x7241544e, 0x0, 0x0,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
0x2f747261, 0x63652e63, 0x2c762031, 0x2e312e32,
0x2e322031, 0x3939382f, 0x30342f32, 0x37203232,
0x3a31333a, 0x35302073, 0x6875616e, 0x67204578,
0x70202400, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
0x2f646174, 0x612e632c, 0x7620312e, 0x312e322e,
0x32203139, 0x39382f30, 0x342f3237, 0x2032323a,
0x31333a34, 0x30207368, 0x75616e67, 0x20457870,
0x20240000, 0x46575f56, 0x45525349, 0x4f4e3a20,
0x23312046, 0x72692041, 0x70722037, 0x2031373a,
0x35353a34, 0x38205044, 0x54203230, 0x30300000,
0x46575f43, 0x4f4d5049, 0x4c455f54, 0x494d453a,
0x2031373a, 0x35353a34, 0x38000000, 0x46575f43,
0x4f4d5049, 0x4c455f42, 0x593a2064, 0x65767263,
0x73000000, 0x46575f43, 0x4f4d5049, 0x4c455f48,
0x4f53543a, 0x20636f6d, 0x70757465, 0x0,
0x46575f43, 0x4f4d5049, 0x4c455f44, 0x4f4d4149,
0x4e3a2065, 0x6e672e61, 0x6374656f, 0x6e2e636f,
0x6d000000, 0x46575f43, 0x4f4d5049, 0x4c45523a,
0x20676363, 0x20766572, 0x73696f6e, 0x20322e37,
0x2e320000, 0x0, 0x0, 0x0,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
0x2f6d656d, 0x2e632c76, 0x20312e31, 0x2e322e32,
0x20313939, 0x382f3034, 0x2f323720, 0x32323a31,
0x333a3434, 0x20736875, 0x616e6720, 0x45787020,
0x24000000, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
0x2f73656e, 0x642e632c, 0x7620312e, 0x312e322e,
0x31312031, 0x3939382f, 0x31322f32, 0x32203137,
0x3a31373a, 0x35352073, 0x6875616e, 0x67204578,
0x70202400, 0x736e6464, 0x654e6f51, 0x20000000,
0x6e6f454e, 0x515f5458, 0x0, 0x736e6464,
0x744e6f51, 0x20000000, 0x3f6e6f51, 0x64547845,
0x0, 0x756e6b72, 0x64747970, 0x65000000,
0x0, 0xaccc, 0xaccc, 0xad9c,
0xaab0, 0xaab0, 0xad9c, 0xad9c,
0xad9c, 0xad9c, 0xad9c, 0xad9c,
0xad9c, 0xad9c, 0xad9c, 0xad9c,
0xad9c, 0xad9c, 0xad9c, 0xad7c,
0x0, 0xbca8, 0xbca8, 0xbd70,
0xae4c, 0xb058, 0xbd70, 0xbd70,
0xbd70, 0xbd70, 0xbd70, 0xbd70,
0xbd70, 0xbd70, 0xbd70, 0xbd70,
0xbd70, 0xbd70, 0xbd70, 0xbd54,
0xb040, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
0x2f726563, 0x762e632c, 0x7620312e, 0x312e322e,
0x31392031, 0x3939382f, 0x30372f32, 0x34203231,
0x3a33303a, 0x30352073, 0x6875616e, 0x67204578,
0x70202400, 0x706b5278, 0x45525200, 0x66726d32,
0x4c617267, 0x65000000, 0x72784e6f, 0x52784264,
0x0, 0x72785144, 0x6d614446, 0x0,
0x72785144, 0x6d614246, 0x0, 0x3f6e6f51,
0x64527845, 0x0, 0x706b5278, 0x45525273,
0x0, 0x66726d32, 0x4c726753, 0x0,
0x72784e6f, 0x42645300, 0x3f724264, 0x446d6146,
0x0, 0x3f724a42, 0x64446d46, 0x0,
0x0, 0xf678, 0xf678, 0xf678,
0xf678, 0xf678, 0xf678, 0xf678,
0xf678, 0xf678, 0xf678, 0xf678,
0xf678, 0xf678, 0xf678, 0xf678,
0xf670, 0xf670, 0xf670, 0x572d444d,
0x41456e46, 0x0, 0x0, 0xfdc0,
0x1015c, 0xfddc, 0x1015c, 0x1015c,
0x1015c, 0x1015c, 0x1015c, 0x1015c,
0xf704, 0x1015c, 0x1015c, 0x1015c,
0x1015c, 0x1015c, 0x10154, 0x10154,
0x10154, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
0x2f6d6163, 0x2e632c76, 0x20312e31, 0x2e322e31,
0x32203139, 0x39382f30, 0x342f3237, 0x2032323a,
0x31333a34, 0x32207368, 0x75616e67, 0x20457870,
0x20240000, 0x6d616374, 0x7841544e, 0x0,
0x4e745379, 0x6e264c6b, 0x0, 0x72656d61,
0x73737274, 0x0, 0x6c696e6b, 0x444f574e,
0x0, 0x656e714d, 0x45765046, 0x61696c00,
0x656e714d, 0x45764661, 0x696c0000, 0x6661696c,
0x456e454d, 0x0, 0x6c696e6b, 0x55500000,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
0x2f636b73, 0x756d2e63, 0x2c762031, 0x2e312e32,
0x2e322031, 0x3939382f, 0x30342f32, 0x37203232,
0x3a31333a, 0x33392073, 0x6875616e, 0x67204578,
0x70202400, 0x50726f62, 0x65506879, 0x0,
0x6c6e6b41, 0x53535254, 0x0, 0x11b2c,
0x11bc4, 0x11bf8, 0x11c2c, 0x11c58,
0x11c6c, 0x11ca8, 0x1207c, 0x11de4,
0x11e24, 0x11e50, 0x11e90, 0x11ec0,
0x11efc, 0x11f30, 0x1207c, 0x122c0,
0x122d8, 0x12300, 0x12320, 0x12348,
0x12478, 0x124a0, 0x124f4, 0x1251c,
0x0, 0x1278c, 0x1285c, 0x12934,
0x12a04, 0x12a60, 0x12b3c, 0x12b64,
0x12c40, 0x12c68, 0x12e10, 0x12e38,
0x12fe0, 0x131d8, 0x1346c, 0x13380,
0x1346c, 0x13498, 0x13008, 0x131b0,
0x0, 0x13b84, 0x13bc8, 0x13c60,
0x13cac, 0x13d1c, 0x13db4, 0x13de8,
0x13e70, 0x13f08, 0x13fd8, 0x14018,
0x1409c, 0x140c0, 0x141f4, 0x646f4261,
0x73655067, 0x0, 0x0, 0x0,
0x0, 0x73746d61, 0x634c4e4b, 0x0,
0x0, 0x14c38, 0x14c38, 0x14b80,
0x14bc4, 0x14c38, 0x14c38, 0x0,
0x0, 0x0 };
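/*
 * Much of tigonFwRodata is printable ASCII packed most-significant byte
 * first into 32-bit words: the opening 0x24486561, 0x6465723a, ... spell
 * "$Header: /projects/rcs/sw/ge/./nic/fw/common/fwmain.c,v ...", the RCS
 * IDs of the firmware source files, interleaved with short event/error
 * tags such as "BadSndRng" (0x42616453, 0x6e64526e, 0x67000000) and runs
 * of what appear to be jump-table offsets.  A minimal standalone sketch
 * (illustrative only, not driver code) that recovers the strings:
 */
#if 0
#include <stdio.h>

/* Print each 32-bit word as four bytes, MSB first, masking
 * non-printable bytes; pass tigonFwRodata and its word count. */
static void dump_fw_strings(const unsigned int *w, int nwords)
{
	int i, shift;

	for (i = 0; i < nwords; i++)
		for (shift = 24; shift >= 0; shift -= 8) {
			int c = (w[i] >> shift) & 0xff;
			putchar(c >= 0x20 && c < 0x7f ? c : '.');
		}
	putchar('\n');
}
#endif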
static u32 tigonFwData[(MAX_DATA_LEN/4) + 1] __devinitdata = {
0x416c7465,
0x6f6e2041, 0x63654e49, 0x43205600, 0x416c7465,
0x6f6e2041, 0x63654e49, 0x43205600, 0x42424242,
0x0, 0x0, 0x0, 0x135418,
0x13e7fc, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x60cf00,
0x60, 0xcf000000, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x3, 0x0,
0x1, 0x0, 0x0, 0x0,
0x1, 0x0, 0x1, 0x0,
0x0, 0x0, 0x0, 0x1,
0x1, 0x0, 0x0, 0x0,
0x0, 0x0, 0x1000000, 0x21000000,
0x12000140, 0x0, 0x0, 0x20000000,
0x120000a0, 0x0, 0x12000060, 0x12000180,
0x120001e0, 0x0, 0x0, 0x0,
0x1, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x2,
0x0, 0x0, 0x30001, 0x1,
0x30201, 0x0, 0x0, 0x0 };
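/*
 * tigonFwData opens with the adapter's ASCII ID: 0x416c7465, 0x6f6e2041,
 * 0x63654e49, 0x43205600 spell "Alteon AceNIC V" (NUL terminated),
 * stored twice and followed by what looks like a 0x42424242 ("BBBB")
 * marker word; the rest is the firmware's initialized data section.
 */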
#endif
/* Generated by genfw.c */
#define tigon2FwReleaseMajor 0xc
#define tigon2FwReleaseMinor 0x4
#define tigon2FwReleaseFix 0xb
#define tigon2FwStartAddr 0x00004000
#define tigon2FwTextAddr 0x00004000
#define tigon2FwTextLen 0x11bc0
#define tigon2FwRodataAddr 0x00015bc0
#define tigon2FwRodataLen 0x10d0
#define tigon2FwDataAddr 0x00016cc0
#define tigon2FwDataLen 0x1c0
#define tigon2FwSbssAddr 0x00016e80
#define tigon2FwSbssLen 0xcc
#define tigon2FwBssAddr 0x00016f50
#define tigon2FwBssLen 0x20c0
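/*
 * The tigon2Fw*Addr/Len constants describe where each section of the
 * Tigon II image must land in NIC-local memory: text at 0x4000 with
 * rodata immediately after it at 0x15bc0, initialized data at 0x16cc0,
 * and sbss/bss regions that carry no image bytes and only need zeroing.
 * A minimal loader sketch follows; nic_write_mem() and nic_clear_mem()
 * are hypothetical helpers standing in for whatever routines the driver
 * actually uses to access adapter memory, not its real API:
 */
#if 0
static void load_tigon2_firmware(void)
{
	/* Copy the three initialized sections into NIC-local memory. */
	nic_write_mem(tigon2FwTextAddr, tigon2FwText, tigon2FwTextLen);
	nic_write_mem(tigon2FwRodataAddr, tigon2FwRodata, tigon2FwRodataLen);
	nic_write_mem(tigon2FwDataAddr, tigon2FwData, tigon2FwDataLen);

	/* sbss/bss have lengths but no data arrays; just clear them. */
	nic_clear_mem(tigon2FwSbssAddr, tigon2FwSbssLen);
	nic_clear_mem(tigon2FwBssAddr, tigon2FwBssLen);

	/* The on-board CPU is then started at tigon2FwStartAddr (0x4000). */
}
#endif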
static u32 tigon2FwText[(MAX_TEXT_LEN/4) + 1] __devinitdata = {
46150x0,
46160x10000003, 0x0, 0xd, 0xd,
46170x3c1d0001, 0x8fbd6d20, 0x3a0f021, 0x3c100000,
46180x26104000, 0xc0010c0, 0x0, 0xd,
46190x3c1d0001, 0x8fbd6d24, 0x3a0f021, 0x3c100000,
46200x26104000, 0xc0017e0, 0x0, 0xd,
46210x0, 0x0, 0x0, 0x0,
46220x0, 0x0, 0x0, 0x0,
46230x0, 0x0, 0x0, 0x0,
46240x0, 0x0, 0x0, 0x0,
46250x0, 0x0, 0x0, 0x0,
46260x0, 0x0, 0x0, 0x0,
46270x0, 0x0, 0x0, 0x0,
46280x0, 0x0, 0x0, 0x0,
46290x0, 0x0, 0x0, 0x0,
46300x0, 0x0, 0x0, 0x0,
46310x0, 0x0, 0x0, 0x2000008,
46320x0, 0x800172f, 0x3c0a0001, 0x800172f,
46330x3c0a0002, 0x800172f, 0x0, 0x8002cac,
46340x0, 0x8002c4f, 0x0, 0x800172f,
46350x3c0a0004, 0x800328a, 0x0, 0x8001a52,
46360x0, 0x800394d, 0x0, 0x80038f4,
46370x0, 0x800172f, 0x3c0a0006, 0x80039bb,
46380x3c0a0007, 0x800172f, 0x3c0a0008, 0x800172f,
46390x3c0a0009, 0x8003a13, 0x0, 0x8002ea6,
46400x0, 0x800172f, 0x3c0a000b, 0x800172f,
46410x3c0a000c, 0x800172f, 0x3c0a000d, 0x80028fb,
46420x0, 0x8002890, 0x0, 0x800172f,
46430x3c0a000e, 0x800208c, 0x0, 0x8001964,
46440x0, 0x8001a04, 0x0, 0x8003ca6,
46450x0, 0x8003c94, 0x0, 0x800172f,
46460x0, 0x800191a, 0x0, 0x800172f,
46470x0, 0x800172f, 0x3c0a0013, 0x800172f,
46480x3c0a0014, 0x0, 0x0, 0x0,
46490x0, 0x0, 0x0, 0x0,
46500x0, 0x0, 0x0, 0x0,
46510x0, 0x0, 0x0, 0x0,
46520x0, 0x0, 0x0, 0x0,
46530x0, 0x0, 0x0, 0x0,
46540x0, 0x0, 0x0, 0x0,
46550x0, 0x0, 0x0, 0x0,
46560x0, 0x0, 0x0, 0x0,
46570x0, 0x0, 0x0, 0x0,
46580x0, 0x0, 0x0, 0x0,
46590x0, 0x0, 0x0, 0x0,
46600x0, 0x0, 0x0, 0x0,
46610x0, 0x0, 0x0, 0x0,
46620x0, 0x0, 0x0, 0x0,
46630x0, 0x0, 0x0, 0x27bdffe0,
46640x3c1cc000, 0xafbf001c, 0xafb00018, 0x8f820140,
46650x24030003, 0xaf8300ec, 0x34420004, 0xc002b20,
46660xaf820140, 0x3c0100c0, 0xc001763, 0xac203ffc,
46670x401821, 0x3c020010, 0x3c010001, 0xac236e9c,
46680x10620011, 0x43102b, 0x14400002, 0x3c020020,
46690x3c020008, 0x1062000c, 0x24050100, 0x3c060001,
46700x8cc66e9c, 0x3c040001, 0x24845c74, 0x3821,
46710xafa00010, 0xc002b3b, 0xafa00014, 0x3c020020,
46720x3c010001, 0xac226e9c, 0x24020008, 0x3c010001,
46730xac226eb4, 0x2402001f, 0x3c010001, 0xac226ec4,
46740x24020016, 0x3c010001, 0xac226e98, 0x3c05fffe,
46750x34a56f08, 0x3c020001, 0x8c426e9c, 0x3c030002,
46760x24639010, 0x3c040001, 0x8c846cc4, 0x431023,
46770x14800002, 0x458021, 0x2610fa38, 0x2402f000,
46780x2028024, 0xc001785, 0x2002021, 0x2022823,
46790x3c040020, 0x821823, 0x651823, 0x247bb000,
46800x3c03fffe, 0x3463bf08, 0x363b821, 0x3c0600bf,
46810x34c6f000, 0x3c070001, 0x8ce76cc0, 0x3c0300bf,
46820x3463e000, 0x852023, 0x3c010001, 0xac246ea8,
46830x822023, 0x3c010001, 0xac256e90, 0x52842,
46840x3c010001, 0xac226e84, 0x27620ffc, 0x3c010001,
46850xac226d20, 0x27621ffc, 0xdb3023, 0x7b1823,
46860x3c010001, 0xac246e88, 0x3c010001, 0xac256eac,
46870x3c010001, 0xac226d24, 0xaf860150, 0x10e00011,
46880xaf830250, 0x3c1d0001, 0x8fbd6ccc, 0x3a0f021,
46890xc001749, 0x0, 0x3c020001, 0x8c426cd0,
46900x3c030001, 0x8c636cd4, 0x2442fe00, 0x24630200,
46910x3c010001, 0xac226cd0, 0x3c010001, 0x10000004,
46920xac236cd4, 0x3c1d0001, 0x8fbd6d20, 0x3a0f021,
46930x3c020001, 0x8c426cc4, 0x1040000d, 0x26fafa38,
46940x3c020001, 0x8c426cd0, 0x3c030001, 0x8c636cd4,
46950x3c1a0001, 0x8f5a6cd4, 0x2442fa38, 0x246305c8,
46960x3c010001, 0xac226cd0, 0x3c010001, 0xac236cd4,
46970x3c020001, 0x8c426cc8, 0x14400003, 0x0,
46980x3c010001, 0xac206cd0, 0xc001151, 0x0,
46990x8fbf001c, 0x8fb00018, 0x3e00008, 0x27bd0020,
47000x3c020001, 0x8c426cd0, 0x3c030001, 0x8c636cd4,
47010x27bdff98, 0xafb00048, 0x3c100001, 0x8e1066b8,
47020xafb20050, 0x3c120000, 0x26524100, 0xafbf0060,
47030xafbe005c, 0xafb50058, 0xafb30054, 0xafb1004c,
47040xafa20034, 0xafa30030, 0xafa00010, 0xafa00014,
47050x8f860040, 0x3c040001, 0x24845c80, 0x24050200,
47060x3c010001, 0xac326e80, 0xc002b3b, 0x2003821,
47070x8f830040, 0x3c02f000, 0x621824, 0x3c026000,
47080x1062000b, 0xa3a0003f, 0x240e0001, 0x3c040001,
47090x24845c88, 0xa3ae003f, 0xafa00010, 0xafa00014,
47100x8f860040, 0x24050300, 0xc002b3b, 0x2003821,
47110x8f820240, 0x3c030001, 0x431025, 0xaf820240,
47120xaf800048, 0x8f820048, 0x14400005, 0x0,
47130xaf800048, 0x8f820048, 0x10400004, 0x0,
47140xaf800048, 0x10000003, 0x2e02021, 0xaf80004c,
47150x2e02021, 0x3c050001, 0xc002ba8, 0x34a540f8,
47160x3402021, 0xc002ba8, 0x240505c8, 0x3c020001,
47170x8c426ea8, 0x3c0d0001, 0x8dad6e88, 0x3c030001,
47180x8c636e84, 0x3c080001, 0x8d086e90, 0x3c090001,
47190x8d296eac, 0x3c0a0001, 0x8d4a6eb4, 0x3c0b0001,
47200x8d6b6ec4, 0x3c0c0001, 0x8d8c6e98, 0x3c040001,
47210x24845c94, 0x24050400, 0xaf42013c, 0x8f42013c,
47220x24060001, 0x24070001, 0xaf400000, 0xaf4d0138,
47230xaf430144, 0xaf480148, 0xaf49014c, 0xaf4a0150,
47240xaf4b0154, 0xaf4c0158, 0x2442ff80, 0xaf420140,
47250x24020001, 0xafa20010, 0xc002b3b, 0xafa00014,
47260x8f420138, 0xafa20010, 0x8f42013c, 0xafa20014,
47270x8f460144, 0x8f470148, 0x3c040001, 0x24845ca0,
47280xc002b3b, 0x24050500, 0xafb70010, 0xafba0014,
47290x8f46014c, 0x8f470150, 0x3c040001, 0x24845cac,
47300xc002b3b, 0x24050600, 0x3c020001, 0x8c426e9c,
47310x3603821, 0x3c060002, 0x24c69010, 0x2448ffff,
47320x1061824, 0xe81024, 0x43102b, 0x10400006,
47330x24050900, 0x3c040001, 0x24845cb8, 0xafa80010,
47340xc002b3b, 0xafa00014, 0x8f82000c, 0xafa20010,
47350x8f82003c, 0xafa20014, 0x8f860000, 0x8f870004,
47360x3c040001, 0x24845cc4, 0xc002b3b, 0x24051000,
47370x8c020220, 0x8c030224, 0x8c060218, 0x8c07021c,
47380x3c040001, 0x24845ccc, 0x24051100, 0xafa20010,
47390xc002b3b, 0xafa30014, 0xaf800054, 0xaf80011c,
47400x8c020218, 0x30420002, 0x10400009, 0x0,
47410x8c020220, 0x3c030002, 0x34630004, 0x431025,
47420xaf42000c, 0x8c02021c, 0x10000008, 0x34420004,
47430x8c020220, 0x3c030002, 0x34630006, 0x431025,
47440xaf42000c, 0x8c02021c, 0x34420006, 0xaf420014,
47450x8c020218, 0x30420010, 0x1040000a, 0x0,
47460x8c02021c, 0x34420004, 0xaf420010, 0x8c020220,
47470x3c03000a, 0x34630004, 0x431025, 0x10000009,
47480xaf420008, 0x8c020220, 0x3c03000a, 0x34630006,
47490x431025, 0xaf420008, 0x8c02021c, 0x34420006,
47500xaf420010, 0x24020001, 0xaf8200a0, 0xaf8200b0,
47510x8f830054, 0x8f820054, 0xaf8000d0, 0xaf8000c0,
47520x10000002, 0x24630064, 0x8f820054, 0x621023,
47530x2c420065, 0x1440fffc, 0x0, 0x8c040208,
47540x8c05020c, 0x26e20028, 0xaee20020, 0x24020490,
47550xaee20010, 0xaee40008, 0xaee5000c, 0x26e40008,
47560x8c820000, 0x8c830004, 0xaf820090, 0xaf830094,
47570x8c820018, 0xaf8200b4, 0x9482000a, 0xaf82009c,
47580x8f420014, 0xaf8200b0, 0x8f8200b0, 0x30420004,
47590x1440fffd, 0x0, 0x8f8200b0, 0x3c03ef00,
47600x431024, 0x10400021, 0x0, 0x8f8200b4,
47610xafa20010, 0x8f820090, 0x8f830094, 0x3c040001,
47620x24845cd4, 0xafa30014, 0x8f8600b0, 0x8f87009c,
47630x3c050001, 0xc002b3b, 0x34a5200d, 0x3c040001,
47640x24845ce0, 0x240203c0, 0xafa20010, 0xafa00014,
47650x8f860144, 0x3c070001, 0x24e75ce8, 0xc002b3b,
47660x3405dead, 0x8f82011c, 0x34420002, 0xaf82011c,
47670x8f820220, 0x34420004, 0xaf820220, 0x8f820140,
47680x3c030001, 0x431025, 0xaf820140, 0x96e20472,
47690x96e60452, 0x96e70462, 0xafa20010, 0x96e20482,
47700x3c040001, 0x24845d14, 0x24051200, 0xc002b3b,
47710xafa20014, 0x96f00452, 0x32020001, 0x10400002,
47720xb021, 0x24160001, 0x32020002, 0x54400001,
47730x36d60002, 0x32020008, 0x54400001, 0x36d60004,
47740x32020010, 0x54400001, 0x36d60008, 0x32020020,
47750x54400001, 0x36d60010, 0x32020040, 0x54400001,
47760x36d60020, 0x32020080, 0x54400001, 0x36d60040,
47770x96e60482, 0x30c20200, 0x54400001, 0x36d64000,
47780x96e30472, 0x30620200, 0x10400003, 0x30620100,
47790x10000003, 0x36d62000, 0x54400001, 0x36d61000,
47800x96f00462, 0x32c24000, 0x14400004, 0x3207009b,
47810x30c2009b, 0x14e20007, 0x240e0001, 0x32c22000,
47820x1440000d, 0x32020001, 0x3062009b, 0x10e20009,
47830x240e0001, 0x3c040001, 0x24845d20, 0x24051300,
47840x2003821, 0xa3ae003f, 0xafa30010, 0xc002b3b,
47850xafa00014, 0x32020001, 0x54400001, 0x36d60080,
47860x32020002, 0x54400001, 0x36d60100, 0x32020008,
47870x54400001, 0x36d60200, 0x32020010, 0x54400001,
47880x36d60400, 0x32020080, 0x54400001, 0x36d60800,
47890x8c020218, 0x30420200, 0x10400002, 0x3c020008,
47900x2c2b025, 0x8c020218, 0x30420800, 0x10400002,
47910x3c020080, 0x2c2b025, 0x8c020218, 0x30420400,
47920x10400002, 0x3c020100, 0x2c2b025, 0x8c020218,
47930x30420100, 0x10400002, 0x3c020200, 0x2c2b025,
47940x8c020218, 0x30420080, 0x10400002, 0x3c020400,
47950x2c2b025, 0x8c020218, 0x30422000, 0x10400002,
47960x3c020010, 0x2c2b025, 0x8c020218, 0x30424000,
47970x10400002, 0x3c020020, 0x2c2b025, 0x8c020218,
47980x30421000, 0x10400002, 0x3c020040, 0x2c2b025,
47990x8ee20498, 0x8ee3049c, 0xaf420160, 0xaf430164,
48000x8ee204a0, 0x8ee304a4, 0xaf420168, 0xaf43016c,
48010x8ee204a8, 0x8ee304ac, 0xaf420170, 0xaf430174,
48020x8ee20428, 0x8ee3042c, 0xaf420178, 0xaf43017c,
48030x8ee20448, 0x8ee3044c, 0xaf420180, 0xaf430184,
48040x8ee20458, 0x8ee3045c, 0xaf420188, 0xaf43018c,
48050x8ee20468, 0x8ee3046c, 0xaf420190, 0xaf430194,
48060x8ee20478, 0x8ee3047c, 0xaf420198, 0xaf43019c,
48070x8ee20488, 0x8ee3048c, 0xaf4201a0, 0xaf4301a4,
48080x8ee204b0, 0x8ee304b4, 0x24040080, 0xaf4201a8,
48090xaf4301ac, 0xc002ba8, 0x24050080, 0x8c02025c,
48100x27440224, 0xaf4201f0, 0x8c020260, 0x24050200,
48110x24060008, 0xc002bbf, 0xaf4201f8, 0x3c043b9a,
48120x3484ca00, 0x3821, 0x24020006, 0x24030002,
48130xaf4201f4, 0x240203e8, 0xaf430204, 0xaf430200,
48140xaf4401fc, 0xaf420294, 0x24020001, 0xaf430290,
48150xaf42029c, 0x3c030001, 0x671821, 0x90636cd8,
48160x3471021, 0x24e70001, 0xa043022c, 0x2ce2000f,
48170x1440fff8, 0x3471821, 0x24e70001, 0x3c080001,
48180x350840f8, 0x8f820040, 0x3c040001, 0x24845d2c,
48190x24051400, 0x21702, 0x24420030, 0xa062022c,
48200x3471021, 0xa040022c, 0x8c070218, 0x2c03021,
48210x240205c8, 0xafa20010, 0xc002b3b, 0xafa80014,
48220x3c040001, 0x24845d38, 0x3c050000, 0x24a55c80,
48230x24060010, 0x27b10030, 0x2203821, 0x27b30034,
48240xc0017a3, 0xafb30010, 0x3c030001, 0x8c636cc8,
48250x1060000a, 0x408021, 0x8fa30030, 0x2405ff00,
48260x8fa20034, 0x246400ff, 0x852024, 0x831823,
48270x431023, 0xafa20034, 0xafa40030, 0x3c040001,
48280x24845d44, 0x3c050000, 0x24a54100, 0x24060108,
48290x2203821, 0xc0017a3, 0xafb30010, 0x409021,
48300x32c20003, 0x3c010001, 0xac326e80, 0x10400045,
48310x2203821, 0x8f820050, 0x3c030010, 0x431024,
48320x10400016, 0x0, 0x8c020218, 0x30420040,
48330x1040000f, 0x24020001, 0x8f820050, 0x8c030218,
48340x240e0001, 0x3c040001, 0x24845d50, 0xa3ae003f,
48350xafa20010, 0xafa30014, 0x8f870040, 0x24051500,
48360xc002b3b, 0x2c03021, 0x10000004, 0x0,
48370x3c010001, 0x370821, 0xa02240f4, 0x3c040001,
48380x24845d5c, 0x3c050001, 0x24a55b40, 0x3c060001,
48390x24c65bac, 0xc53023, 0x8f420010, 0x27b30030,
48400x2603821, 0x27b10034, 0x34420a00, 0xaf420010,
48410xc0017a3, 0xafb10010, 0x3c040001, 0x24845d70,
48420x3c050001, 0x24a5b714, 0x3c060001, 0x24c6ba90,
48430xc53023, 0x2603821, 0xaf420108, 0xc0017a3,
48440xafb10010, 0x3c040001, 0x24845d8c, 0x3c050001,
48450x24a5be58, 0x3c060001, 0x24c6c900, 0xc53023,
48460x2603821, 0x3c010001, 0xac226ef4, 0xc0017a3,
48470xafb10010, 0x3c040001, 0x24845da4, 0x10000024,
48480x24051600, 0x3c040001, 0x24845dac, 0x3c050001,
48490x24a5a10c, 0x3c060001, 0x24c6a238, 0xc53023,
48500xc0017a3, 0xafb30010, 0x3c040001, 0x24845dbc,
48510x3c050001, 0x24a5b2b0, 0x3c060001, 0x24c6b70c,
48520xc53023, 0x2203821, 0xaf420108, 0xc0017a3,
48530xafb30010, 0x3c040001, 0x24845dd0, 0x3c050001,
48540x24a5ba98, 0x3c060001, 0x24c6be50, 0xc53023,
48550x2203821, 0x3c010001, 0xac226ef4, 0xc0017a3,
48560xafb30010, 0x3c040001, 0x24845de4, 0x24051650,
48570x2c03021, 0x3821, 0x3c010001, 0xac226ef8,
48580xafa00010, 0xc002b3b, 0xafa00014, 0x32c20020,
48590x10400021, 0x27a70030, 0x3c040001, 0x24845df0,
48600x3c050001, 0x24a5b13c, 0x3c060001, 0x24c6b2a8,
48610xc53023, 0x24022000, 0xaf42001c, 0x27a20034,
48620xc0017a3, 0xafa20010, 0x21900, 0x31982,
48630x3c040800, 0x641825, 0xae430028, 0x24030010,
48640xaf43003c, 0x96e30450, 0xaf430040, 0x8f430040,
48650x3c040001, 0x24845e04, 0xafa00014, 0xafa30010,
48660x8f47001c, 0x24051660, 0x3c010001, 0xac226ef0,
48670x10000025, 0x32c60020, 0x8ee20448, 0x8ee3044c,
48680xaf43001c, 0x8f42001c, 0x2442e000, 0x2c422001,
48690x1440000a, 0x240e0001, 0x3c040001, 0x24845e10,
48700xa3ae003f, 0xafa00010, 0xafa00014, 0x8f46001c,
48710x24051700, 0xc002b3b, 0x3821, 0x3c020000,
48720x24425cbc, 0x21100, 0x21182, 0x3c030800,
48730x431025, 0xae420028, 0x24020008, 0xaf42003c,
48740x96e20450, 0xaf420040, 0x8f420040, 0x3c040001,
48750x24845e1c, 0xafa00014, 0xafa20010, 0x8f47001c,
48760x24051800, 0x32c60020, 0xc002b3b, 0x0,
48770x3c050fff, 0x3c030001, 0x8c636ef4, 0x34a5ffff,
48780x2403021, 0x3c020001, 0x8c426ef8, 0x3c040800,
48790x651824, 0x31882, 0x641825, 0x451024,
48800x21082, 0x441025, 0xacc20080, 0x32c20180,
48810x10400056, 0xacc30020, 0x8f82005c, 0x3c030080,
48820x431024, 0x1040000d, 0x0, 0x8f820050,
48830xafa20010, 0x8f82005c, 0x240e0001, 0x3c040001,
48840x24845e28, 0xa3ae003f, 0xafa20014, 0x8f870040,
48850x24051900, 0xc002b3b, 0x2c03021, 0x8f820050,
48860x3c030010, 0x431024, 0x10400016, 0x0,
48870x8c020218, 0x30420040, 0x1040000f, 0x24020001,
48880x8f820050, 0x8c030218, 0x240e0001, 0x3c040001,
48890x24845d50, 0xa3ae003f, 0xafa20010, 0xafa30014,
48900x8f870040, 0x24052000, 0xc002b3b, 0x2c03021,
48910x10000004, 0x0, 0x3c010001, 0x370821,
48920xa02240f4, 0x3c040001, 0x24845e34, 0x3c050001,
48930x24a55ac0, 0x3c060001, 0x24c65b38, 0xc53023,
48940x8f420008, 0x27b30030, 0x2603821, 0x27b10034,
48950x34420e00, 0xaf420008, 0xc0017a3, 0xafb10010,
48960x3c040001, 0x24845e4c, 0x3c050001, 0x24a5d8b4,
48970x3c060001, 0x24c6e3c8, 0xc53023, 0x2603821,
48980xaf42010c, 0xc0017a3, 0xafb10010, 0x3c040001,
48990x24845e64, 0x3c050001, 0x24a5e9ac, 0x3c060001,
49000x24c6f0f0, 0xc53023, 0x2603821, 0x3c010001,
49010xac226f04, 0xc0017a3, 0xafb10010, 0x3c040001,
49020x24845e7c, 0x10000027, 0x24052100, 0x3c040001,
49030x24845e84, 0x3c050001, 0x24a59fc8, 0x3c060001,
49040x24c6a104, 0xc53023, 0x27b10030, 0x2203821,
49050x27b30034, 0xc0017a3, 0xafb30010, 0x3c040001,
49060x24845e94, 0x3c050001, 0x24a5cad4, 0x3c060001,
49070x24c6d8ac, 0xc53023, 0x2203821, 0xaf42010c,
49080xc0017a3, 0xafb30010, 0x3c040001, 0x24845ea4,
49090x3c050001, 0x24a5e84c, 0x3c060001, 0x24c6e9a4,
49100xc53023, 0x2203821, 0x3c010001, 0xac226f04,
49110xc0017a3, 0xafb30010, 0x3c040001, 0x24845eb8,
49120x24052150, 0x2c03021, 0x3821, 0x3c010001,
49130xac226f10, 0xafa00010, 0xc002b3b, 0xafa00014,
49140x3c110fff, 0x3c030001, 0x8c636f04, 0x3631ffff,
49150x2409821, 0x3c020001, 0x8c426f10, 0x3c0e0800,
49160x711824, 0x31882, 0x6e1825, 0x511024,
49170x21082, 0x4e1025, 0xae630038, 0xae620078,
49180x8c020218, 0x30420040, 0x14400004, 0x24020001,
49190x3c010001, 0x370821, 0xa02240f4, 0x3c040001,
49200x24845ec4, 0x3c050001, 0x24a5e3d0, 0x3c060001,
49210x24c6e52c, 0xc53023, 0x27be0030, 0x3c03821,
49220x27b50034, 0xc0017a3, 0xafb50010, 0x3c010001,
49230xac226efc, 0x511024, 0x21082, 0x3c0e0800,
49240x4e1025, 0xae620050, 0x32c22000, 0x10400006,
49250x3c03821, 0x3c020000, 0x24425cbc, 0x2221024,
49260x1000000f, 0x21082, 0x3c040001, 0x24845ed8,
49270x3c050001, 0x24a5e534, 0x3c060001, 0x24c6e6e4,
49280xc53023, 0xc0017a3, 0xafb50010, 0x3c010001,
49290xac226f14, 0x511024, 0x21082, 0x3c0e0800,
49300x4e1025, 0xae620048, 0x32c24000, 0x10400005,
49310x27a70030, 0x3c020000, 0x24425cbc, 0x1000000e,
49320x21100, 0x3c040001, 0x24845ef0, 0x3c050001,
49330x24a5e6ec, 0x3c060001, 0x24c6e844, 0xc53023,
49340x27a20034, 0xc0017a3, 0xafa20010, 0x3c010001,
49350xac226f08, 0x21100, 0x21182, 0x3c030800,
49360x431025, 0xae420060, 0x3c040001, 0x24845f08,
49370x3c050001, 0x24a58230, 0x3c060001, 0x24c68650,
49380xc53023, 0x27b10030, 0x2203821, 0x27b30034,
49390xc0017a3, 0xafb30010, 0x3c0e0fff, 0x35ceffff,
49400x3c040001, 0x24845f14, 0x3c050000, 0x24a56468,
49410x3c060000, 0x24c66588, 0xc53023, 0x2203821,
49420x240f021, 0x3c010001, 0xac226edc, 0x4e1024,
49430x21082, 0x3c150800, 0x551025, 0xafae0044,
49440xafc200b8, 0xc0017a3, 0xafb30010, 0x3c040001,
49450x24845f20, 0x3c050000, 0x24a56590, 0x3c060000,
49460x24c66808, 0x8fae0044, 0xc53023, 0x2203821,
49470x3c010001, 0xac226ed0, 0x4e1024, 0x21082,
49480x551025, 0xafc200e8, 0xc0017a3, 0xafb30010,
49490x3c040001, 0x24845f38, 0x3c050000, 0x24a56810,
49500x3c060000, 0x24c66940, 0x8fae0044, 0xc53023,
49510x2203821, 0x3c010001, 0xac226ec8, 0x4e1024,
49520x21082, 0x551025, 0xafc200c0, 0xc0017a3,
49530xafb30010, 0x3c040001, 0x24845f50, 0x3c050001,
49540x24a5fad0, 0x3c060001, 0x24c6fba8, 0x8fae0044,
49550xc53023, 0x2203821, 0x3c010001, 0xac226ed4,
49560x4e1024, 0x21082, 0x551025, 0xafc200c8,
49570xc0017a3, 0xafb30010, 0x3c040001, 0x24845f5c,
49580x3c050001, 0x24a5c93c, 0x3c060001, 0x24c6ca20,
49590xc53023, 0x2203821, 0xaf420110, 0xc0017a3,
49600xafb30010, 0x3c040001, 0x24845f6c, 0x3c050001,
49610x24a5c910, 0x3c060001, 0x24c6c934, 0xc53023,
49620x2203821, 0xaf420124, 0xc0017a3, 0xafb30010,
49630x3c040001, 0x24845f7c, 0x3c050001, 0x24a55a80,
49640x3c060001, 0x24c65aac, 0xc53023, 0x2203821,
49650xaf420120, 0xaf420114, 0xc0017a3, 0xafb30010,
49660x3c040001, 0x24845f88, 0x3c050001, 0x24a5f298,
49670x3c060001, 0x24c6f6b4, 0xc53023, 0x2203821,
49680xaf420118, 0xc0017a3, 0xafb30010, 0x8fae0044,
49690x3c010001, 0xac226f18, 0x4e1024, 0x21082,
49700x551025, 0xc003fc3, 0xafc200d0, 0xc003c40,
49710x0, 0xc0027a8, 0x0, 0xac000228,
49720xac00022c, 0x96e20450, 0x2442ffff, 0xaf420038,
49730x96e20460, 0xaf420080, 0x32c24000, 0x14400003,
49740x0, 0x96e20480, 0xaf420084, 0x96e70490,
49750x50e00001, 0x24070800, 0x24e2ffff, 0xaf420088,
49760xaf42007c, 0x24020800, 0x10e2000f, 0x32c24000,
49770x10400003, 0x24020400, 0x10e2000b, 0x0,
49780x240e0001, 0x3c040001, 0x24845f98, 0xa3ae003f,
49790x96e60490, 0x24052170, 0x2c03821, 0xafa00010,
49800xc002b3b, 0xafa00014, 0x8f430138, 0x8f440138,
49810x24020001, 0xa34205c2, 0xaf430094, 0xaf440098,
49820xafa00010, 0xafa00014, 0x8f460080, 0x8f470084,
49830x3c040001, 0x24845fa4, 0xc002b3b, 0x24052200,
49840xc0024a4, 0x3c110800, 0x3c1433d8, 0x3694cb58,
49850x3c020800, 0x34420080, 0x3c040001, 0x24845fb0,
49860x3c050000, 0x24a55d00, 0x3c060000, 0x24c65d1c,
49870xc53023, 0x27a70030, 0xaf820060, 0x2402ffff,
49880xaf820064, 0x27a20034, 0xc0017a3, 0xafa20010,
49890x3c010001, 0xac226eb8, 0x21100, 0x21182,
49900x511025, 0xc0018fc, 0xae420000, 0x8f820240,
49910x3c030001, 0x431025, 0xaf820240, 0x3c020000,
49920x24424034, 0xaf820244, 0xaf800240, 0x8f820060,
49930x511024, 0x14400005, 0x3c030800, 0x8f820060,
49940x431024, 0x1040fffd, 0x0, 0xc003c4d,
49950x8821, 0x3c020100, 0xafa20020, 0x8f530018,
49960x240200ff, 0x56620001, 0x26710001, 0x8c020228,
49970x1622000e, 0x1330c0, 0x8f42033c, 0x24420001,
49980xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
49990x24845c24, 0x3c050009, 0xafa00014, 0xafa20010,
50000x8fa60020, 0x1000003f, 0x34a50100, 0xd71021,
50010x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
50020xc01821, 0x8f440178, 0x8f45017c, 0x1021,
50030x24070004, 0xafa70010, 0xafb10014, 0x8f48000c,
50040x24c604c0, 0x2e63021, 0xafa80018, 0x8f48010c,
50050x24070008, 0xa32821, 0xa3482b, 0x822021,
50060x100f809, 0x892021, 0x1440000b, 0x24070008,
50070x8f820120, 0xafa20010, 0x8f820124, 0x3c040001,
50080x24845c2c, 0x3c050009, 0xafa20014, 0x8fa60020,
50090x1000001c, 0x34a50200, 0x8f440160, 0x8f450164,
50100x8f43000c, 0xaf510018, 0x8f860120, 0x24020010,
50110xafa20010, 0xafb10014, 0xafa30018, 0x8f42010c,
50120x40f809, 0x24c6001c, 0x14400010, 0x0,
50130x8f420340, 0x24420001, 0xaf420340, 0x8f420340,
50140x8f820120, 0xafa20010, 0x8f820124, 0x3c040001,
50150x24845c34, 0x3c050009, 0xafa20014, 0x8fa60020,
50160x34a50300, 0xc002b3b, 0x2603821, 0x8f4202e4,
50170x24420001, 0xaf4202e4, 0x8f4202e4, 0x93a2003f,
50180x10400069, 0x3c020700, 0x34423000, 0xafa20028,
0x8f530018, 0x240200ff, 0x12620002, 0x8821,
0x26710001, 0x8c020228, 0x1622000e, 0x1330c0,
0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
0x8c020228, 0x3c040001, 0x24845c24, 0x3c050009,
0xafa00014, 0xafa20010, 0x8fa60028, 0x1000003f,
0x34a50100, 0xd71021, 0x8fa30028, 0x8fa4002c,
0xac4304c0, 0xac4404c4, 0xc01821, 0x8f440178,
0x8f45017c, 0x1021, 0x24070004, 0xafa70010,
0xafb10014, 0x8f48000c, 0x24c604c0, 0x2e63021,
0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
0xa3482b, 0x822021, 0x100f809, 0x892021,
0x1440000b, 0x24070008, 0x8f820120, 0xafa20010,
0x8f820124, 0x3c040001, 0x24845c2c, 0x3c050009,
0xafa20014, 0x8fa60028, 0x1000001c, 0x34a50200,
0x8f440160, 0x8f450164, 0x8f43000c, 0xaf510018,
0x8f860120, 0x24020010, 0xafa20010, 0xafb10014,
0xafa30018, 0x8f42010c, 0x40f809, 0x24c6001c,
0x14400010, 0x0, 0x8f420340, 0x24420001,
0xaf420340, 0x8f420340, 0x8f820120, 0xafa20010,
0x8f820124, 0x3c040001, 0x24845c34, 0x3c050009,
0xafa20014, 0x8fa60028, 0x34a50300, 0xc002b3b,
0x2603821, 0x8f4202f0, 0x24420001, 0xaf4202f0,
0x8f4202f0, 0x3c040001, 0x24845fc0, 0xafa00010,
0xafa00014, 0x8fa60028, 0x24052300, 0xc002b3b,
0x3821, 0x10000004, 0x0, 0x8c020264,
0x10400005, 0x0, 0x8f8200a0, 0x30420004,
0x1440fffa, 0x0, 0x8f820044, 0x34420004,
0xaf820044, 0x8f420308, 0x24420001, 0xaf420308,
0x8f420308, 0x8f8200d8, 0x8f8300d4, 0x431023,
0x2442ff80, 0xaf420090, 0x8f420090, 0x2842ff81,
0x10400006, 0x24020001, 0x8f420090, 0x8f430144,
0x431021, 0xaf420090, 0x24020001, 0xaf42008c,
0x32c20008, 0x10400006, 0x0, 0x8f820214,
0x3c038100, 0x3042ffff, 0x431025, 0xaf820214,
0x3c030001, 0x8c636d94, 0x30620002, 0x10400009,
0x30620001, 0x3c040001, 0x24845fcc, 0x3c050000,
0x24a56d50, 0x3c060000, 0x24c671c8, 0x10000012,
0xc53023, 0x10400009, 0x0, 0x3c040001,
0x24845fdc, 0x3c050000, 0x24a571d0, 0x3c060000,
0x24c67678, 0x10000008, 0xc53023, 0x3c040001,
0x24845fec, 0x3c050000, 0x24a56948, 0x3c060000,
0x24c66d48, 0xc53023, 0x27a70030, 0x27a20034,
0xc0017a3, 0xafa20010, 0x3c010001, 0xac226ecc,
0x3c020001, 0x8c426ecc, 0x3c030800, 0x21100,
0x21182, 0x431025, 0xae420040, 0x8f8200a0,
0xafa20010, 0x8f8200b0, 0xafa20014, 0x8f86005c,
0x8f87011c, 0x3c040001, 0x24845ffc, 0x3c010001,
0xac366ea4, 0x3c010001, 0xac206e94, 0x3c010001,
0xac3c6e8c, 0x3c010001, 0xac3b6ebc, 0x3c010001,
0xac376ec0, 0x3c010001, 0xac3a6ea0, 0xc002b3b,
0x24052400, 0x8f820200, 0xafa20010, 0x8f820220,
0xafa20014, 0x8f860044, 0x8f870050, 0x3c040001,
0x24846008, 0xc002b3b, 0x24052500, 0x8f830060,
0x74100b, 0x242000a, 0x200f821, 0x0,
0xd, 0x8fbf0060, 0x8fbe005c, 0x8fb50058,
0x8fb30054, 0x8fb20050, 0x8fb1004c, 0x8fb00048,
0x3e00008, 0x27bd0068, 0x27bdffe0, 0x3c040001,
0x24846014, 0x24052600, 0x3021, 0x3821,
0xafbf0018, 0xafa00010, 0xc002b3b, 0xafa00014,
0x8fbf0018, 0x3e00008, 0x27bd0020, 0x3e00008,
0x0, 0x3e00008, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x3e00008, 0x0, 0x3e00008, 0x0,
0x27bdfde0, 0x27a50018, 0x3c04dead, 0x3484beef,
0xafbf0218, 0x8f820150, 0x3c03001f, 0x3463ffff,
0xafa40018, 0xa22823, 0xa32824, 0x8ca20000,
0x1044000a, 0x0, 0xafa50010, 0x8ca20000,
0xafa20014, 0x8f860150, 0x8f870250, 0x3c040001,
0x2484601c, 0xc002b3b, 0x24052700, 0x8fbf0218,
0x3e00008, 0x27bd0220, 0x27bdffe0, 0x3c06abba,
0x34c6babe, 0xafb00018, 0x3c100004, 0x3c07007f,
0x34e7ffff, 0xafbf001c, 0x102840, 0x8e040000,
0x8ca30000, 0xaca00000, 0xae060000, 0x8ca20000,
0xaca30000, 0x10460005, 0xae040000, 0xa08021,
0xf0102b, 0x1040fff5, 0x102840, 0x3c040001,
0x24846028, 0x24052800, 0x2003021, 0x3821,
0xafa00010, 0xc002b3b, 0xafa00014, 0x2001021,
0x8fbf001c, 0x8fb00018, 0x3e00008, 0x27bd0020,
0x8c020224, 0x3047003f, 0x10e00010, 0x803021,
0x2821, 0x24030020, 0xe31024, 0x10400002,
0x63042, 0xa62821, 0x31842, 0x1460fffb,
0xe31024, 0x2402f000, 0xa22824, 0x3402ffff,
0x45102b, 0x14400003, 0x3c020001, 0x10000008,
0x3c020001, 0x3442ffff, 0x851823, 0x43102b,
0x14400003, 0xa01021, 0x3c02fffe, 0x821021,
0x3e00008, 0x0, 0x27bdffd0, 0xafb50028,
0x8fb50040, 0xafb20020, 0xa09021, 0xafb1001c,
0x24c60003, 0xafbf002c, 0xafb30024, 0xafb00018,
0x8ea20000, 0x2403fffc, 0xc38024, 0x50102b,
0x1440001b, 0xe08821, 0x8e330000, 0xafb00010,
0x8ea20000, 0xafa20014, 0x8e270000, 0x24053000,
0xc002b3b, 0x2403021, 0x8e230000, 0x702021,
0x64102b, 0x10400007, 0x2402821, 0x8ca20000,
0xac620000, 0x24630004, 0x64102b, 0x1440fffb,
0x24a50004, 0x8ea20000, 0x501023, 0xaea20000,
0x8e220000, 0x501021, 0x1000000b, 0xae220000,
0x2402002d, 0xa0820000, 0xafb00010, 0x8ea20000,
0x2409821, 0xafa20014, 0x8e270000, 0x24053100,
0xc002b3b, 0x2603021, 0x2601021, 0x8fbf002c,
0x8fb50028, 0x8fb30024, 0x8fb20020, 0x8fb1001c,
0x8fb00018, 0x3e00008, 0x27bd0030, 0x27bdffe8,
0x3c1cc000, 0x3c05fffe, 0x3c030001, 0x8c636e84,
0x3c040001, 0x8c846e90, 0x34a5bf08, 0x24021ffc,
0x3c010001, 0xac226cd0, 0x3c0200c0, 0x3c010001,
0xac226cd4, 0x3c020020, 0xafbf0010, 0x3c0100c0,
0xac201ffc, 0x431023, 0x441023, 0x245bb000,
0x365b821, 0x3c1d0001, 0x8fbd6ccc, 0x3a0f021,
0x3c0400c0, 0x34840200, 0x3c1a00c0, 0x3c0300c0,
0x346307c8, 0x24021dfc, 0x3c010001, 0xac226cd0,
0x24021834, 0x3c010001, 0xac246cd4, 0x3c010001,
0xac226cd0, 0x3c010001, 0xac236cd4, 0xc00180d,
0x375a0200, 0x8fbf0010, 0x3e00008, 0x27bd0018,
0x27bdffc8, 0x3c040001, 0x24846034, 0x24053200,
0x3c020001, 0x8c426cd0, 0x3c030001, 0x8c636cd4,
0x3021, 0x3603821, 0xafbf0030, 0xafb3002c,
0xafb20028, 0xafb10024, 0xafb00020, 0xafa2001c,
0xafa30018, 0xafb70010, 0xc002b3b, 0xafba0014,
0xc001916, 0x0, 0x8f820240, 0x34420004,
0xaf820240, 0x24020001, 0xaf420000, 0x3c020001,
0x571021, 0x904240f4, 0x10400092, 0x2403fffc,
0x3c100001, 0x2610ac73, 0x3c120001, 0x2652a84c,
0x2121023, 0x438024, 0x8fa3001c, 0x3c040001,
0x24846040, 0x70102b, 0x1440001a, 0x27b30018,
0x8fb10018, 0x24053000, 0x2403021, 0xafb00010,
0xafa30014, 0xc002b3b, 0x2203821, 0x8fa30018,
0x702021, 0x64102b, 0x10400007, 0x2403021,
0x8cc20000, 0xac620000, 0x24630004, 0x64102b,
0x1440fffb, 0x24c60004, 0x8fa2001c, 0x501023,
0xafa2001c, 0x8e620000, 0x501021, 0x1000000a,
0xae620000, 0x2408821, 0x24053100, 0xafb00010,
0xafa30014, 0x8fa70018, 0x2203021, 0x2402002d,
0xc002b3b, 0xa0820000, 0x24070020, 0x8fa3001c,
0x3c040001, 0x2484605c, 0x24120020, 0x3c010001,
0xac316eb0, 0x2c620020, 0x1440001d, 0x27b10018,
0x8fb00018, 0x24053000, 0x3c060001, 0x24c66f50,
0xafa70010, 0xafa30014, 0xc002b3b, 0x2003821,
0x8fa30018, 0x3c040001, 0x24846f50, 0x24650020,
0x65102b, 0x10400007, 0x0, 0x8c820000,
0xac620000, 0x24630004, 0x65102b, 0x1440fffb,
0x24840004, 0x8fa2001c, 0x521023, 0xafa2001c,
0x8e220000, 0x521021, 0x1000000b, 0xae220000,
0x3c100001, 0x26106f50, 0x24053100, 0xafa70010,
0xafa30014, 0x8fa70018, 0x2003021, 0x2402002d,
0xc002b3b, 0xa0820000, 0x24070020, 0x3c040001,
0x24846070, 0x8fa3001c, 0x24120020, 0x3c010001,
0xac306ee4, 0x2c620020, 0x1440001d, 0x27b10018,
0x8fb00018, 0x24053000, 0x3c060001, 0x24c66f70,
0xafa70010, 0xafa30014, 0xc002b3b, 0x2003821,
0x8fa30018, 0x3c040001, 0x24846f70, 0x24650020,
0x65102b, 0x10400007, 0x0, 0x8c820000,
0xac620000, 0x24630004, 0x65102b, 0x1440fffb,
0x24840004, 0x8fa2001c, 0x521023, 0xafa2001c,
0x8e220000, 0x521021, 0x1000000b, 0xae220000,
0x3c100001, 0x26106f70, 0x24053100, 0xafa70010,
0xafa30014, 0x8fa70018, 0x2003021, 0x2402002d,
0xc002b3b, 0xa0820000, 0x3c010001, 0x10000031,
0xac306ee0, 0x3c100001, 0x2610821f, 0x3c120001,
0x2652809c, 0x2121023, 0x438024, 0x8fa3001c,
0x3c040001, 0x24846084, 0x70102b, 0x1440001a,
0x27b30018, 0x8fb10018, 0x24053000, 0x2403021,
0xafb00010, 0xafa30014, 0xc002b3b, 0x2203821,
0x8fa30018, 0x702021, 0x64102b, 0x10400007,
0x2403021, 0x8cc20000, 0xac620000, 0x24630004,
0x64102b, 0x1440fffb, 0x24c60004, 0x8fa2001c,
0x501023, 0xafa2001c, 0x8e620000, 0x501021,
0x1000000a, 0xae620000, 0x2408821, 0x24053100,
0xafb00010, 0xafa30014, 0x8fa70018, 0x2203021,
0x2402002d, 0xc002b3b, 0xa0820000, 0x3c010001,
0xac316eb0, 0x3c030001, 0x8c636eb0, 0x24020400,
0x60f809, 0xaf820070, 0x8fbf0030, 0x8fb3002c,
0x8fb20028, 0x8fb10024, 0x8fb00020, 0x3e00008,
0x27bd0038, 0x0, 0x0, 0x8f820040,
0x3c03f000, 0x431024, 0x3c036000, 0x14430006,
0x0, 0x8f820050, 0x2403ff80, 0x431024,
0x34420055, 0xaf820050, 0x8f820054, 0x244203e8,
0xaf820058, 0x240201f4, 0xaf4200e0, 0x24020004,
0xaf4200e8, 0x24020002, 0xaf4001b0, 0xaf4000e4,
0xaf4200dc, 0xaf4000d8, 0xaf4000d4, 0x3e00008,
0xaf4000d0, 0x8f820054, 0x24420005, 0x3e00008,
0xaf820078, 0x27bdffe8, 0xafbf0010, 0x8f820054,
0x244203e8, 0xaf820058, 0x3c020800, 0x2c21024,
0x10400004, 0x3c02f7ff, 0x3442ffff, 0x2c2b024,
0x36940040, 0x3c020001, 0x8c426da8, 0x10400017,
0x3c020200, 0x3c030001, 0x8c636f1c, 0x10600016,
0x282a025, 0x3c020001, 0x8c426e44, 0x14400012,
0x3c020200, 0x3c020001, 0x8c426d94, 0x30420003,
0x1440000d, 0x3c020200, 0x8f830224, 0x3c020002,
0x8c428fec, 0x10620008, 0x3c020200, 0xc003daf,
0x0, 0x10000004, 0x3c020200, 0xc004196,
0x0, 0x3c020200, 0x2c21024, 0x10400003,
0x0, 0xc001f4b, 0x0, 0x8f4200d8,
0x8f4300dc, 0x24420001, 0xaf4200d8, 0x43102b,
0x14400003, 0x0, 0xaf4000d8, 0x36940080,
0x8c030238, 0x1060000c, 0x0, 0x8f4201b0,
0x244203e8, 0xaf4201b0, 0x43102b, 0x14400006,
0x0, 0x934205c5, 0x14400003, 0x0,
0xc001da0, 0x0, 0x8fbf0010, 0x3e00008,
0x27bd0018, 0x3e00008, 0x0, 0x27bdffd8,
0xafbf0020, 0x8f43002c, 0x8f420038, 0x10620059,
0x0, 0x3c020001, 0x571021, 0x904240f0,
0x10400026, 0x24070008, 0x8f440170, 0x8f450174,
0x8f48000c, 0x8f860120, 0x24020020, 0xafa20010,
0xafa30014, 0xafa80018, 0x8f42010c, 0x40f809,
0x24c6001c, 0x14400011, 0x24020001, 0x3c010001,
0x370821, 0xa02240f0, 0x8f820124, 0xafa20010,
0x8f820128, 0x3c040001, 0x24846128, 0xafa20014,
0x8f46002c, 0x8f870120, 0x3c050009, 0xc002b3b,
0x34a50900, 0x1000005c, 0x0, 0x8f420300,
0x24420001, 0xaf420300, 0x8f420300, 0x8f42002c,
0xa34005c1, 0x10000027, 0xaf420038, 0x8f440170,
0x8f450174, 0x8f43002c, 0x8f48000c, 0x8f860120,
0x24020080, 0xafa20010, 0xafa30014, 0xafa80018,
0x8f42010c, 0x40f809, 0x24c6001c, 0x14400011,
0x24020001, 0x3c010001, 0x370821, 0xa02240f1,
0x8f820124, 0xafa20010, 0x8f820128, 0x3c040001,
0x24846134, 0xafa20014, 0x8f46002c, 0x8f870120,
0x3c050009, 0xc002b3b, 0x34a51100, 0x10000036,
0x0, 0x8f420300, 0x8f43002c, 0x24420001,
0xaf420300, 0x8f420300, 0x24020001, 0xa34205c1,
0xaf430038, 0x3c010001, 0x370821, 0xa02040f1,
0x3c010001, 0x370821, 0xa02040f0, 0x10000026,
0xaf400034, 0x934205c1, 0x1040001d, 0x0,
0xa34005c1, 0x8f820040, 0x30420001, 0x14400008,
0x2021, 0x8c030104, 0x24020001, 0x50620005,
0x24040001, 0x8c020264, 0x10400003, 0x801021,
0x24040001, 0x801021, 0x10400006, 0x0,
0x8f42030c, 0x24420001, 0xaf42030c, 0x10000008,
0x8f42030c, 0x8f820044, 0x34420004, 0xaf820044,
0x8f420308, 0x24420001, 0xaf420308, 0x8f420308,
0x3c010001, 0x370821, 0xa02040f0, 0x3c010001,
0x370821, 0xa02040f1, 0x8f420000, 0x10400007,
0x0, 0xaf80004c, 0x8f82004c, 0x1040fffd,
0x0, 0x10000005, 0x0, 0xaf800048,
0x8f820048, 0x1040fffd, 0x0, 0x8f820060,
0x3c03ff7f, 0x3463ffff, 0x431024, 0xaf820060,
0x8f420000, 0x10400003, 0x0, 0x10000002,
0xaf80004c, 0xaf800048, 0x8fbf0020, 0x3e00008,
0x27bd0028, 0x3e00008, 0x0, 0x27bdffd8,
0xafbf0020, 0x8f430044, 0x8f42007c, 0x10620029,
0x24070008, 0x8f440168, 0x8f45016c, 0x8f48000c,
0x8f860120, 0x24020040, 0xafa20010, 0xafa30014,
0xafa80018, 0x8f42010c, 0x40f809, 0x24c6001c,
0x14400011, 0x24020001, 0x3c010001, 0x370821,
0xa02240f2, 0x8f820124, 0xafa20010, 0x8f820128,
0x3c040001, 0x2484613c, 0xafa20014, 0x8f460044,
0x8f870120, 0x3c050009, 0xc002b3b, 0x34a51300,
0x1000000f, 0x0, 0x8f420304, 0x24420001,
0xaf420304, 0x8f420304, 0x8f420044, 0xaf42007c,
0x3c010001, 0x370821, 0xa02040f2, 0x10000004,
0xaf400078, 0x3c010001, 0x370821, 0xa02040f2,
0x8f420000, 0x10400007, 0x0, 0xaf80004c,
0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
0x0, 0x8f820060, 0x3c03feff, 0x3463ffff,
0x431024, 0xaf820060, 0x8f420000, 0x10400003,
0x0, 0x10000002, 0xaf80004c, 0xaf800048,
0x8fbf0020, 0x3e00008, 0x27bd0028, 0x3e00008,
0x0, 0x3c020001, 0x8c426da8, 0x27bdffa8,
0xafbf0050, 0xafbe004c, 0xafb50048, 0xafb30044,
0xafb20040, 0xafb1003c, 0xafb00038, 0x104000d5,
0x8f900044, 0x8f4200d0, 0x24430001, 0x2842000b,
0x144000e4, 0xaf4300d0, 0x8f420004, 0x30420002,
0x1440009c, 0xaf4000d0, 0x8f420004, 0x3c030001,
0x8c636d98, 0x34420002, 0xaf420004, 0x24020001,
0x14620003, 0x3c020600, 0x10000002, 0x34423000,
0x34421000, 0xafa20020, 0x8f4a0018, 0xafaa0034,
0x27aa0020, 0xafaa002c, 0x8faa0034, 0x240200ff,
0x11420002, 0x1821, 0x25430001, 0x8c020228,
0x609821, 0x1662000e, 0x3c050009, 0x8f42033c,
0x24420001, 0xaf42033c, 0x8f42033c, 0x8c020228,
0x8fa70034, 0x3c040001, 0x2484610c, 0xafa00014,
0xafa20010, 0x8fa60020, 0x10000070, 0x34a50500,
0x8faa0034, 0xa38c0, 0xf71021, 0x8fa30020,
0x8fa40024, 0xac4304c0, 0xac4404c4, 0x8f830054,
0x8f820054, 0x247103e8, 0x2221023, 0x2c4203e9,
0x1040001b, 0xa821, 0xe09021, 0x265e04c0,
0x8f440178, 0x8f45017c, 0x2401821, 0x240a0004,
0xafaa0010, 0xafb30014, 0x8f48000c, 0x1021,
0x2fe3021, 0xafa80018, 0x8f48010c, 0x24070008,
0xa32821, 0xa3482b, 0x822021, 0x100f809,
0x892021, 0x54400006, 0x24150001, 0x8f820054,
0x2221023, 0x2c4203e9, 0x1440ffe9, 0x0,
0x32a200ff, 0x54400018, 0xaf530018, 0x8f420378,
0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
0x8faa002c, 0x8fa70034, 0xafa20010, 0x8f820124,
0x3c040001, 0x24846118, 0xafa20014, 0x8d460000,
0x3c050009, 0x10000035, 0x34a50600, 0x8f420308,
0x24150001, 0x24420001, 0xaf420308, 0x8f420308,
0x1000001e, 0x32a200ff, 0x8f830054, 0x8f820054,
0x247103e8, 0x2221023, 0x2c4203e9, 0x10400016,
0xa821, 0x3c1e0020, 0x24120010, 0x8f42000c,
0x8f440160, 0x8f450164, 0x8f860120, 0xafb20010,
0xafb30014, 0x5e1025, 0xafa20018, 0x8f42010c,
0x24070008, 0x40f809, 0x24c6001c, 0x1440ffe3,
0x0, 0x8f820054, 0x2221023, 0x2c4203e9,
0x1440ffee, 0x0, 0x32a200ff, 0x14400011,
0x3c050009, 0x8f420378, 0x24420001, 0xaf420378,
0x8f420378, 0x8f820120, 0x8faa002c, 0x8fa70034,
0xafa20010, 0x8f820124, 0x3c040001, 0x24846120,
0xafa20014, 0x8d460000, 0x34a50700, 0xc002b3b,
0x0, 0x8f4202ec, 0x24420001, 0xaf4202ec,
0x8f4202ec, 0x8f420004, 0x30420001, 0x50400029,
0x36100040, 0x3c020400, 0x2c21024, 0x10400013,
0x2404ffdf, 0x8f420250, 0x8f430254, 0x8f4401b4,
0x14640006, 0x36100040, 0x8f420270, 0x8f430274,
0x8f4401b8, 0x10640007, 0x2402ffdf, 0x8f420250,
0x8f430254, 0x8f440270, 0x8f450274, 0x10000012,
0x3a100020, 0x1000002b, 0x2028024, 0x8f420250,
0x8f430254, 0x8f4501b4, 0x14650006, 0x2048024,
0x8f420270, 0x8f430274, 0x8f4401b8, 0x50640021,
0x36100040, 0x8f420250, 0x8f430254, 0x8f440270,
0x8f450274, 0x3a100040, 0xaf4301b4, 0x10000019,
0xaf4501b8, 0x8f4200d4, 0x24430001, 0x10000011,
0x28420033, 0x8f420004, 0x30420001, 0x10400009,
0x3c020400, 0x2c21024, 0x10400004, 0x2402ffdf,
0x2028024, 0x1000000b, 0x36100040, 0x10000009,
0x36100060, 0x8f4200d4, 0x36100040, 0x24430001,
0x284201f5, 0x14400003, 0xaf4300d4, 0xaf4000d4,
0x3a100020, 0xaf900044, 0x2402ff7f, 0x282a024,
0x8fbf0050, 0x8fbe004c, 0x8fb50048, 0x8fb30044,
0x8fb20040, 0x8fb1003c, 0x8fb00038, 0x3e00008,
0x27bd0058, 0x3e00008, 0x0, 0x3c020001,
0x8c426da8, 0x27bdffb0, 0xafbf0048, 0xafbe0044,
0xafb50040, 0xafb3003c, 0xafb20038, 0xafb10034,
0x104000c7, 0xafb00030, 0x8f4200d0, 0x24430001,
0x2842000b, 0x144000da, 0xaf4300d0, 0x8f420004,
0x30420002, 0x14400097, 0xaf4000d0, 0x8f420004,
0x3c030001, 0x8c636d98, 0x34420002, 0xaf420004,
0x24020001, 0x14620003, 0x3c020600, 0x10000002,
0x34423000, 0x34421000, 0xafa20020, 0x1821,
0x8f5e0018, 0x27aa0020, 0x240200ff, 0x13c20002,
0xafaa002c, 0x27c30001, 0x8c020228, 0x609021,
0x1642000e, 0x1e38c0, 0x8f42033c, 0x24420001,
0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
0x2484610c, 0x3c050009, 0xafa00014, 0xafa20010,
0x8fa60020, 0x1000006d, 0x34a50500, 0xf71021,
0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
0x2c4203e9, 0x1040001b, 0x9821, 0xe08821,
0x263504c0, 0x8f440178, 0x8f45017c, 0x2201821,
0x240a0004, 0xafaa0010, 0xafb20014, 0x8f48000c,
0x1021, 0x2f53021, 0xafa80018, 0x8f48010c,
0x24070008, 0xa32821, 0xa3482b, 0x822021,
0x100f809, 0x892021, 0x54400006, 0x24130001,
0x8f820054, 0x2021023, 0x2c4203e9, 0x1440ffe9,
0x0, 0x326200ff, 0x54400017, 0xaf520018,
0x8f420378, 0x24420001, 0xaf420378, 0x8f420378,
0x8f820120, 0x8faa002c, 0xafa20010, 0x8f820124,
0x3c040001, 0x24846118, 0x3c050009, 0xafa20014,
0x8d460000, 0x10000035, 0x34a50600, 0x8f420308,
0x24130001, 0x24420001, 0xaf420308, 0x8f420308,
0x1000001e, 0x326200ff, 0x8f830054, 0x8f820054,
0x247003e8, 0x2021023, 0x2c4203e9, 0x10400016,
0x9821, 0x3c150020, 0x24110010, 0x8f42000c,
0x8f440160, 0x8f450164, 0x8f860120, 0xafb10010,
0xafb20014, 0x551025, 0xafa20018, 0x8f42010c,
0x24070008, 0x40f809, 0x24c6001c, 0x1440ffe3,
0x0, 0x8f820054, 0x2021023, 0x2c4203e9,
0x1440ffee, 0x0, 0x326200ff, 0x14400011,
0x0, 0x8f420378, 0x24420001, 0xaf420378,
0x8f420378, 0x8f820120, 0x8faa002c, 0xafa20010,
0x8f820124, 0x3c040001, 0x24846120, 0x3c050009,
0xafa20014, 0x8d460000, 0x34a50700, 0xc002b3b,
0x3c03821, 0x8f4202ec, 0x24420001, 0xaf4202ec,
0x8f4202ec, 0x8f420004, 0x30420001, 0x10400018,
0x24040001, 0x8f420250, 0x8f430254, 0x8f4501b4,
0x3c010001, 0x14650006, 0xa0246cf1, 0x8f420270,
0x8f430274, 0x8f4401b8, 0x10640021, 0x0,
0x8f420250, 0x8f430254, 0x3c040001, 0x90846cf0,
0x8f460270, 0x8f470274, 0x38840001, 0xaf4301b4,
0xaf4701b8, 0x3c010001, 0x10000025, 0xa0246cf0,
0x8f4200d4, 0x3c010001, 0xa0206cf0, 0x24430001,
0x28420033, 0x1440001e, 0xaf4300d4, 0x3c020001,
0x90426cf1, 0xaf4000d4, 0x10000017, 0x38420001,
0x8f420004, 0x30420001, 0x10400008, 0x0,
0xc00565a, 0x2021, 0x3c010001, 0xa0206cf1,
0x3c010001, 0x1000000e, 0xa0206cf0, 0x8f4200d4,
0x3c010001, 0xa0206cf0, 0x24430001, 0x284201f5,
0x14400007, 0xaf4300d4, 0x3c020001, 0x90426cf1,
0xaf4000d4, 0x421026, 0x3c010001, 0xa0226cf1,
0x3c030001, 0x8c636d98, 0x24020002, 0x1462000c,
0x3c030002, 0x3c030001, 0x90636cf1, 0x24020001,
0x5462001f, 0x2021, 0x3c020001, 0x90426cf0,
0x1443001b, 0x24040005, 0x10000019, 0x24040006,
0x3c020002, 0x8c428ff4, 0x431024, 0x1040000b,
0x24020001, 0x3c030001, 0x90636cf1, 0x54620010,
0x2021, 0x3c020001, 0x90426cf0, 0x1443000c,
0x24040003, 0x1000000a, 0x24040004, 0x3c030001,
0x90636cf1, 0x14620006, 0x2021, 0x3c020001,
0x90426cf0, 0x24040001, 0x50440001, 0x24040002,
0xc00565a, 0x0, 0x2402ff7f, 0x282a024,
0x8fbf0048, 0x8fbe0044, 0x8fb50040, 0x8fb3003c,
0x8fb20038, 0x8fb10034, 0x8fb00030, 0x3e00008,
0x27bd0050, 0x3e00008, 0x0, 0x3c020001,
0x8c426da8, 0x27bdffb0, 0xafbf0048, 0xafbe0044,
0xafb50040, 0xafb3003c, 0xafb20038, 0xafb10034,
0x104000de, 0xafb00030, 0x8f4200d0, 0x3c040001,
0x8c846d98, 0x24430001, 0x2842000b, 0xaf4400e8,
0x144000fe, 0xaf4300d0, 0x8f420004, 0x30420002,
0x14400095, 0xaf4000d0, 0x8f420004, 0x34420002,
0xaf420004, 0x24020001, 0x14820003, 0x3c020600,
0x10000002, 0x34423000, 0x34421000, 0xafa20020,
0x1821, 0x8f5e0018, 0x27aa0020, 0x240200ff,
0x13c20002, 0xafaa002c, 0x27c30001, 0x8c020228,
0x609021, 0x1642000e, 0x1e38c0, 0x8f42033c,
0x24420001, 0xaf42033c, 0x8f42033c, 0x8c020228,
0x3c040001, 0x2484610c, 0x3c050009, 0xafa00014,
0xafa20010, 0x8fa60020, 0x1000006d, 0x34a50500,
0xf71021, 0x8fa30020, 0x8fa40024, 0xac4304c0,
0xac4404c4, 0x8f830054, 0x8f820054, 0x247003e8,
0x2021023, 0x2c4203e9, 0x1040001b, 0x9821,
0xe08821, 0x263504c0, 0x8f440178, 0x8f45017c,
0x2201821, 0x240a0004, 0xafaa0010, 0xafb20014,
0x8f48000c, 0x1021, 0x2f53021, 0xafa80018,
0x8f48010c, 0x24070008, 0xa32821, 0xa3482b,
0x822021, 0x100f809, 0x892021, 0x54400006,
0x24130001, 0x8f820054, 0x2021023, 0x2c4203e9,
0x1440ffe9, 0x0, 0x326200ff, 0x54400017,
0xaf520018, 0x8f420378, 0x24420001, 0xaf420378,
0x8f420378, 0x8f820120, 0x8faa002c, 0xafa20010,
0x8f820124, 0x3c040001, 0x24846118, 0x3c050009,
0xafa20014, 0x8d460000, 0x10000035, 0x34a50600,
0x8f420308, 0x24130001, 0x24420001, 0xaf420308,
0x8f420308, 0x1000001e, 0x326200ff, 0x8f830054,
0x8f820054, 0x247003e8, 0x2021023, 0x2c4203e9,
0x10400016, 0x9821, 0x3c150020, 0x24110010,
0x8f42000c, 0x8f440160, 0x8f450164, 0x8f860120,
0xafb10010, 0xafb20014, 0x551025, 0xafa20018,
0x8f42010c, 0x24070008, 0x40f809, 0x24c6001c,
0x1440ffe3, 0x0, 0x8f820054, 0x2021023,
0x2c4203e9, 0x1440ffee, 0x0, 0x326200ff,
0x14400011, 0x0, 0x8f420378, 0x24420001,
0xaf420378, 0x8f420378, 0x8f820120, 0x8faa002c,
0xafa20010, 0x8f820124, 0x3c040001, 0x24846120,
0x3c050009, 0xafa20014, 0x8d460000, 0x34a50700,
0xc002b3b, 0x3c03821, 0x8f4202ec, 0x24420001,
0xaf4202ec, 0x8f4202ec, 0x8f420004, 0x30420001,
0x10400033, 0x3c020400, 0x2c21024, 0x10400017,
0x0, 0x934205c0, 0x8f440250, 0x8f450254,
0x8f4301b4, 0x34420020, 0x14a30006, 0xa34205c0,
0x8f420270, 0x8f430274, 0x8f4401b8, 0x10640008,
0x0, 0x8f420250, 0x8f430254, 0x934405c0,
0x8f460270, 0x8f470274, 0x10000016, 0x38840040,
0x934205c0, 0x10000048, 0x304200bf, 0x934205c0,
0x8f440250, 0x8f450254, 0x8f4301b4, 0x304200bf,
0x14a30006, 0xa34205c0, 0x8f420270, 0x8f430274,
0x8f4401b8, 0x1064000b, 0x0, 0x8f420250,
0x8f430254, 0x934405c0, 0x8f460270, 0x8f470274,
0x38840020, 0xaf4301b4, 0xaf4701b8, 0x10000033,
0xa34405c0, 0x934205c0, 0x1000002f, 0x34420020,
0x934205c0, 0x8f4300d4, 0x34420020, 0xa34205c0,
0x24620001, 0x10000023, 0x28630033, 0x8f4200e4,
0x8f4300e0, 0x24420001, 0xaf4200e4, 0x43102a,
0x14400006, 0x24030001, 0x8f4200e8, 0x14430002,
0xaf4000e4, 0x24030004, 0xaf4300e8, 0x8f420004,
0x30420001, 0x1040000d, 0x3c020400, 0x2c21024,
0x10400007, 0x0, 0x934205c0, 0x34420040,
0xa34205c0, 0x934205c0, 0x1000000f, 0x304200df,
0x934205c0, 0x1000000c, 0x34420060, 0x934205c0,
0x8f4300d4, 0x34420020, 0xa34205c0, 0x24620001,
0x286300fb, 0x14600005, 0xaf4200d4, 0x934205c0,
0xaf4000d4, 0x38420040, 0xa34205c0, 0x934205c0,
0x8f4300e8, 0x3042007f, 0xa34205c0, 0x24020001,
0x14620005, 0x0, 0x934405c0, 0x42102,
0x10000003, 0x348400f0, 0x934405c0, 0x3484000f,
0xc005640, 0x0, 0x2402ff7f, 0x282a024,
0x8fbf0048, 0x8fbe0044, 0x8fb50040, 0x8fb3003c,
0x8fb20038, 0x8fb10034, 0x8fb00030, 0x3e00008,
0x27bd0050, 0x3e00008, 0x0, 0x27bdffb0,
0x274401c0, 0x26e30028, 0x24650400, 0x65102b,
0xafbf0048, 0xafbe0044, 0xafb50040, 0xafb3003c,
0xafb20038, 0xafb10034, 0x10400007, 0xafb00030,
0x8c820000, 0xac620000, 0x24630004, 0x65102b,
0x1440fffb, 0x24840004, 0x8c020080, 0xaee20044,
0x8c0200c0, 0xaee20040, 0x8c020084, 0xaee20030,
0x8c020084, 0xaee2023c, 0x8c020088, 0xaee20240,
0x8c02008c, 0xaee20244, 0x8c020090, 0xaee20248,
0x8c020094, 0xaee2024c, 0x8c020098, 0xaee20250,
0x8c02009c, 0xaee20254, 0x8c0200a0, 0xaee20258,
0x8c0200a4, 0xaee2025c, 0x8c0200a8, 0xaee20260,
0x8c0200ac, 0xaee20264, 0x8c0200b0, 0xaee20268,
0x8c0200b4, 0xaee2026c, 0x8c0200b8, 0xaee20270,
0x8c0200bc, 0x24040001, 0xaee20274, 0xaee00034,
0x41080, 0x571021, 0x8ee30034, 0x8c42023c,
0x24840001, 0x621821, 0x2c82000f, 0xaee30034,
0x1440fff8, 0x41080, 0x8c0200cc, 0xaee20048,
0x8c0200d0, 0xaee2004c, 0x8c0200e0, 0xaee201f8,
0x8c0200e4, 0xaee201fc, 0x8c0200e8, 0xaee20200,
0x8c0200ec, 0xaee20204, 0x8c0200f0, 0xaee20208,
0x8ee400c0, 0x8ee500c4, 0x8c0200fc, 0x45102b,
0x1040000b, 0x0, 0x8ee200c0, 0x8ee300c4,
0x24040001, 0x24050000, 0x651821, 0x65302b,
0x441021, 0x461021, 0xaee200c0, 0xaee300c4,
0x8c0200fc, 0x8ee400c0, 0x8ee500c4, 0x2408ffff,
0x24090000, 0x401821, 0x1021, 0x882024,
0xa92824, 0x822025, 0xa32825, 0xaee400c0,
0xaee500c4, 0x8ee400d0, 0x8ee500d4, 0x8c0200f4,
0x45102b, 0x1040000b, 0x0, 0x8ee200d0,
0x8ee300d4, 0x24040001, 0x24050000, 0x651821,
0x65302b, 0x441021, 0x461021, 0xaee200d0,
0xaee300d4, 0x8c0200f4, 0x8ee400d0, 0x8ee500d4,
0x401821, 0x1021, 0x882024, 0xa92824,
0x822025, 0xa32825, 0xaee400d0, 0xaee500d4,
0x8ee400c8, 0x8ee500cc, 0x8c0200f8, 0x45102b,
0x1040000b, 0x0, 0x8ee200c8, 0x8ee300cc,
0x24040001, 0x24050000, 0x651821, 0x65302b,
0x441021, 0x461021, 0xaee200c8, 0xaee300cc,
0x8c0200f8, 0x8ee400c8, 0x8ee500cc, 0x401821,
0x1021, 0x882024, 0xa92824, 0x822025,
0xa32825, 0x24020008, 0xaee400c8, 0xaee500cc,
0xafa20010, 0xafa00014, 0x8f42000c, 0x8c040208,
0x8c05020c, 0xafa20018, 0x8f42010c, 0x26e60028,
0x40f809, 0x24070400, 0x104000f0, 0x3c020400,
0xafa20020, 0x934205c6, 0x10400089, 0x1821,
0x8f5e0018, 0x27aa0020, 0x240200ff, 0x13c20002,
0xafaa002c, 0x27c30001, 0x8c020228, 0x609021,
0x1642000e, 0x1e38c0, 0x8f42033c, 0x24420001,
0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
0x2484610c, 0x3c050009, 0xafa00014, 0xafa20010,
0x8fa60020, 0x1000006b, 0x34a50500, 0xf71021,
0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
0x2c4203e9, 0x1040001b, 0x9821, 0xe08821,
0x263504c0, 0x8f440178, 0x8f45017c, 0x2201821,
0x240a0004, 0xafaa0010, 0xafb20014, 0x8f48000c,
0x1021, 0x2f53021, 0xafa80018, 0x8f48010c,
0x24070008, 0xa32821, 0xa3482b, 0x822021,
0x100f809, 0x892021, 0x54400006, 0x24130001,
0x8f820054, 0x2021023, 0x2c4203e9, 0x1440ffe9,
0x0, 0x326200ff, 0x54400017, 0xaf520018,
0x8f420378, 0x24420001, 0xaf420378, 0x8f420378,
0x8f820120, 0x8faa002c, 0xafa20010, 0x8f820124,
0x3c040001, 0x24846118, 0x3c050009, 0xafa20014,
0x8d460000, 0x10000033, 0x34a50600, 0x8f420308,
0x24130001, 0x24420001, 0xaf420308, 0x8f420308,
0x1000001c, 0x326200ff, 0x8f830054, 0x8f820054,
0x247003e8, 0x2021023, 0x2c4203e9, 0x10400014,
0x9821, 0x24110010, 0x8f42000c, 0x8f440160,
0x8f450164, 0x8f860120, 0xafb10010, 0xafb20014,
0xafa20018, 0x8f42010c, 0x24070008, 0x40f809,
0x24c6001c, 0x1440ffe5, 0x0, 0x8f820054,
0x2021023, 0x2c4203e9, 0x1440ffef, 0x0,
0x326200ff, 0x54400012, 0x24020001, 0x8f420378,
0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
0x8faa002c, 0xafa20010, 0x8f820124, 0x3c040001,
0x24846120, 0x3c050009, 0xafa20014, 0x8d460000,
0x34a50700, 0xc002b3b, 0x3c03821, 0x1021,
0x1440005b, 0x24020001, 0x10000065, 0x0,
0x8f510018, 0x240200ff, 0x12220002, 0x8021,
0x26300001, 0x8c020228, 0x1602000e, 0x1130c0,
0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
0x8c020228, 0x3c040001, 0x248460f4, 0x3c050009,
0xafa00014, 0xafa20010, 0x8fa60020, 0x1000003f,
0x34a50100, 0xd71021, 0x8fa30020, 0x8fa40024,
0xac4304c0, 0xac4404c4, 0xc01821, 0x8f440178,
0x8f45017c, 0x1021, 0x24070004, 0xafa70010,
0xafb00014, 0x8f48000c, 0x24c604c0, 0x2e63021,
0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
0xa3482b, 0x822021, 0x100f809, 0x892021,
0x1440000b, 0x24070008, 0x8f820120, 0xafa20010,
0x8f820124, 0x3c040001, 0x248460fc, 0x3c050009,
0xafa20014, 0x8fa60020, 0x1000001c, 0x34a50200,
0x8f440160, 0x8f450164, 0x8f43000c, 0xaf500018,
0x8f860120, 0x24020010, 0xafa20010, 0xafb00014,
0xafa30018, 0x8f42010c, 0x40f809, 0x24c6001c,
0x54400011, 0x24020001, 0x8f420340, 0x24420001,
0xaf420340, 0x8f420340, 0x8f820120, 0xafa20010,
0x8f820124, 0x3c040001, 0x24846104, 0x3c050009,
0xafa20014, 0x8fa60020, 0x34a50300, 0xc002b3b,
0x2203821, 0x1021, 0x1040000d, 0x24020001,
0x8f4202e8, 0xa34005c6, 0xaf4001b0, 0x24420001,
0xaf4202e8, 0x8f4202e8, 0x8ee20150, 0x24420001,
0xaee20150, 0x10000003, 0x8ee20150, 0x24020001,
0xa34205c6, 0x8fbf0048, 0x8fbe0044, 0x8fb50040,
0x8fb3003c, 0x8fb20038, 0x8fb10034, 0x8fb00030,
0x3e00008, 0x27bd0050, 0x27bdffd8, 0xafbf0020,
0x8f8200b0, 0x30420004, 0x10400068, 0x0,
0x8f430128, 0x8f820104, 0x14620005, 0x0,
0x8f430130, 0x8f8200b4, 0x10620006, 0x0,
0x8f820104, 0xaf420128, 0x8f8200b4, 0x1000005b,
0xaf420130, 0x8f8200b0, 0x3c030080, 0x431024,
0x1040000d, 0x0, 0x8f82011c, 0x34420002,
0xaf82011c, 0x8f8200b0, 0x2403fffb, 0x431024,
0xaf8200b0, 0x8f82011c, 0x2403fffd, 0x431024,
0x1000004a, 0xaf82011c, 0x8f430128, 0x8f820104,
0x14620005, 0x0, 0x8f430130, 0x8f8200b4,
0x10620010, 0x0, 0x8f820104, 0xaf420128,
0x8f8200b4, 0x8f430128, 0xaf420130, 0xafa30010,
0x8f420130, 0x3c040001, 0x24846144, 0xafa20014,
0x8f86011c, 0x8f8700b0, 0x3c050005, 0x10000031,
0x34a50900, 0x8f420128, 0xafa20010, 0x8f420130,
0x3c040001, 0x24846150, 0xafa20014, 0x8f86011c,
0x8f8700b0, 0x3c050005, 0xc002b3b, 0x34a51000,
0x8f82011c, 0x34420002, 0xaf82011c, 0x8f830104,
0x8f8200b0, 0x34420001, 0xaf8200b0, 0x24020008,
0xaf830104, 0xafa20010, 0xafa00014, 0x8f42000c,
0x8c040208, 0x8c05020c, 0xafa20018, 0x8f42010c,
0x26e60028, 0x40f809, 0x24070400, 0x8f82011c,
0x2403fffd, 0x431024, 0xaf82011c, 0x8ee201dc,
0x24420001, 0xaee201dc, 0x8ee201dc, 0x8f420128,
0xafa20010, 0x8f420130, 0x3c040001, 0x2484615c,
0xafa20014, 0x8f86011c, 0x8f8700b0, 0x3c050005,
0x34a51100, 0xc002b3b, 0x0, 0x8f8200a0,
0x30420004, 0x10400069, 0x0, 0x8f43012c,
0x8f820124, 0x14620005, 0x0, 0x8f430134,
0x8f8200a4, 0x10620006, 0x0, 0x8f820124,
0xaf42012c, 0x8f8200a4, 0x1000005c, 0xaf420134,
0x8f8200a0, 0x3c030080, 0x431024, 0x1040000d,
0x0, 0x8f82011c, 0x34420002, 0xaf82011c,
0x8f8200a0, 0x2403fffb, 0x431024, 0xaf8200a0,
0x8f82011c, 0x2403fffd, 0x431024, 0x1000004b,
0xaf82011c, 0x8f43012c, 0x8f820124, 0x14620005,
0x0, 0x8f430134, 0x8f8200a4, 0x10620010,
0x0, 0x8f820124, 0xaf42012c, 0x8f8200a4,
0x8f43012c, 0xaf420134, 0xafa30010, 0x8f420134,
0x3c040001, 0x24846168, 0xafa20014, 0x8f86011c,
0x8f8700a0, 0x3c050005, 0x10000032, 0x34a51200,
0x8f42012c, 0xafa20010, 0x8f420134, 0x3c040001,
0x24846174, 0xafa20014, 0x8f86011c, 0x8f8700a0,
0x3c050005, 0xc002b3b, 0x34a51300, 0x8f82011c,
0x34420002, 0xaf82011c, 0x8f830124, 0x8f8200a0,
0x34420001, 0xaf8200a0, 0x24020080, 0xaf830124,
0xafa20010, 0xafa00014, 0x8f420014, 0x8c040208,
0x8c05020c, 0xafa20018, 0x8f420108, 0x3c060001,
0x24c66ed8, 0x40f809, 0x24070004, 0x8f82011c,
0x2403fffd, 0x431024, 0xaf82011c, 0x8ee201dc,
0x24420001, 0xaee201dc, 0x8ee201dc, 0x8f42012c,
0xafa20010, 0x8f420134, 0x3c040001, 0x24846180,
0xafa20014, 0x8f86011c, 0x8f8700a0, 0x3c050005,
0x34a51400, 0xc002b3b, 0x0, 0x8fbf0020,
0x3e00008, 0x27bd0028, 0x3c081000, 0x24070001,
0x3c060080, 0x3c050100, 0x8f820070, 0x481024,
0x1040fffd, 0x0, 0x8f820054, 0x24420005,
0xaf820078, 0x8c040234, 0x10800016, 0x1821,
0x3c020001, 0x571021, 0x8c4240e8, 0x24420005,
0x3c010001, 0x370821, 0xac2240e8, 0x3c020001,
0x571021, 0x8c4240e8, 0x44102b, 0x14400009,
0x0, 0x3c030080, 0x3c010001, 0x370821,
0xac2040e8, 0x3c010001, 0x370821, 0x1000000b,
0xa02740f0, 0x3c020001, 0x571021, 0x904240f0,
0x54400006, 0x661825, 0x3c020001, 0x571021,
0x904240f1, 0x54400001, 0x661825, 0x8c040230,
0x10800013, 0x0, 0x3c020001, 0x571021,
0x8c4240ec, 0x24420005, 0x3c010001, 0x370821,
0xac2240ec, 0x3c020001, 0x571021, 0x8c4240ec,
0x44102b, 0x14400006, 0x0, 0x3c010001,
0x370821, 0xac2040ec, 0x10000006, 0x651825,
0x3c020001, 0x571021, 0x904240f2, 0x54400001,
0x651825, 0x1060ffbc, 0x0, 0x8f420000,
0x10400007, 0x0, 0xaf80004c, 0x8f82004c,
0x1040fffd, 0x0, 0x10000005, 0x0,
0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
0x8f820060, 0x431025, 0xaf820060, 0x8f420000,
0x10400003, 0x0, 0x1000ffa7, 0xaf80004c,
0x1000ffa5, 0xaf800048, 0x3e00008, 0x0,
0x0, 0x0, 0x0, 0x27bdffe0,
0xafbf0018, 0x8f860064, 0x30c20004, 0x10400025,
0x24040004, 0x8c020114, 0xaf420020, 0xaf840064,
0x8f4202fc, 0x24420001, 0xaf4202fc, 0x8f4202fc,
0x8f820064, 0x30420004, 0x14400005, 0x0,
0x8c030114, 0x8f420020, 0x1462fff2, 0x0,
0x8f420000, 0x10400007, 0x8f43003c, 0xaf80004c,
0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
0x0, 0x8f820060, 0x431025, 0xaf820060,
0x8f420000, 0x10400073, 0x0, 0x1000006f,
0x0, 0x30c20008, 0x10400020, 0x24040008,
0x8c02011c, 0xaf420048, 0xaf840064, 0x8f4202a8,
0x24420001, 0xaf4202a8, 0x8f4202a8, 0x8f820064,
0x30420008, 0x14400005, 0x0, 0x8c03011c,
0x8f420048, 0x1462fff2, 0x0, 0x8f420000,
0x10400007, 0x0, 0xaf80004c, 0x8f82004c,
0x1040fffd, 0x0, 0x10000005, 0x0,
0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
0x8f820060, 0x1000ffd9, 0x34420200, 0x30c20020,
0x10400023, 0x24040020, 0x8c02012c, 0xaf420068,
0xaf840064, 0x8f4202d8, 0x24420001, 0xaf4202d8,
0x8f4202d8, 0x8f820064, 0x30420020, 0x14400005,
0x32c24000, 0x8c03012c, 0x8f420068, 0x1462fff2,
0x32c24000, 0x14400002, 0x3c020001, 0x2c2b025,
0x8f420000, 0x10400007, 0x0, 0xaf80004c,
0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
0x0, 0x8f820060, 0x1000ffb4, 0x34420800,
0x30c20010, 0x10400029, 0x24040010, 0x8c020124,
0xaf420058, 0xaf840064, 0x8f4202d4, 0x24420001,
0xaf4202d4, 0x8f4202d4, 0x8f820064, 0x30420010,
0x14400005, 0x32c22000, 0x8c030124, 0x8f420058,
0x1462fff2, 0x32c22000, 0x50400001, 0x36d68000,
0x8f420000, 0x10400007, 0x0, 0xaf80004c,
0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
0x0, 0x8f820060, 0x34420100, 0xaf820060,
0x8f420000, 0x10400003, 0x0, 0x1000006c,
0xaf80004c, 0x1000006a, 0xaf800048, 0x30c20001,
0x10400004, 0x24020001, 0xaf820064, 0x10000064,
0x0, 0x30c20002, 0x1440000b, 0x3c050003,
0x3c040001, 0x24846244, 0x34a50500, 0x3821,
0xafa00010, 0xc002b3b, 0xafa00014, 0x2402ffc0,
0x10000057, 0xaf820064, 0x8c05022c, 0x8c02010c,
0x10a20048, 0x51080, 0x8c460300, 0x24a20001,
0x3045003f, 0x24020003, 0xac05022c, 0x61e02,
0x10620005, 0x24020010, 0x1062001d, 0x30c20fff,
0x10000039, 0x0, 0x8f4302a8, 0x8f440000,
0x30c20fff, 0xaf420048, 0x24630001, 0xaf4302a8,
0x10800007, 0x8f4202a8, 0xaf80004c, 0x8f82004c,
0x1040fffd, 0x0, 0x10000005, 0x0,
0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
0x8f820060, 0x34420200, 0xaf820060, 0x8f420000,
0x1040001f, 0x0, 0x1000001b, 0x0,
0xaf420058, 0x32c22000, 0x50400001, 0x36d68000,
0x8f4202d4, 0x8f430000, 0x24420001, 0xaf4202d4,
0x10600007, 0x8f4202d4, 0xaf80004c, 0x8f82004c,
0x1040fffd, 0x0, 0x10000005, 0x0,
0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
0x8f820060, 0x34420100, 0xaf820060, 0x8f420000,
0x10400003, 0x0, 0x10000006, 0xaf80004c,
0x10000004, 0xaf800048, 0xc002196, 0xc02021,
0x402821, 0x8c02010c, 0x14a20002, 0x24020002,
0xaf820064, 0x8f820064, 0x30420002, 0x14400004,
0x0, 0x8c02010c, 0x14a2ffac, 0x0,
0x8fbf0018, 0x3e00008, 0x27bd0020, 0x3e00008,
0x0, 0x27bdffa0, 0xafb00040, 0x808021,
0x101602, 0x2442ffff, 0x304300ff, 0x2c620013,
0xafbf0058, 0xafbe0054, 0xafb50050, 0xafb3004c,
0xafb20048, 0xafb10044, 0x104001f3, 0xafa50034,
0x31080, 0x3c010001, 0x220821, 0x8c226288,
0x400008, 0x0, 0x101302, 0x30440fff,
0x24020001, 0x10820005, 0x24020002, 0x1082000c,
0x2402fffe, 0x10000024, 0x3c050003, 0x8f430004,
0x3c020001, 0x8c426f04, 0xaf440200, 0xaf440204,
0x3c040001, 0x8c846e80, 0x10000009, 0x34630001,
0x8f430004, 0xaf440200, 0xaf440204, 0x3c040001,
0x8c846e80, 0x621824, 0x3c020001, 0x2442ca28,
0x21100, 0x21182, 0xaf430004, 0x3c030800,
0x431025, 0xac820038, 0x8f840054, 0x41442,
0x41c82, 0x431021, 0x41cc2, 0x431023,
0x41d02, 0x431021, 0x41d42, 0x431023,
0x10000009, 0xaf420208, 0x3c040001, 0x24846250,
0x34a51000, 0x2003021, 0x3821, 0xafa00010,
0xc002b3b, 0xafa00014, 0x8f4202a0, 0x24420001,
0xaf4202a0, 0x1000021f, 0x8f4202a0, 0x27b00028,
0x2002021, 0x24050210, 0xc002bbf, 0x24060008,
0xc002518, 0x2002021, 0x10000216, 0x0,
0x8faa0034, 0x27a40028, 0xa1880, 0x25420001,
0x3042003f, 0xafa20034, 0x8c650300, 0x8faa0034,
0x21080, 0x8c430300, 0x25420001, 0x3042003f,
0xafa20034, 0xac02022c, 0xafa50028, 0xc002518,
0xafa3002c, 0x10000203, 0x0, 0x27b00028,
0x2002021, 0x24050210, 0xc002bbf, 0x24060008,
0xc002657, 0x2002021, 0x100001fa, 0x0,
0x8faa0034, 0x27a40028, 0xa1880, 0x25420001,
0x3042003f, 0xafa20034, 0x8c650300, 0x8faa0034,
0x21080, 0x8c430300, 0x25420001, 0x3042003f,
0xafa20034, 0xac02022c, 0xafa50028, 0xc002657,
0xafa3002c, 0x100001e7, 0x0, 0x101302,
0x30430fff, 0x24020001, 0x10620005, 0x24020002,
0x1062001e, 0x3c020002, 0x10000033, 0x3c050003,
0x3c030002, 0x2c31024, 0x54400037, 0x2c3b025,
0x8f820228, 0x3c010001, 0x370821, 0xac2238d8,
0x8f82022c, 0x3c010001, 0x370821, 0xac2238dc,
0x8f820230, 0x3c010001, 0x370821, 0xac2238e0,
0x8f820234, 0x3c010001, 0x370821, 0xac2238e4,
0x2402ffff, 0xaf820228, 0xaf82022c, 0xaf820230,
0xaf820234, 0x10000020, 0x2c3b025, 0x2c21024,
0x10400012, 0x3c02fffd, 0x3c020001, 0x571021,
0x8c4238d8, 0xaf820228, 0x3c020001, 0x571021,
0x8c4238dc, 0xaf82022c, 0x3c020001, 0x571021,
0x8c4238e0, 0xaf820230, 0x3c020001, 0x571021,
0x8c4238e4, 0xaf820234, 0x3c02fffd, 0x3442ffff,
0x10000009, 0x2c2b024, 0x3c040001, 0x2484625c,
0x34a51100, 0x2003021, 0x3821, 0xafa00010,
0xc002b3b, 0xafa00014, 0x8f4202cc, 0x24420001,
0xaf4202cc, 0x1000019f, 0x8f4202cc, 0x101302,
0x30450fff, 0x24020001, 0x10a20005, 0x24020002,
0x10a2000d, 0x3c0408ff, 0x10000014, 0x3c050003,
0x3c0208ff, 0x3442ffff, 0x8f830220, 0x3c040004,
0x2c4b025, 0x621824, 0x34630008, 0xaf830220,
0x10000012, 0xaf450298, 0x3484fff7, 0x3c03fffb,
0x8f820220, 0x3463ffff, 0x2c3b024, 0x441024,
0xaf820220, 0x10000009, 0xaf450298, 0x3c040001,
0x24846268, 0x34a51200, 0x2003021, 0x3821,
0xafa00010, 0xc002b3b, 0xafa00014, 0x8f4202bc,
0x24420001, 0xaf4202bc, 0x10000176, 0x8f4202bc,
0x27840208, 0x24050200, 0xc002bbf, 0x24060008,
0x27440224, 0x24050200, 0xc002bbf, 0x24060008,
0x8f4202c4, 0x24420001, 0xaf4202c4, 0x10000169,
0x8f4202c4, 0x101302, 0x30430fff, 0x24020001,
0x10620011, 0x28620002, 0x50400005, 0x24020002,
0x10600007, 0x0, 0x10000017, 0x0,
0x1062000f, 0x0, 0x10000013, 0x0,
0x8c060248, 0x2021, 0xc005104, 0x24050004,
0x10000007, 0x0, 0x8c060248, 0x2021,
0xc005104, 0x24050004, 0x10000010, 0x0,
0x8c06024c, 0x2021, 0xc005104, 0x24050001,
0x1000000a, 0x0, 0x3c040001, 0x24846274,
0x3c050003, 0x34a51300, 0x2003021, 0x3821,
0xafa00010, 0xc002b3b, 0xafa00014, 0x8f4202c0,
0x24420001, 0xaf4202c0, 0x1000013a, 0x8f4202c0,
0xc002426, 0x0, 0x10000136, 0x0,
0x24020001, 0xa34205c5, 0x24100100, 0x8f4401a8,
0x8f4501ac, 0xafb00010, 0xafa00014, 0x8f420014,
0xafa20018, 0x8f420108, 0x26e60028, 0x40f809,
0x24070400, 0x1040fff5, 0x0, 0x10000125,
0x0, 0x3c03ffff, 0x34637fff, 0x8f420368,
0x8f440360, 0x2c3b024, 0x1821, 0xaf400058,
0xaf40005c, 0xaf400060, 0xaf400064, 0x441023,
0xaf420368, 0x3c020900, 0xaf400360, 0xafa20020,
0x8f5e0018, 0x27aa0020, 0x240200ff, 0x13c20002,
0xafaa003c, 0x27c30001, 0x8c020228, 0x609021,
0x1642000e, 0x1e38c0, 0x8f42033c, 0x24420001,
0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
0x2484620c, 0x3c050009, 0xafa00014, 0xafa20010,
0x8fa60020, 0x1000006b, 0x34a50500, 0xf71021,
0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
0x2c4203e9, 0x1040001b, 0x9821, 0xe08821,
0x263504c0, 0x8f440178, 0x8f45017c, 0x2201821,
0x240a0004, 0xafaa0010, 0xafb20014, 0x8f48000c,
0x1021, 0x2f53021, 0xafa80018, 0x8f48010c,
0x24070008, 0xa32821, 0xa3482b, 0x822021,
0x100f809, 0x892021, 0x54400006, 0x24130001,
0x8f820054, 0x2021023, 0x2c4203e9, 0x1440ffe9,
0x0, 0x326200ff, 0x54400017, 0xaf520018,
0x8f420378, 0x24420001, 0xaf420378, 0x8f420378,
0x8f820120, 0x8faa003c, 0xafa20010, 0x8f820124,
0x3c040001, 0x24846218, 0x3c050009, 0xafa20014,
0x8d460000, 0x10000033, 0x34a50600, 0x8f420308,
0x24130001, 0x24420001, 0xaf420308, 0x8f420308,
0x1000001c, 0x326200ff, 0x8f830054, 0x8f820054,
0x247003e8, 0x2021023, 0x2c4203e9, 0x10400014,
0x9821, 0x24110010, 0x8f42000c, 0x8f440160,
0x8f450164, 0x8f860120, 0xafb10010, 0xafb20014,
0xafa20018, 0x8f42010c, 0x24070008, 0x40f809,
0x24c6001c, 0x1440ffe5, 0x0, 0x8f820054,
0x2021023, 0x2c4203e9, 0x1440ffef, 0x0,
0x326200ff, 0x14400011, 0x0, 0x8f420378,
0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
0x8faa003c, 0xafa20010, 0x8f820124, 0x3c040001,
0x24846220, 0x3c050009, 0xafa20014, 0x8d460000,
0x34a50700, 0xc002b3b, 0x3c03821, 0x8f4202b0,
0x24420001, 0xaf4202b0, 0x8f4202b0, 0x8f4202f8,
0x24420001, 0xaf4202f8, 0x1000008a, 0x8f4202f8,
0x8c02025c, 0x27440224, 0xaf4201f0, 0x8c020260,
0x24050200, 0x24060008, 0xc002bbf, 0xaf4201f8,
0x8f820220, 0x30420008, 0x14400002, 0x24020001,
0x24020002, 0xaf420298, 0x8f4202ac, 0x24420001,
0xaf4202ac, 0x10000077, 0x8f4202ac, 0x3c0200ff,
0x3442ffff, 0x2021824, 0x32c20180, 0x14400006,
0x3402fffb, 0x43102b, 0x14400003, 0x0,
0x1000006c, 0xaf4300bc, 0x3c040001, 0x24846280,
0x3c050003, 0x34a51500, 0x2003021, 0x3821,
0xafa00010, 0xc002b3b, 0xafa00014, 0x3c020700,
0x34421000, 0x101e02, 0x621825, 0xafa30020,
0x8f510018, 0x240200ff, 0x12220002, 0x8021,
0x26300001, 0x8c020228, 0x1602000e, 0x1130c0,
0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
0x8c020228, 0x3c040001, 0x248461f4, 0x3c050009,
0xafa00014, 0xafa20010, 0x8fa60020, 0x1000003f,
0x34a50100, 0xd71021, 0x8fa30020, 0x8fa40024,
0xac4304c0, 0xac4404c4, 0xc01821, 0x8f440178,
0x8f45017c, 0x1021, 0x24070004, 0xafa70010,
0xafb00014, 0x8f48000c, 0x24c604c0, 0x2e63021,
0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
0xa3482b, 0x822021, 0x100f809, 0x892021,
0x1440000b, 0x24070008, 0x8f820120, 0xafa20010,
0x8f820124, 0x3c040001, 0x248461fc, 0x3c050009,
0xafa20014, 0x8fa60020, 0x1000001c, 0x34a50200,
0x8f440160, 0x8f450164, 0x8f43000c, 0xaf500018,
0x8f860120, 0x24020010, 0xafa20010, 0xafb00014,
0xafa30018, 0x8f42010c, 0x40f809, 0x24c6001c,
0x14400010, 0x0, 0x8f420340, 0x24420001,
0xaf420340, 0x8f420340, 0x8f820120, 0xafa20010,
0x8f820124, 0x3c040001, 0x24846204, 0x3c050009,
0xafa20014, 0x8fa60020, 0x34a50300, 0xc002b3b,
0x2203821, 0x8f4202e0, 0x24420001, 0xaf4202e0,
0x8f4202e0, 0x8f4202f0, 0x24420001, 0xaf4202f0,
0x8f4202f0, 0x8fa20034, 0x8fbf0058, 0x8fbe0054,
0x8fb50050, 0x8fb3004c, 0x8fb20048, 0x8fb10044,
0x8fb00040, 0x3e00008, 0x27bd0060, 0x27bdfff8,
0x2408ffff, 0x10a00014, 0x4821, 0x3c0aedb8,
0x354a8320, 0x90870000, 0x24840001, 0x3021,
0x1071026, 0x30420001, 0x10400002, 0x81842,
0x6a1826, 0x604021, 0x24c60001, 0x2cc20008,
0x1440fff7, 0x73842, 0x25290001, 0x125102b,
0x1440fff0, 0x0, 0x1001021, 0x3e00008,
0x27bd0008, 0x27bdffb0, 0xafbf0048, 0xafbe0044,
0xafb50040, 0xafb3003c, 0xafb20038, 0xafb10034,
0xafb00030, 0x8f870220, 0xafa70024, 0x8f870200,
0xafa7002c, 0x8f820220, 0x3c0308ff, 0x3463ffff,
0x431024, 0x34420004, 0xaf820220, 0x8f820200,
0x3c03c0ff, 0x3463ffff, 0x431024, 0x34420004,
0xaf820200, 0x8f530358, 0x8f55035c, 0x8f5e0360,
0x8f470364, 0xafa70014, 0x8f470368, 0xafa7001c,
0x8f4202d0, 0x274401c0, 0x24420001, 0xaf4202d0,
0x8f5002d0, 0x8f510204, 0x8f520200, 0xc002ba8,
0x24050400, 0xaf530358, 0xaf55035c, 0xaf5e0360,
0x8fa70014, 0xaf470364, 0x8fa7001c, 0xaf470368,
0xaf5002d0, 0xaf510204, 0xaf520200, 0x8c02025c,
0x27440224, 0xaf4201f0, 0x8c020260, 0x24050200,
0x24060008, 0xaf4201f8, 0x24020006, 0xc002bbf,
0xaf4201f4, 0x3c023b9a, 0x3442ca00, 0xaf4201fc,
0x240203e8, 0x24040002, 0x24030001, 0xaf420294,
0xaf440290, 0xaf43029c, 0x8f820220, 0x30420008,
0x10400004, 0x0, 0xaf430298, 0x10000003,
0x3021, 0xaf440298, 0x3021, 0x3c030001,
0x661821, 0x90636d00, 0x3461021, 0x24c60001,
0xa043022c, 0x2cc2000f, 0x1440fff8, 0x3461821,
0x24c60001, 0x8f820040, 0x24040080, 0x24050080,
0x21702, 0x24420030, 0xa062022c, 0x3461021,
0xc002ba8, 0xa040022c, 0x8fa70024, 0x30e20004,
0x14400006, 0x0, 0x8f820220, 0x3c0308ff,
0x3463fffb, 0x431024, 0xaf820220, 0x8fa7002c,
0x30e20004, 0x14400006, 0x0, 0x8f820200,
0x3c03c0ff, 0x3463fffb, 0x431024, 0xaf820200,
0x8fbf0048, 0x8fbe0044, 0x8fb50040, 0x8fb3003c,
0x8fb20038, 0x8fb10034, 0x8fb00030, 0x3e00008,
0x27bd0050, 0x0, 0x0, 0xaf400104,
0x24040001, 0x410c0, 0x2e21821, 0x24820001,
0x3c010001, 0x230821, 0xa42234d0, 0x402021,
0x2c820080, 0x1440fff8, 0x410c0, 0x24020001,
0x3c010001, 0x370821, 0xa42038d0, 0xaf420100,
0xaf800228, 0xaf80022c, 0xaf800230, 0xaf800234,
0x3e00008, 0x0, 0x27bdffe8, 0xafbf0014,
0xafb00010, 0x8f420104, 0x28420005, 0x10400026,
0x808021, 0x3c020001, 0x8f430104, 0x344230d0,
0x2e22021, 0x318c0, 0x621821, 0x2e31821,
0x83102b, 0x10400015, 0x1021, 0x96070000,
0x24840006, 0x24660006, 0x9482fffc, 0x14470009,
0x2821, 0x9483fffe, 0x96020002, 0x14620006,
0xa01021, 0x94820000, 0x96030004, 0x431026,
0x2c450001, 0xa01021, 0x14400009, 0x24840008,
0x86102b, 0x1440fff0, 0x1021, 0x304200ff,
0x14400030, 0x24020001, 0x1000002e, 0x1021,
0x1000fffa, 0x24020001, 0x2002021, 0xc00240c,
0x24050006, 0x3042007f, 0x218c0, 0x2e31021,
0x3c010001, 0x220821, 0x942230d0, 0x1040fff2,
0x2e31021, 0x3c060001, 0xc23021, 0x94c630d0,
0x10c0ffed, 0x3c080001, 0x350834d2, 0x96070000,
0x610c0, 0x572021, 0x882021, 0x94820000,
0x14470009, 0x2821, 0x94830002, 0x96020002,
0x14620006, 0xa01021, 0x94820004, 0x96030004,
0x431026, 0x2c450001, 0xa01021, 0x14400007,
0x610c0, 0x2e21021, 0x3c060001, 0xc23021,
0x94c634d0, 0x14c0ffeb, 0x610c0, 0x10c0ffd2,
0x24020001, 0x8fbf0014, 0x8fb00010, 0x3e00008,
0x27bd0018, 0x3e00008, 0x0, 0x27bdffb0,
0x801021, 0xafb00030, 0x24500002, 0x2002021,
0x24050006, 0xafb10034, 0x408821, 0xafbf0048,
0xafbe0044, 0xafb50040, 0xafb3003c, 0xc00240c,
0xafb20038, 0x3047007f, 0x710c0, 0x2e21021,
0x3c050001, 0xa22821, 0x94a530d0, 0x50a0001c,
0xa03021, 0x3c090001, 0x352934d2, 0x96280002,
0x510c0, 0x572021, 0x892021, 0x94820000,
0x14480009, 0x3021, 0x94830002, 0x96020002,
0x14620006, 0xc01021, 0x94820004, 0x96030004,
0x431026, 0x2c460001, 0xc01021, 0x14400007,
0x510c0, 0x2e21021, 0x3c050001, 0xa22821,
0x94a534d0, 0x14a0ffeb, 0x510c0, 0xa03021,
0x10c00014, 0x610c0, 0x571821, 0x3c010001,
0x230821, 0x8c2334d0, 0x571021, 0xafa30010,
0x3c010001, 0x220821, 0x8c2234d4, 0x3c040001,
0x24846394, 0xafa20014, 0x8e260000, 0x8e270004,
0x3c050004, 0xc002b3b, 0x34a50400, 0x10000063,
0x3c020800, 0x8f450100, 0x10a00006, 0x510c0,
0x2e21021, 0x3c010001, 0x220821, 0x942234d0,
0xaf420100, 0xa03021, 0x14c00011, 0x628c0,
0x710c0, 0x2e21021, 0xafa70010, 0x3c010001,
0x220821, 0x942230d0, 0x3c040001, 0x248463a0,
0xafa20014, 0x8e260000, 0x8e270004, 0x3c050004,
0xc002b3b, 0x34a50500, 0x10000048, 0x3c020800,
0xb71821, 0x3c020001, 0x96040000, 0x344234d2,
0x621821, 0xa4640000, 0x8e020002, 0x720c0,
0xac620002, 0x2e41021, 0x3c030001, 0x621821,
0x946330d0, 0x2e51021, 0x3c010001, 0x220821,
0xa42334d0, 0x2e41021, 0x3c010001, 0x220821,
0xa42630d0, 0x8f420104, 0x24420001, 0x28420080,
0x1040000f, 0x3c020002, 0x8f420104, 0x3c040001,
0x348430d2, 0x96030000, 0x210c0, 0x571021,
0x441021, 0xa4430000, 0x8e030002, 0xac430002,
0x8f420104, 0x24420001, 0xaf420104, 0x3c020002,
0x2c21024, 0x10400011, 0x72142, 0x3c030001,
0x346338d8, 0x24020003, 0x441023, 0x21080,
0x572021, 0x832021, 0x571021, 0x431021,
0x30e5001f, 0x8c430000, 0x24020001, 0xa21004,
0x621825, 0x1000000c, 0xac830000, 0x24020003,
0x441023, 0x21080, 0x5c2821, 0x5c1021,
0x30e4001f, 0x8c430228, 0x24020001, 0x821004,
0x621825, 0xaca30228, 0x3c020800, 0x34421000,
0x1821, 0xafa20020, 0x8f5e0018, 0x27aa0020,
0x240200ff, 0x13c20002, 0xafaa002c, 0x27c30001,
0x8c020228, 0x609021, 0x1642000e, 0x1e38c0,
0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
0x8c020228, 0x3c040001, 0x2484635c, 0x3c050009,
0xafa00014, 0xafa20010, 0x8fa60020, 0x1000006b,
0x34a50500, 0xf71021, 0x8fa30020, 0x8fa40024,
0xac4304c0, 0xac4404c4, 0x8f830054, 0x8f820054,
0x247003e8, 0x2021023, 0x2c4203e9, 0x1040001b,
0x9821, 0xe08821, 0x263504c0, 0x8f440178,
0x8f45017c, 0x2201821, 0x240a0004, 0xafaa0010,
0xafb20014, 0x8f48000c, 0x1021, 0x2f53021,
0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
0xa3482b, 0x822021, 0x100f809, 0x892021,
0x54400006, 0x24130001, 0x8f820054, 0x2021023,
0x2c4203e9, 0x1440ffe9, 0x0, 0x326200ff,
0x54400017, 0xaf520018, 0x8f420378, 0x24420001,
0xaf420378, 0x8f420378, 0x8f820120, 0x8faa002c,
0xafa20010, 0x8f820124, 0x3c040001, 0x24846368,
0x3c050009, 0xafa20014, 0x8d460000, 0x10000033,
0x34a50600, 0x8f420308, 0x24130001, 0x24420001,
0xaf420308, 0x8f420308, 0x1000001c, 0x326200ff,
0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
0x2c4203e9, 0x10400014, 0x9821, 0x24110010,
0x8f42000c, 0x8f440160, 0x8f450164, 0x8f860120,
0xafb10010, 0xafb20014, 0xafa20018, 0x8f42010c,
0x24070008, 0x40f809, 0x24c6001c, 0x1440ffe5,
0x0, 0x8f820054, 0x2021023, 0x2c4203e9,
0x1440ffef, 0x0, 0x326200ff, 0x14400011,
0x0, 0x8f420378, 0x24420001, 0xaf420378,
0x8f420378, 0x8f820120, 0x8faa002c, 0xafa20010,
0x8f820124, 0x3c040001, 0x24846370, 0x3c050009,
0xafa20014, 0x8d460000, 0x34a50700, 0xc002b3b,
0x3c03821, 0x8f4202b4, 0x24420001, 0xaf4202b4,
0x8f4202b4, 0x8f4202f4, 0x24420001, 0xaf4202f4,
0x8f4202f4, 0x8fbf0048, 0x8fbe0044, 0x8fb50040,
0x8fb3003c, 0x8fb20038, 0x8fb10034, 0x8fb00030,
0x3e00008, 0x27bd0050, 0x27bdffa0, 0x801021,
0xafb00040, 0x24500002, 0x2002021, 0x24050006,
0xafb10044, 0x408821, 0xafbf0058, 0xafbe0054,
0xafb50050, 0xafb3004c, 0xc00240c, 0xafb20048,
0x3048007f, 0x810c0, 0x2e21021, 0x3c060001,
0xc23021, 0x94c630d0, 0x10c0001c, 0x3821,
0x3c0a0001, 0x354a34d2, 0x96290002, 0x610c0,
0x572021, 0x8a2021, 0x94820000, 0x14490009,
0x2821, 0x94830002, 0x96020002, 0x14620006,
0xa01021, 0x94820004, 0x96030004, 0x431026,
0x2c450001, 0xa01021, 0x14400008, 0x610c0,
0xc03821, 0x2e21021, 0x3c060001, 0xc23021,
0x94c634d0, 0x14c0ffea, 0x610c0, 0x14c00011,
0xafa70028, 0x810c0, 0x2e21021, 0xafa80010,
0x3c010001, 0x220821, 0x942230d0, 0x3c040001,
0x248463ac, 0xafa20014, 0x8e260000, 0x8e270004,
0x3c050004, 0xc002b3b, 0x34a50900, 0x10000075,
0x3c020800, 0x10e0000c, 0x610c0, 0x2e21021,
0x3c030001, 0x621821, 0x946334d0, 0x710c0,
0x2e21021, 0x3c010001, 0x220821, 0xa42334d0,
0x1000000b, 0x3c040001, 0x2e21021, 0x3c030001,
0x621821, 0x946334d0, 0x810c0, 0x2e21021,
0x3c010001, 0x220821, 0xa42330d0, 0x3c040001,
0x348430d0, 0x8f430100, 0x610c0, 0x2e21021,
0x3c010001, 0x220821, 0xa42334d0, 0x8f420104,
0x2e43821, 0x2821, 0x18400029, 0xaf460100,
0x24e60006, 0x94c3fffc, 0x96020000, 0x14620009,
0x2021, 0x94c3fffe, 0x96020002, 0x14620006,
0x801021, 0x94c20000, 0x96030004, 0x431026,
0x2c440001, 0x801021, 0x50400014, 0x24a50001,
0x8f420104, 0x2442ffff, 0xa2102a, 0x1040000b,
0x24e40004, 0x94820006, 0x8c830008, 0xa482fffe,
0xac830000, 0x8f420104, 0x24a50001, 0x2442ffff,
0xa2102a, 0x1440fff7, 0x24840008, 0x8f420104,
0x2442ffff, 0x10000006, 0xaf420104, 0x8f420104,
0x24c60008, 0xa2102a, 0x1440ffda, 0x24e70008,
0x810c0, 0x2e21021, 0x3c010001, 0x220821,
0x942230d0, 0x14400023, 0x3c020800, 0x3c020002,
0x2c21024, 0x10400012, 0x82142, 0x3c030001,
0x346338d8, 0x24020003, 0x441023, 0x21080,
0x572021, 0x832021, 0x571021, 0x431021,
0x3105001f, 0x24030001, 0x8c420000, 0xa31804,
0x31827, 0x431024, 0x1000000d, 0xac820000,
0x24020003, 0x441023, 0x21080, 0x5c2821,
0x5c1021, 0x3104001f, 0x24030001, 0x8c420228,
0x831804, 0x31827, 0x431024, 0xaca20228,
0x3c020800, 0x34422000, 0x1821, 0xafa20020,
0x8f5e0018, 0x27ab0020, 0x240200ff, 0x13c20002,
0xafab0034, 0x27c30001, 0x8c020228, 0x609021,
0x1642000e, 0x1e38c0, 0x8f42033c, 0x24420001,
0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
0x2484635c, 0x3c050009, 0xafa00014, 0xafa20010,
0x8fa60020, 0x1000006b, 0x34a50500, 0xf71021,
0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
0x2c4203e9, 0x1040001b, 0x9821, 0xe08821,
0x263504c0, 0x8f440178, 0x8f45017c, 0x2201821,
0x240b0004, 0xafab0010, 0xafb20014, 0x8f48000c,
0x1021, 0x2f53021, 0xafa80018, 0x8f48010c,
0x24070008, 0xa32821, 0xa3482b, 0x822021,
0x100f809, 0x892021, 0x54400006, 0x24130001,
0x8f820054, 0x2021023, 0x2c4203e9, 0x1440ffe9,
0x0, 0x326200ff, 0x54400017, 0xaf520018,
0x8f420378, 0x24420001, 0xaf420378, 0x8f420378,
0x8f820120, 0x8fab0034, 0xafa20010, 0x8f820124,
0x3c040001, 0x24846368, 0x3c050009, 0xafa20014,
0x8d660000, 0x10000033, 0x34a50600, 0x8f420308,
0x24130001, 0x24420001, 0xaf420308, 0x8f420308,
0x1000001c, 0x326200ff, 0x8f830054, 0x8f820054,
0x247003e8, 0x2021023, 0x2c4203e9, 0x10400014,
0x9821, 0x24110010, 0x8f42000c, 0x8f440160,
0x8f450164, 0x8f860120, 0xafb10010, 0xafb20014,
0xafa20018, 0x8f42010c, 0x24070008, 0x40f809,
0x24c6001c, 0x1440ffe5, 0x0, 0x8f820054,
0x2021023, 0x2c4203e9, 0x1440ffef, 0x0,
0x326200ff, 0x14400011, 0x0, 0x8f420378,
0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
0x8fab0034, 0xafa20010, 0x8f820124, 0x3c040001,
0x24846370, 0x3c050009, 0xafa20014, 0x8d660000,
0x34a50700, 0xc002b3b, 0x3c03821, 0x8f4202b8,
0x24420001, 0xaf4202b8, 0x8f4202b8, 0x8f4202f4,
0x24420001, 0xaf4202f4, 0x8f4202f4, 0x8fbf0058,
0x8fbe0054, 0x8fb50050, 0x8fb3004c, 0x8fb20048,
0x8fb10044, 0x8fb00040, 0x3e00008, 0x27bd0060,
0x0, 0x0, 0x0, 0x27bdffe0,
0x27644000, 0xafbf0018, 0xc002ba8, 0x24051000,
0x3c030001, 0x34632cc0, 0x3c040001, 0x34842ec8,
0x24020020, 0xaf82011c, 0x2e31021, 0xaf800100,
0xaf800104, 0xaf800108, 0xaf800110, 0xaf800114,
0xaf800118, 0xaf800120, 0xaf800124, 0xaf800128,
0xaf800130, 0xaf800134, 0xaf800138, 0xaf4200ec,
0x2e31021, 0xaf4200f0, 0x2e41021, 0xaf4200f4,
0x2e41021, 0xaf4200f8, 0x3c020001, 0x571021,
0x904240f4, 0x1440001c, 0x3c050001, 0x8f82011c,
0x3c040001, 0x24846470, 0x3c050001, 0x34420001,
0xaf82011c, 0xafa00010, 0xafa00014, 0x8f86011c,
0x34a50100, 0xc002b3b, 0x3821, 0x8c020218,
0x30420040, 0x10400014, 0x0, 0x8f82011c,
0x3c040001, 0x2484647c, 0x3c050001, 0x34420004,
0xaf82011c, 0xafa00010, 0xafa00014, 0x8f86011c,
0x10000007, 0x34a50200, 0x3c040001, 0x24846484,
0xafa00010, 0xafa00014, 0x8f86011c, 0x34a50300,
0xc002b3b, 0x3821, 0x8fbf0018, 0x3e00008,
0x27bd0020, 0x8fa90010, 0x8f83012c, 0x8faa0014,
0x8fab0018, 0x1060000a, 0x27624fe0, 0x14620002,
0x24680020, 0x27684800, 0x8f820128, 0x11020004,
0x0, 0x8f820124, 0x15020007, 0x0,
0x8f430334, 0x1021, 0x24630001, 0xaf430334,
0x10000039, 0x8f430334, 0xac640000, 0xac650004,
0xac660008, 0xa467000e, 0xac690018, 0xac6a001c,
0xac6b0010, 0xac620014, 0xaf880120, 0x8f4200fc,
0x8f4400f4, 0x2442ffff, 0xaf4200fc, 0x8c820000,
0x10490005, 0x3042ff8f, 0x10400019, 0x3122ff8f,
0x10400018, 0x3c020001, 0x8c830004, 0x2c620010,
0x10400013, 0x3c020001, 0x24630001, 0xac830004,
0x8f4300f8, 0x344230c8, 0x2e21021, 0x54620004,
0x24620008, 0x3c020001, 0x34422ec8, 0x2e21021,
0x14440015, 0x24020001, 0x8f820128, 0x24420020,
0xaf820128, 0x8f820128, 0x1000000f, 0x24020001,
0x3c020001, 0x344230c8, 0x2e21021, 0x54820004,
0x24820008, 0x3c020001, 0x34422ec8, 0x2e21021,
0x402021, 0x24020001, 0xaf4400f4, 0xac890000,
0xac820004, 0x24020001, 0x3e00008, 0x0,
0x3e00008, 0x0, 0x8fa90010, 0x8f83010c,
0x8faa0014, 0x8fab0018, 0x1060000a, 0x276247e0,
0x14620002, 0x24680020, 0x27684000, 0x8f820108,
0x11020004, 0x0, 0x8f820104, 0x15020007,
0x0, 0x8f430338, 0x1021, 0x24630001,
0xaf430338, 0x10000035, 0x8f430338, 0xac640000,
0xac650004, 0xac660008, 0xa467000e, 0xac690018,
0xac6a001c, 0xac6b0010, 0xac620014, 0xaf880100,
0x8f4400ec, 0x8c820000, 0x30420006, 0x10400019,
0x31220006, 0x10400018, 0x3c020001, 0x8c830004,
0x2c620010, 0x10400013, 0x3c020001, 0x24630001,
0xac830004, 0x8f4300f0, 0x34422ec0, 0x2e21021,
0x54620004, 0x24620008, 0x3c020001, 0x34422cc0,
0x2e21021, 0x14440015, 0x24020001, 0x8f820108,
0x24420020, 0xaf820108, 0x8f820108, 0x1000000f,
0x24020001, 0x3c020001, 0x34422ec0, 0x2e21021,
0x54820004, 0x24820008, 0x3c020001, 0x34422cc0,
0x2e21021, 0x402021, 0x24020001, 0xaf4400ec,
0xac890000, 0xac820004, 0x24020001, 0x3e00008,
0x0, 0x3e00008, 0x0, 0x27bdffd8,
0x3c040001, 0x2484648c, 0x3c050001, 0xafbf0024,
0xafb20020, 0xafb1001c, 0xafb00018, 0x8f900104,
0x8f9100b0, 0x8f92011c, 0x34a52500, 0x8f820100,
0x2403021, 0x2203821, 0xafa20010, 0xc002b3b,
0xafb00014, 0x8e020008, 0xafa20010, 0x8e02000c,
0x3c040001, 0x24846498, 0xafa20014, 0x8e060000,
0x8e070004, 0x3c050001, 0xc002b3b, 0x34a52510,
0x8e020018, 0xafa20010, 0x8e02001c, 0x3c040001,
0x248464a4, 0xafa20014, 0x8e060010, 0x8e070014,
0x3c050001, 0xc002b3b, 0x34a52520, 0x3c027f00,
0x2221024, 0x3c030800, 0x54430016, 0x3c030200,
0x8f82009c, 0x3042ffff, 0x14400012, 0x3c030200,
0x3c040001, 0x248464b0, 0x3c050002, 0x34a5f030,
0x3021, 0x3821, 0x36420002, 0xaf82011c,
0x36220001, 0xaf8200b0, 0xaf900104, 0xaf92011c,
0xafa00010, 0xc002b3b, 0xafa00014, 0x10000024,
0x0, 0x2c31024, 0x1040000d, 0x2231024,
0x1040000b, 0x36420002, 0xaf82011c, 0x36220001,
0xaf8200b0, 0xaf900104, 0xaf92011c, 0x8f420330,
0x24420001, 0xaf420330, 0x10000015, 0x8f420330,
0x3c040001, 0x248464b8, 0x240202a9, 0xafa20010,
0xafa00014, 0x8f860144, 0x3c070001, 0x24e764c0,
0xc002b3b, 0x3405dead, 0x8f82011c, 0x34420002,
0xaf82011c, 0x8f820220, 0x34420004, 0xaf820220,
0x8f820140, 0x3c030001, 0x431025, 0xaf820140,
0x8fbf0024, 0x8fb20020, 0x8fb1001c, 0x8fb00018,
0x3e00008, 0x27bd0028, 0x27bdffd8, 0x3c040001,
0x248464e8, 0x3c050001, 0xafbf0024, 0xafb20020,
0xafb1001c, 0xafb00018, 0x8f900124, 0x8f9100a0,
0x8f92011c, 0x34a52600, 0x8f820120, 0x2403021,
0x2203821, 0xafa20010, 0xc002b3b, 0xafb00014,
0x8e020008, 0xafa20010, 0x8e02000c, 0x3c040001,
0x248464f4, 0xafa20014, 0x8e060000, 0x8e070004,
0x3c050001, 0xc002b3b, 0x34a52610, 0x8e020018,
0xafa20010, 0x8e02001c, 0x3c040001, 0x24846500,
0xafa20014, 0x8e060010, 0x8e070014, 0x3c050001,
0xc002b3b, 0x34a52620, 0x3c027f00, 0x2221024,
0x3c030800, 0x54430016, 0x3c030200, 0x8f8200ac,
0x3042ffff, 0x14400012, 0x3c030200, 0x3c040001,
0x2484650c, 0x3c050001, 0x34a5f030, 0x3021,
0x3821, 0x36420002, 0xaf82011c, 0x36220001,
0xaf8200a0, 0xaf900124, 0xaf92011c, 0xafa00010,
0xc002b3b, 0xafa00014, 0x10000024, 0x0,
0x2c31024, 0x1040000d, 0x2231024, 0x1040000b,
0x36420002, 0xaf82011c, 0x36220001, 0xaf8200a0,
0xaf900124, 0xaf92011c, 0x8f42032c, 0x24420001,
0xaf42032c, 0x10000015, 0x8f42032c, 0x3c040001,
0x248464b8, 0x240202e2, 0xafa20010, 0xafa00014,
0x8f860144, 0x3c070001, 0x24e764c0, 0xc002b3b,
0x3405dead, 0x8f82011c, 0x34420002, 0xaf82011c,
0x8f820220, 0x34420004, 0xaf820220, 0x8f820140,
0x3c030001, 0x431025, 0xaf820140, 0x8fbf0024,
0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x3e00008,
0x27bd0028, 0x6021, 0x5021, 0x3021,
0x2821, 0x6821, 0x4821, 0x7821,
0x7021, 0x8f880124, 0x8f870104, 0x1580002e,
0x8f8b011c, 0x11a00014, 0x31620800, 0x8f820120,
0x10460029, 0x0, 0x3c040001, 0x8c846ee4,
0x8cc20000, 0x8cc30004, 0xac820000, 0xac830004,
0x8cc20008, 0xac820008, 0x94c2000e, 0xa482000e,
0x8cc20010, 0x240c0001, 0xac820010, 0x8cc20014,
0x10000012, 0x24c60020, 0x10400017, 0x0,
0x3c040001, 0x8c846ee4, 0x8d020000, 0x8d030004,
0xac820000, 0xac830004, 0x8d020008, 0xac820008,
0x9502000e, 0xa482000e, 0x8d020010, 0x25060020,
0xac820010, 0x8d020014, 0x240c0001, 0xc01821,
0xac820014, 0x27624fe0, 0x43102b, 0x54400001,
0x27634800, 0x603021, 0x1540002f, 0x31620100,
0x11200014, 0x31628000, 0x8f820100, 0x1045002a,
0x31620100, 0x3c040001, 0x8c846ee0, 0x8ca20000,
0x8ca30004, 0xac820000, 0xac830004, 0x8ca20008,
0xac820008, 0x94a2000e, 0xa482000e, 0x8ca20010,
0x240a0001, 0xac820010, 0x8ca20014, 0x10000012,
0x24a50020, 0x10400018, 0x31620100, 0x3c040001,
0x8c846ee0, 0x8ce20000, 0x8ce30004, 0xac820000,
0xac830004, 0x8ce20008, 0xac820008, 0x94e2000e,
0xa482000e, 0x8ce20010, 0x24e50020, 0xac820010,
0x8ce20014, 0x240a0001, 0xa01821, 0xac820014,
0x276247e0, 0x43102b, 0x54400001, 0x27634000,
0x602821, 0x31620100, 0x5440001d, 0x31621000,
0x11a00009, 0x31a20800, 0x10400004, 0x25020020,
0x8f8200a8, 0xa5e20000, 0x25020020, 0xaf820124,
0x8f880124, 0x6821, 0x11800011, 0x31621000,
0x3c040001, 0x8c846ee4, 0x8c820000, 0x8c830004,
0xaf820080, 0xaf830084, 0x8c820008, 0xaf8200a4,
0x9482000e, 0xaf8200ac, 0x8c820010, 0x6021,
0xaf8200a0, 0x8c8d0010, 0x8c8f0014, 0x31621000,
0x1440ff82, 0x0, 0x1120000f, 0x31220800,
0x10400004, 0x3c020002, 0x8f8200b8, 0xa5c20000,
0x3c020002, 0x1221024, 0x10400004, 0x24e20020,
0x8f8200b4, 0xaf8200d4, 0x24e20020, 0xaf820104,
0x8f870104, 0x4821, 0x1140ff70, 0x0,
0x3c040001, 0x8c846ee0, 0x8c820000, 0x8c830004,
0xaf820090, 0xaf830094, 0x8c820008, 0xaf8200b4,
0x9482000e, 0xaf82009c, 0x8c820010, 0x5021,
0xaf8200b0, 0x8c890010, 0x1000ff60, 0x8c8e0014,
0x3e00008, 0x0, 0x6021, 0x5821,
0x3021, 0x2821, 0x6821, 0x5021,
0x7821, 0x7021, 0x8f880124, 0x8f870104,
0x3c180100, 0x1580002e, 0x8f89011c, 0x11a00014,
0x31220800, 0x8f820120, 0x10460029, 0x0,
0x3c040001, 0x8c846ee4, 0x8cc20000, 0x8cc30004,
0xac820000, 0xac830004, 0x8cc20008, 0xac820008,
0x94c2000e, 0xa482000e, 0x8cc20010, 0x240c0001,
0xac820010, 0x8cc20014, 0x10000012, 0x24c60020,
0x10400017, 0x0, 0x3c040001, 0x8c846ee4,
0x8d020000, 0x8d030004, 0xac820000, 0xac830004,
0x8d020008, 0xac820008, 0x9502000e, 0xa482000e,
0x8d020010, 0x25060020, 0xac820010, 0x8d020014,
0x240c0001, 0xc01821, 0xac820014, 0x27624fe0,
0x43102b, 0x54400001, 0x27634800, 0x603021,
0x1560002f, 0x31220100, 0x11400014, 0x31228000,
0x8f820100, 0x1045002a, 0x31220100, 0x3c040001,
0x8c846ee0, 0x8ca20000, 0x8ca30004, 0xac820000,
0xac830004, 0x8ca20008, 0xac820008, 0x94a2000e,
0xa482000e, 0x8ca20010, 0x240b0001, 0xac820010,
0x8ca20014, 0x10000012, 0x24a50020, 0x10400018,
0x31220100, 0x3c040001, 0x8c846ee0, 0x8ce20000,
0x8ce30004, 0xac820000, 0xac830004, 0x8ce20008,
0xac820008, 0x94e2000e, 0xa482000e, 0x8ce20010,
0x24e50020, 0xac820010, 0x8ce20014, 0x240b0001,
0xa01821, 0xac820014, 0x276247e0, 0x43102b,
0x54400001, 0x27634000, 0x602821, 0x31220100,
63110x5440001d, 0x31221000, 0x11a00009, 0x31a20800,
63120x10400004, 0x25020020, 0x8f8200a8, 0xa5e20000,
63130x25020020, 0xaf820124, 0x8f880124, 0x6821,
63140x11800011, 0x31221000, 0x3c040001, 0x8c846ee4,
63150x8c820000, 0x8c830004, 0xaf820080, 0xaf830084,
63160x8c820008, 0xaf8200a4, 0x9482000e, 0xaf8200ac,
63170x8c820010, 0x6021, 0xaf8200a0, 0x8c8d0010,
63180x8c8f0014, 0x31221000, 0x14400022, 0x0,
63190x1140000f, 0x31420800, 0x10400004, 0x3c020002,
63200x8f8200b8, 0xa5c20000, 0x3c020002, 0x1421024,
63210x10400004, 0x24e20020, 0x8f8200b4, 0xaf8200d4,
63220x24e20020, 0xaf820104, 0x8f870104, 0x5021,
63230x11600010, 0x0, 0x3c040001, 0x8c846ee0,
63240x8c820000, 0x8c830004, 0xaf820090, 0xaf830094,
63250x8c820008, 0xaf8200b4, 0x9482000e, 0xaf82009c,
63260x8c820010, 0x5821, 0xaf8200b0, 0x8c8a0010,
63270x8c8e0014, 0x8f820070, 0x3c031000, 0x431024,
63280x1040ff5c, 0x0, 0x8f820054, 0x24420005,
63290xaf820078, 0x8c040234, 0x10800016, 0x1821,
63300x3c020001, 0x571021, 0x8c4240e8, 0x24420005,
63310x3c010001, 0x370821, 0xac2240e8, 0x3c020001,
63320x571021, 0x8c4240e8, 0x44102b, 0x14400009,
63330x24020001, 0x3c030080, 0x3c010001, 0x370821,
63340xac2040e8, 0x3c010001, 0x370821, 0x1000000c,
63350xa02240f0, 0x3c020001, 0x571021, 0x904240f0,
63360x14400006, 0x3c020080, 0x3c020001, 0x571021,
63370x904240f1, 0x10400002, 0x3c020080, 0x621825,
63380x8c040230, 0x10800013, 0x0, 0x3c020001,
63390x571021, 0x8c4240ec, 0x24420005, 0x3c010001,
63400x370821, 0xac2240ec, 0x3c020001, 0x571021,
63410x8c4240ec, 0x44102b, 0x14400006, 0x0,
63420x3c010001, 0x370821, 0xac2040ec, 0x10000006,
63430x781825, 0x3c020001, 0x571021, 0x904240f2,
63440x54400001, 0x781825, 0x1060ff1a, 0x0,
63450x8f420000, 0x10400007, 0x0, 0xaf80004c,
63460x8f82004c, 0x1040fffd, 0x0, 0x10000005,
63470x0, 0xaf800048, 0x8f820048, 0x1040fffd,
63480x0, 0x8f820060, 0x431025, 0xaf820060,
63490x8f420000, 0x10400003, 0x0, 0x1000ff05,
63500xaf80004c, 0x1000ff03, 0xaf800048, 0x3e00008,
63510x0, 0x0, 0x0, 0x3c020001,
63520x8c426d28, 0x27bdffe8, 0xafbf0014, 0x14400012,
63530xafb00010, 0x3c100001, 0x26106f90, 0x2002021,
63540xc002ba8, 0x24052000, 0x26021fe0, 0x3c010001,
63550xac226eec, 0x3c010001, 0xac226ee8, 0xac020250,
63560x24022000, 0xac100254, 0xac020258, 0x24020001,
63570x3c010001, 0xac226d28, 0x8fbf0014, 0x8fb00010,
63580x3e00008, 0x27bd0018, 0x3c090001, 0x8d296eec,
63590x8c820000, 0x8fa30010, 0x8fa80014, 0xad220000,
63600x8c820004, 0xad250008, 0xad220004, 0x8f820054,
63610xad260010, 0xad270014, 0xad230018, 0xad28001c,
63620xad22000c, 0x2529ffe0, 0x3c020001, 0x24426f90,
63630x122102b, 0x10400003, 0x0, 0x3c090001,
63640x8d296ee8, 0x3c020001, 0x8c426d10, 0xad220000,
63650x3c020001, 0x8c426d10, 0x3c010001, 0xac296eec,
63660xad220004, 0xac090250, 0x3e00008, 0x0,
63670x27bdffd0, 0xafb00010, 0x3c100001, 0x8e106eec,
63680x3c020001, 0x8c426d10, 0xafb10014, 0x808821,
63690xafbe0024, 0x8fbe0040, 0x8fa40048, 0xafb20018,
63700xa09021, 0xafbf0028, 0xafb50020, 0xafb3001c,
63710xae020000, 0x3c020001, 0x8c426d10, 0xc09821,
63720xe0a821, 0x10800006, 0xae020004, 0x26050008,
63730xc002bb3, 0x24060018, 0x10000005, 0x2610ffe0,
63740x26040008, 0xc002ba8, 0x24050018, 0x2610ffe0,
63750x3c030001, 0x24636f90, 0x203102b, 0x10400003,
63760x0, 0x3c100001, 0x8e106ee8, 0x8e220000,
63770xae020000, 0x8e220004, 0xae120008, 0xae020004,
63780x8f820054, 0xae130010, 0xae150014, 0xae1e0018,
63790x8fa80044, 0xae08001c, 0xae02000c, 0x2610ffe0,
63800x203102b, 0x10400003, 0x0, 0x3c100001,
63810x8e106ee8, 0x3c020001, 0x8c426d10, 0xae020000,
63820x3c020001, 0x8c426d10, 0x3c010001, 0xac306eec,
63830xae020004, 0xac100250, 0x8fbf0028, 0x8fbe0024,
63840x8fb50020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
63850x8fb00010, 0x3e00008, 0x27bd0030, 0x851821,
63860x83102b, 0x10400006, 0x0, 0xac800000,
63870x24840004, 0x83102b, 0x5440fffd, 0xac800000,
63880x3e00008, 0x0, 0xa61821, 0xa3102b,
63890x10400007, 0x0, 0x8c820000, 0xaca20000,
63900x24a50004, 0xa3102b, 0x1440fffb, 0x24840004,
63910x3e00008, 0x0, 0x861821, 0x83102b,
63920x10400007, 0x0, 0x8ca20000, 0xac820000,
63930x24840004, 0x83102b, 0x1440fffb, 0x24a50004,
63940x3e00008, 0x0, 0x63080, 0x861821,
63950x83102b, 0x10400006, 0x0, 0xac850000,
63960x24840004, 0x83102b, 0x5440fffd, 0xac850000,
63970x3e00008, 0x0, 0x0, 0x26e50028,
63980xa03021, 0x274301c0, 0x8f4d0358, 0x8f47035c,
63990x8f480360, 0x8f490364, 0x8f4a0368, 0x8f4b0204,
64000x8f4c0200, 0x24640400, 0x64102b, 0x10400008,
64010x3c0208ff, 0x8cc20000, 0xac620000, 0x24630004,
64020x64102b, 0x1440fffb, 0x24c60004, 0x3c0208ff,
64030x3442ffff, 0x3c03c0ff, 0xaf4d0358, 0xaf47035c,
64040xaf480360, 0xaf490364, 0xaf4a0368, 0xaf4b0204,
64050xaf4c0200, 0x8f840220, 0x3463ffff, 0x8f860200,
64060x821024, 0x34420004, 0xc31824, 0x34630004,
64070xaf820220, 0xaf830200, 0x8ca20214, 0xac020084,
64080x8ca20218, 0xac020088, 0x8ca2021c, 0xac02008c,
64090x8ca20220, 0xac020090, 0x8ca20224, 0xac020094,
64100x8ca20228, 0xac020098, 0x8ca2022c, 0xac02009c,
64110x8ca20230, 0xac0200a0, 0x8ca20234, 0xac0200a4,
64120x8ca20238, 0xac0200a8, 0x8ca2023c, 0xac0200ac,
64130x8ca20240, 0xac0200b0, 0x8ca20244, 0xac0200b4,
64140x8ca20248, 0xac0200b8, 0x8ca2024c, 0xac0200bc,
64150x8ca2001c, 0xac020080, 0x8ca20018, 0xac0200c0,
64160x8ca20020, 0xac0200cc, 0x8ca20024, 0xac0200d0,
64170x8ca201d0, 0xac0200e0, 0x8ca201d4, 0xac0200e4,
64180x8ca201d8, 0xac0200e8, 0x8ca201dc, 0xac0200ec,
64190x8ca201e0, 0xac0200f0, 0x8ca20098, 0x8ca3009c,
64200xac0300fc, 0x8ca200a8, 0x8ca300ac, 0xac0300f4,
64210x8ca200a0, 0x8ca300a4, 0x30840004, 0xac0300f8,
64220x14800007, 0x30c20004, 0x8f820220, 0x3c0308ff,
64230x3463fffb, 0x431024, 0xaf820220, 0x30c20004,
64240x14400006, 0x0, 0x8f820200, 0x3c03c0ff,
64250x3463fffb, 0x431024, 0xaf820200, 0x8f4202dc,
64260xa34005c5, 0x24420001, 0xaf4202dc, 0x8f4202dc,
64270x3e00008, 0x0, 0x27bdffd8, 0xafbf0024,
64280xafb00020, 0x8f430024, 0x8f420020, 0x10620038,
64290x0, 0x8f430020, 0x8f420024, 0x622023,
64300x4810003, 0x0, 0x8f420040, 0x822021,
64310x8f430030, 0x8f420024, 0x43102b, 0x14400005,
64320x0, 0x8f430040, 0x8f420024, 0x10000005,
64330x621023, 0x8f420030, 0x8f430024, 0x431023,
64340x2442ffff, 0x406021, 0x8c102a, 0x54400001,
64350x806021, 0x8f4a0024, 0x8f490040, 0x8f480024,
64360x8f440180, 0x8f450184, 0x8f460024, 0x8f4b001c,
64370x24070001, 0xafa70010, 0x84100, 0x1001821,
64380x14c5021, 0x2529ffff, 0x1498024, 0xafb00014,
64390x8f470014, 0x1021, 0x63100, 0xafa70018,
64400xa32821, 0xa3382b, 0x822021, 0x872021,
64410x8f420108, 0x1663021, 0x40f809, 0xc3900,
64420x54400001, 0xaf500024, 0x8f430024, 0x8f420020,
64430x14620018, 0x0, 0x8f420000, 0x10400007,
64440x0, 0xaf80004c, 0x8f82004c, 0x1040fffd,
64450x0, 0x10000005, 0x0, 0xaf800048,
64460x8f820048, 0x1040fffd, 0x0, 0x8f820060,
64470x2403ffef, 0x431024, 0xaf820060, 0x8f420000,
64480x10400003, 0x0, 0x10000002, 0xaf80004c,
64490xaf800048, 0x8fbf0024, 0x8fb00020, 0x3e00008,
64500x27bd0028, 0x3e00008, 0x0, 0x27bdffc0,
64510x32c20020, 0xafbf0038, 0xafb30034, 0xafb20030,
64520xafb1002c, 0x10400004, 0xafb00028, 0x8f530028,
64530x10000002, 0x0, 0x8f530020, 0x8f420030,
64540x105300eb, 0x21100, 0x8f43001c, 0x628021,
64550x8e040000, 0x8e050004, 0x96120008, 0x8f420090,
64560x9611000a, 0x3246ffff, 0x46102a, 0x10400017,
64570x0, 0x8f8200d8, 0x8f430098, 0x431023,
64580x2442dcbe, 0xaf420090, 0x8f420090, 0x2842dcbf,
64590x10400005, 0x0, 0x8f420090, 0x8f430144,
64600x431021, 0xaf420090, 0x8f420090, 0x46102a,
64610x10400006, 0x0, 0x8f420348, 0x24420001,
64620xaf420348, 0x100000e1, 0x8f420348, 0x8f8200fc,
64630x14400006, 0x0, 0x8f420344, 0x24420001,
64640xaf420344, 0x100000d9, 0x8f420344, 0x934205c2,
64650x1040000b, 0x32c20008, 0x10400008, 0x32220200,
64660x10400006, 0x3c034000, 0x9602000e, 0xaf4300ac,
64670x21400, 0x10000002, 0xaf4200b0, 0xaf4000ac,
64680x32220004, 0x1040007f, 0x32220800, 0x10400003,
64690x3247ffff, 0x10000002, 0x24020020, 0x24020004,
64700xafa20010, 0x8f420030, 0xafa20014, 0x8f420010,
64710x3c030002, 0x431025, 0xafa20018, 0x8f460098,
64720x8f420108, 0x40f809, 0x0, 0x104000b7,
64730x0, 0x8f42009c, 0x8f430094, 0x2421021,
64740xaf42009c, 0xae03000c, 0x8f4200ac, 0x10400008,
64750x3c034000, 0x8f420094, 0x431025, 0xafa20020,
64760x8f42009c, 0x8f4300b0, 0x10000004, 0x431025,
64770x8f420094, 0xafa20020, 0x8f42009c, 0xafa20024,
64780x8f8200fc, 0x8fa30020, 0x8fa40024, 0xac430000,
64790xac440004, 0x24420008, 0xaf8200f0, 0x8f42009c,
64800x8f440270, 0x8f450274, 0x401821, 0x1021,
64810xa32821, 0xa3302b, 0x822021, 0x862021,
64820x32230060, 0x24020040, 0xaf440270, 0xaf450274,
64830x10620017, 0x2c620041, 0x10400005, 0x24020020,
64840x10620008, 0x24020001, 0x10000026, 0x0,
64850x24020060, 0x10620019, 0x24020001, 0x10000021,
64860x0, 0x8f420278, 0x8f43027c, 0x24630001,
64870x2c640001, 0x441021, 0xaf420278, 0xaf43027c,
64880x8f420278, 0x8f43027c, 0x10000016, 0x24020001,
64890x8f420280, 0x8f430284, 0x24630001, 0x2c640001,
64900x441021, 0xaf420280, 0xaf430284, 0x8f420280,
64910x8f430284, 0x1000000b, 0x24020001, 0x8f420288,
64920x8f43028c, 0x24630001, 0x2c640001, 0x441021,
64930xaf420288, 0xaf43028c, 0x8f420288, 0x8f43028c,
64940x24020001, 0xa34205c2, 0x8f420098, 0x3244ffff,
64950x2406fff8, 0x8f45013c, 0x441021, 0x24420007,
64960x461024, 0x24840007, 0xaf420094, 0x8f420090,
64970x8f430094, 0x862024, 0x441023, 0x65182b,
64980x14600005, 0xaf420090, 0x8f420094, 0x8f430144,
64990x431023, 0xaf420094, 0x8f420094, 0x10000023,
65000xaf40009c, 0x3247ffff, 0x50e00022, 0x32c20020,
65010x14400002, 0x24020010, 0x24020002, 0xafa20010,
65020x8f420030, 0xafa20014, 0x8f420010, 0xafa20018,
65030x8f460098, 0x8f420108, 0x40f809, 0x0,
65040x1040003a, 0x3245ffff, 0x8f420098, 0x8f430090,
65050x8f46013c, 0x451021, 0xaf420098, 0x8f42009c,
65060x8f440098, 0xa34005c2, 0x651823, 0xaf430090,
65070x451021, 0x86202b, 0x14800005, 0xaf42009c,
65080x8f420098, 0x8f430144, 0x431023, 0xaf420098,
65090x32c20020, 0x10400005, 0x0, 0x8f420358,
65100x2442ffff, 0xaf420358, 0x8f420358, 0x8f420030,
65110x8f430040, 0x24420001, 0x2463ffff, 0x431024,
65120xaf420030, 0x8f420030, 0x14530018, 0x0,
65130x8f420000, 0x10400007, 0x0, 0xaf80004c,
65140x8f82004c, 0x1040fffd, 0x0, 0x10000005,
65150x0, 0xaf800048, 0x8f820048, 0x1040fffd,
65160x0, 0x8f820060, 0x2403fff7, 0x431024,
65170xaf820060, 0x8f420000, 0x10400003, 0x0,
65180x10000002, 0xaf80004c, 0xaf800048, 0x8fbf0038,
65190x8fb30034, 0x8fb20030, 0x8fb1002c, 0x8fb00028,
65200x3e00008, 0x27bd0040, 0x3e00008, 0x0,
65210x27bdffd0, 0x32c20020, 0xafbf002c, 0xafb20028,
65220xafb10024, 0x10400004, 0xafb00020, 0x8f520028,
65230x10000002, 0x0, 0x8f520020, 0x8f420030,
65240x105200b5, 0x21100, 0x8f43001c, 0x628021,
65250x8e040000, 0x8e050004, 0x96110008, 0x8f420090,
65260x9607000a, 0x3226ffff, 0x46102a, 0x10400017,
65270x0, 0x8f8200d8, 0x8f430098, 0x431023,
65280x2442dc46, 0xaf420090, 0x8f420090, 0x2842dc47,
65290x10400005, 0x0, 0x8f420090, 0x8f430144,
65300x431021, 0xaf420090, 0x8f420090, 0x46102a,
65310x10400006, 0x0, 0x8f420348, 0x24420001,
65320xaf420348, 0x100000ab, 0x8f420348, 0x8f8600fc,
65330x10c0000c, 0x0, 0x8f8200f4, 0x2403fff8,
65340x431024, 0x461023, 0x218c3, 0x58600001,
65350x24630100, 0x8f42008c, 0x43102b, 0x14400006,
65360x712c2, 0x8f420344, 0x24420001, 0xaf420344,
65370x10000098, 0x8f420344, 0x934305c2, 0x1060000f,
65380x30460001, 0x8f420010, 0x34480400, 0x32c20008,
65390x10400008, 0x30e20200, 0x10400006, 0x3c034000,
65400x9602000e, 0xaf4300ac, 0x21400, 0x10000004,
65410xaf4200b0, 0x10000002, 0xaf4000ac, 0x8f480010,
65420x30e20004, 0x10400045, 0x3227ffff, 0x8f4900ac,
65430x11200005, 0x30c200ff, 0x14400006, 0x24020040,
65440x10000004, 0x24020008, 0x14400002, 0x24020020,
65450x24020004, 0xafa20010, 0x8f430030, 0x11200004,
65460xafa30014, 0x8f4200b0, 0x621025, 0xafa20014,
65470x3c020002, 0x1021025, 0xafa20018, 0x8f460098,
65480x8f420108, 0x40f809, 0x0, 0x10400069,
65490x3224ffff, 0x8f42008c, 0x8f430094, 0x24420001,
65500xaf42008c, 0x24020001, 0xae03000c, 0xa34205c2,
65510x8f420098, 0x2406fff8, 0x8f45013c, 0x441021,
65520x24420007, 0x461024, 0x24840007, 0xaf420094,
65530x8f420090, 0x8f430094, 0x862024, 0x441023,
65540x65182b, 0x14600005, 0xaf420090, 0x8f420094,
65550x8f430144, 0x431023, 0xaf420094, 0x8f430094,
65560x8f420140, 0x43102b, 0x10400009, 0x0,
65570x8f43013c, 0x8f440094, 0x8f420090, 0x8f450138,
65580x641823, 0x431023, 0xaf420090, 0xaf450094,
65590x8f420094, 0x1000001f, 0xaf420098, 0x10e0001d,
65600x30c200ff, 0x14400002, 0x24020010, 0x24020002,
65610xafa20010, 0x8f420030, 0xafa80018, 0xafa20014,
65620x8f460098, 0x8f420108, 0x40f809, 0x0,
65630x10400030, 0x3225ffff, 0x8f420098, 0x8f44013c,
65640x451021, 0xaf420098, 0x8f420090, 0x8f430098,
65650xa34005c2, 0x451023, 0x64182b, 0x14600005,
65660xaf420090, 0x8f420098, 0x8f430144, 0x431023,
65670xaf420098, 0x8f420030, 0x8f430040, 0x24420001,
65680x2463ffff, 0x431024, 0xaf420030, 0x8f420030,
65690x14520018, 0x0, 0x8f420000, 0x10400007,
65700x0, 0xaf80004c, 0x8f82004c, 0x1040fffd,
65710x0, 0x10000005, 0x0, 0xaf800048,
65720x8f820048, 0x1040fffd, 0x0, 0x8f820060,
65730x2403fff7, 0x431024, 0xaf820060, 0x8f420000,
65740x10400003, 0x0, 0x10000002, 0xaf80004c,
65750xaf800048, 0x8fbf002c, 0x8fb20028, 0x8fb10024,
65760x8fb00020, 0x3e00008, 0x27bd0030, 0x3e00008,
65770x0, 0x27bdffd8, 0x3c020001, 0x34422ec0,
65780xafbf0020, 0x8f4300f0, 0x8f840108, 0x2e21021,
65790x54620004, 0x24620008, 0x3c020001, 0x34422cc0,
65800x2e21021, 0x401821, 0xaf4300f0, 0xac600000,
65810x8f4200ec, 0x8c660004, 0x14620004, 0x3c020001,
65820x24820020, 0x1000000f, 0xaf820108, 0x8f4300f0,
65830x34422ec0, 0x2e21021, 0x54620004, 0x24620008,
65840x3c020001, 0x34422cc0, 0x2e21021, 0x401821,
65850x8c620004, 0x21140, 0x821021, 0xaf820108,
65860xac600000, 0x8c850018, 0x30a20036, 0x1040006c,
65870x30a20001, 0x8c82001c, 0x8f430040, 0x8f440034,
65880x24420001, 0x2463ffff, 0x431024, 0x862021,
65890xaf42002c, 0x30a20030, 0x14400006, 0xaf440034,
65900x8f420034, 0x8c03023c, 0x43102b, 0x144000b4,
65910x0, 0x32c20010, 0x10400028, 0x24070008,
65920x8f440170, 0x8f450174, 0x8f43002c, 0x8f48000c,
65930x8f860120, 0x24020080, 0xafa20010, 0xafa30014,
65940xafa80018, 0x8f42010c, 0x40f809, 0x24c6001c,
65950x14400011, 0x24020001, 0x3c010001, 0x370821,
65960xa02240f1, 0x8f820124, 0xafa20010, 0x8f820128,
65970x3c040001, 0x248467c4, 0xafa20014, 0x8f46002c,
65980x8f870120, 0x3c050009, 0xc002b3b, 0x34a51100,
65990x10000036, 0x0, 0x8f420300, 0x8f43002c,
66000x24420001, 0xaf420300, 0x8f420300, 0x24020001,
66010xa34205c1, 0x10000026, 0xaf430038, 0x8f440170,
66020x8f450174, 0x8f43002c, 0x8f48000c, 0x8f860120,
66030x24020020, 0xafa20010, 0xafa30014, 0xafa80018,
66040x8f42010c, 0x40f809, 0x24c6001c, 0x14400011,
66050x24020001, 0x3c010001, 0x370821, 0xa02240f0,
66060x8f820124, 0xafa20010, 0x8f820128, 0x3c040001,
66070x248467b8, 0xafa20014, 0x8f46002c, 0x8f870120,
66080x3c050009, 0xc002b3b, 0x34a50900, 0x1000000f,
66090x0, 0x8f420300, 0x24420001, 0xaf420300,
66100x8f420300, 0x8f42002c, 0xa34005c1, 0xaf420038,
66110x3c010001, 0x370821, 0xa02040f1, 0x3c010001,
66120x370821, 0xa02040f0, 0xaf400034, 0x8f420314,
66130x24420001, 0xaf420314, 0x10000059, 0x8f420314,
66140x10400022, 0x30a27000, 0x8c85001c, 0x8f420028,
66150xa22023, 0x4810003, 0x0, 0x8f420040,
66160x822021, 0x8f420358, 0x8f430000, 0xaf450028,
66170x441021, 0x10600007, 0xaf420358, 0xaf80004c,
66180x8f82004c, 0x1040fffd, 0x0, 0x10000005,
66190x0, 0xaf800048, 0x8f820048, 0x1040fffd,
66200x0, 0x8f820060, 0x34420008, 0xaf820060,
66210x8f420000, 0x10400003, 0x0, 0x10000038,
66220xaf80004c, 0x10000036, 0xaf800048, 0x1040002f,
66230x30a21000, 0x1040000c, 0x30a24000, 0x8c83001c,
66240x8f420050, 0x622023, 0x4820001, 0x24840200,
66250x8f42035c, 0x441021, 0xaf42035c, 0x8f420368,
66260x1000001a, 0xaf430050, 0x1040000c, 0x32c28000,
66270x8c83001c, 0x8f420070, 0x622023, 0x4820001,
66280x24840400, 0x8f420364, 0x441021, 0xaf420364,
66290x8f420368, 0x1000000d, 0xaf430070, 0x1040000e,
66300x3c020800, 0x8c83001c, 0x8f420060, 0x622023,
66310x4820001, 0x24840100, 0x8f420360, 0x441021,
66320xaf420360, 0x8f420368, 0xaf430060, 0x441021,
66330xaf420368, 0x3c020800, 0x2c21024, 0x50400008,
66340x36940040, 0x10000006, 0x0, 0x30a20100,
66350x10400003, 0x0, 0xc002bd8, 0x0,
66360x8fbf0020, 0x3e00008, 0x27bd0028, 0x3e00008,
66370x0, 0x27bdffa8, 0xafbf0050, 0xafbe004c,
66380xafb50048, 0xafb30044, 0xafb20040, 0xafb1003c,
66390xafb00038, 0x8f910108, 0x26220020, 0xaf820108,
66400x8e320018, 0xa821, 0x32420024, 0x104001ba,
66410xf021, 0x8e26001c, 0x8f43001c, 0x61100,
66420x621821, 0x8c70000c, 0x9604000c, 0x962d0016,
66430x9473000a, 0x2c8305dd, 0x38828870, 0x2c420001,
66440x621825, 0x10600015, 0x2821, 0x32c20040,
66450x10400015, 0x24020800, 0x96030014, 0x14620012,
66460x3402aaaa, 0x9603000e, 0x14620007, 0x2021,
66470x96030010, 0x24020300, 0x14620004, 0x801021,
66480x96020012, 0x2c440001, 0x801021, 0x54400006,
66490x24050016, 0x10000004, 0x0, 0x24020800,
66500x50820001, 0x2405000e, 0x934205c3, 0x14400008,
66510x5821, 0x240b0001, 0x32620180, 0xaf4500a8,
66520xaf5000a0, 0x10400002, 0xaf4600a4, 0xa34b05c3,
66530x10a00085, 0x2054021, 0x91020000, 0x3821,
66540x3042000f, 0x25080, 0x32c20002, 0x10400012,
66550x10a1821, 0x32620002, 0x10400010, 0x32c20001,
66560x1002021, 0x94820000, 0x24840002, 0xe23821,
66570x83102b, 0x1440fffb, 0x30e2ffff, 0x71c02,
66580x623821, 0x71c02, 0x30e2ffff, 0x623821,
66590x71027, 0xa502000a, 0x32c20001, 0x1040006a,
66600x32620001, 0x10400068, 0x0, 0x8f4200a8,
66610x10400065, 0x0, 0x8f4200a0, 0x8f4300a8,
66620x431021, 0x904c0009, 0x318900ff, 0x39230006,
66630x3182b, 0x39220011, 0x2102b, 0x621824,
66640x1060000c, 0x3c050006, 0x8f4200a4, 0x3c040001,
66650x248467d4, 0xafa20010, 0x8f4200a0, 0x34a54600,
66660x1203821, 0xc002b3b, 0xafa20014, 0x1000004e,
66670x0, 0x32c20004, 0x14400013, 0x2821,
66680x316200ff, 0x14400004, 0x0, 0x95020002,
66690x1000000d, 0x4a2823, 0x9505000c, 0x9502000e,
66700x95030010, 0xa22821, 0xa32821, 0x95030012,
66710x91040009, 0x95020002, 0xa32821, 0xa42821,
66720x4a1023, 0xa22821, 0x2002021, 0x94820000,
66730x24840002, 0xe23821, 0x88102b, 0x1440fffb,
66740x71c02, 0x30e2ffff, 0x623821, 0x71c02,
66750x30e2ffff, 0x623821, 0x1a52821, 0x51c02,
66760x30a2ffff, 0x622821, 0x51c02, 0x30a2ffff,
66770x622821, 0xa72823, 0x51402, 0xa22821,
66780x30a5ffff, 0x50a00001, 0x3405ffff, 0x316200ff,
66790x14400008, 0x318300ff, 0x8f4300a0, 0x8f4200a8,
66800x624021, 0x91020000, 0x3042000f, 0x25080,
66810x318300ff, 0x24020006, 0x14620003, 0x10a1021,
66820x10000002, 0x24440010, 0x24440006, 0x316200ff,
66830x14400006, 0x0, 0x94820000, 0xa22821,
66840x51c02, 0x30a2ffff, 0x622821, 0x934205c3,
66850x10400003, 0x32620100, 0x50400003, 0xa4850000,
66860x52827, 0xa4850000, 0x9622000e, 0x8f43009c,
66870x621821, 0x32a200ff, 0x10400007, 0xaf43009c,
66880x3c024000, 0x2021025, 0xafa20020, 0x8f42009c,
66890x10000003, 0x5e1025, 0xafb00020, 0x8f42009c,
66900xafa20024, 0x32620080, 0x10400010, 0x32620100,
66910x8f4200b4, 0x24430001, 0x210c0, 0x571021,
66920xaf4300b4, 0x8fa30020, 0x8fa40024, 0x3c010001,
66930x220821, 0xac2338e8, 0x3c010001, 0x220821,
66940xac2438ec, 0x100000a5, 0x32c20020, 0x10400064,
66950x0, 0x8f4200b4, 0x24430001, 0x210c0,
66960x571021, 0xaf4300b4, 0x8fa30020, 0x8fa40024,
66970x3c010001, 0x220821, 0xac2338e8, 0x3c010001,
66980x220821, 0xac2438ec, 0x8f4200b4, 0x10400051,
66990x3821, 0x3c090001, 0x352938e8, 0x3c08001f,
67000x3508ffff, 0x240bffff, 0x340affff, 0x710c0,
67010x571021, 0x491021, 0x8c430000, 0x8c440004,
67020xafa30028, 0xafa4002c, 0x8f8200fc, 0x8fa30028,
67030x8fa4002c, 0xac430000, 0xac440004, 0x24420008,
67040xaf8200f0, 0x8f42008c, 0x2442ffff, 0xaf42008c,
67050x97a2002e, 0x8f440270, 0x8f450274, 0x401821,
67060x1021, 0xa32821, 0xa3302b, 0x822021,
67070x862021, 0xaf440270, 0xaf450274, 0x8fa20028,
67080x481024, 0x90430000, 0x30630001, 0x1460000b,
67090x402021, 0x8f420278, 0x8f43027c, 0x24630001,
67100x2c640001, 0x441021, 0xaf420278, 0xaf43027c,
67110x8f420278, 0x1000001a, 0x8f43027c, 0x8c820000,
67120x144b000e, 0x0, 0x94820004, 0x144a000b,
67130x0, 0x8f420288, 0x8f43028c, 0x24630001,
67140x2c640001, 0x441021, 0xaf420288, 0xaf43028c,
67150x8f420288, 0x1000000a, 0x8f43028c, 0x8f420280,
67160x8f430284, 0x24630001, 0x2c640001, 0x441021,
67170xaf420280, 0xaf430284, 0x8f420280, 0x8f430284,
67180x8f4200b4, 0x24e70001, 0xe2102b, 0x1440ffb8,
67190x710c0, 0xa34005c3, 0x1000003f, 0xaf4000b4,
67200x8f8200fc, 0x8fa30020, 0x8fa40024, 0xac430000,
67210xac440004, 0x24420008, 0xaf8200f0, 0x8f42009c,
67220x8f46008c, 0x8f440270, 0x8f450274, 0x401821,
67230x1021, 0x24c6ffff, 0xaf46008c, 0xa32821,
67240xa3302b, 0x822021, 0x862021, 0xaf440270,
67250xaf450274, 0x92020000, 0x30420001, 0x1440000c,
67260x2402ffff, 0x8f420278, 0x8f43027c, 0x24630001,
67270x2c640001, 0x441021, 0xaf420278, 0xaf43027c,
67280x8f420278, 0x8f43027c, 0x1000001c, 0x32c20020,
67290x8e030000, 0x1462000f, 0x3402ffff, 0x96030004,
67300x1462000c, 0x0, 0x8f420288, 0x8f43028c,
67310x24630001, 0x2c640001, 0x441021, 0xaf420288,
67320xaf43028c, 0x8f420288, 0x8f43028c, 0x1000000b,
67330x32c20020, 0x8f420280, 0x8f430284, 0x24630001,
67340x2c640001, 0x441021, 0xaf420280, 0xaf430284,
67350x8f420280, 0x8f430284, 0x32c20020, 0x10400005,
67360xaf40009c, 0x8f420358, 0x2442ffff, 0xaf420358,
67370x8f420358, 0x8e22001c, 0x8f430040, 0x24420001,
67380x2463ffff, 0x431024, 0xaf42002c, 0x32420060,
67390x14400008, 0x32c20010, 0x8f420034, 0x24420001,
67400xaf420034, 0x8c03023c, 0x43102b, 0x14400102,
67410x32c20010, 0x10400018, 0x24070008, 0x8f440170,
67420x8f450174, 0x8f43002c, 0x8f48000c, 0x8f860120,
67430x24020080, 0xafa20010, 0xafa30014, 0xafa80018,
67440x8f42010c, 0x40f809, 0x24c6001c, 0x10400047,
67450x24020001, 0x8f420300, 0x8f43002c, 0x24420001,
67460xaf420300, 0x8f420300, 0x24020001, 0xa34205c1,
67470x1000007c, 0xaf430038, 0x8f440170, 0x8f450174,
67480x8f43002c, 0x8f48000c, 0x8f860120, 0x24020020,
67490xafa20010, 0xafa30014, 0xafa80018, 0x8f42010c,
67500x40f809, 0x24c6001c, 0x10400057, 0x24020001,
67510x10000065, 0x0, 0x32420012, 0x10400075,
67520x32420001, 0x9622000e, 0x8f43009c, 0x621821,
67530x32c20020, 0x10400005, 0xaf43009c, 0x8f420358,
67540x2442ffff, 0xaf420358, 0x8f420358, 0x8e22001c,
67550x8f430040, 0x24420001, 0x2463ffff, 0x431024,
67560xaf42002c, 0x32420010, 0x14400008, 0x32c20010,
67570x8f420034, 0x24420001, 0xaf420034, 0x8c03023c,
67580x43102b, 0x144000bc, 0x32c20010, 0x10400028,
67590x24070008, 0x8f440170, 0x8f450174, 0x8f43002c,
67600x8f48000c, 0x8f860120, 0x24020080, 0xafa20010,
67610xafa30014, 0xafa80018, 0x8f42010c, 0x40f809,
67620x24c6001c, 0x14400011, 0x24020001, 0x3c010001,
67630x370821, 0xa02240f1, 0x8f820124, 0xafa20010,
67640x8f820128, 0x3c040001, 0x248467c4, 0xafa20014,
67650x8f46002c, 0x8f870120, 0x3c050009, 0xc002b3b,
67660x34a51100, 0x10000036, 0x0, 0x8f420300,
67670x8f43002c, 0x24420001, 0xaf420300, 0x8f420300,
67680x24020001, 0xa34205c1, 0x10000026, 0xaf430038,
67690x8f440170, 0x8f450174, 0x8f43002c, 0x8f48000c,
67700x8f860120, 0x24020020, 0xafa20010, 0xafa30014,
67710xafa80018, 0x8f42010c, 0x40f809, 0x24c6001c,
67720x14400011, 0x24020001, 0x3c010001, 0x370821,
67730xa02240f0, 0x8f820124, 0xafa20010, 0x8f820128,
67740x3c040001, 0x248467b8, 0xafa20014, 0x8f46002c,
67750x8f870120, 0x3c050009, 0xc002b3b, 0x34a50900,
67760x1000000f, 0x0, 0x8f420300, 0x24420001,
67770xaf420300, 0x8f420300, 0x8f42002c, 0xa34005c1,
67780xaf420038, 0x3c010001, 0x370821, 0xa02040f1,
67790x3c010001, 0x370821, 0xa02040f0, 0xaf400034,
67800x8f420314, 0x24420001, 0xaf420314, 0x10000062,
67810x8f420314, 0x10400022, 0x32427000, 0x8e25001c,
67820x8f420028, 0xa22023, 0x4810003, 0x0,
67830x8f420040, 0x822021, 0x8f420358, 0x8f430000,
67840xaf450028, 0x441021, 0x10600007, 0xaf420358,
67850xaf80004c, 0x8f82004c, 0x1040fffd, 0x0,
67860x10000005, 0x0, 0xaf800048, 0x8f820048,
67870x1040fffd, 0x0, 0x8f820060, 0x34420008,
67880xaf820060, 0x8f420000, 0x10400003, 0x0,
67890x10000041, 0xaf80004c, 0x1000003f, 0xaf800048,
67900x1040002f, 0x32421000, 0x1040000c, 0x32424000,
67910x8e23001c, 0x8f420050, 0x622023, 0x4820001,
67920x24840200, 0x8f42035c, 0x441021, 0xaf42035c,
67930x8f420368, 0x1000001a, 0xaf430050, 0x1040000c,
67940x32c28000, 0x8e23001c, 0x8f420070, 0x622023,
67950x4820001, 0x24840400, 0x8f420364, 0x441021,
67960xaf420364, 0x8f420368, 0x1000000d, 0xaf430070,
67970x1040000e, 0x3c020800, 0x8e23001c, 0x8f420060,
67980x622023, 0x4820001, 0x24840100, 0x8f420360,
67990x441021, 0xaf420360, 0x8f420368, 0xaf430060,
68000x441021, 0xaf420368, 0x3c020800, 0x2c21024,
68010x50400011, 0x36940040, 0x1000000f, 0x0,
68020x32420048, 0x10400007, 0x24150001, 0x8e22001c,
68030x3c03ffff, 0x43f024, 0x3042ffff, 0x1000fd75,
68040xae22001c, 0x32420100, 0x10400003, 0x0,
68050xc002bd8, 0x0, 0x8fbf0050, 0x8fbe004c,
68060x8fb50048, 0x8fb30044, 0x8fb20040, 0x8fb1003c,
68070x8fb00038, 0x3e00008, 0x27bd0058, 0x3e00008,
68080x0, 0x0, 0x0, 0x8f8300e4,
68090x8f8200e0, 0x2404fff8, 0x441024, 0x621026,
68100x2102b, 0x21023, 0x3e00008, 0x621024,
68110x3e00008, 0x0, 0x27bdffe0, 0xafbf001c,
68120xafb00018, 0x8f8600c4, 0x8f8400e0, 0x8f8500e4,
68130x2402fff8, 0x821824, 0x10a30009, 0x27623ff8,
68140x14a20002, 0x24a20008, 0x27623000, 0x408021,
68150x16030005, 0x30820004, 0x10400004, 0xc02021,
68160x10000022, 0x1021, 0x8e040000, 0x8f42011c,
68170x14a20003, 0x0, 0x8f420120, 0xaf420114,
68180x8ca30000, 0x8f420148, 0x831823, 0x43102b,
68190x10400003, 0x0, 0x8f420148, 0x621821,
68200x94a20006, 0x24420050, 0x62102b, 0x1440000f,
68210xa01021, 0xafa40010, 0xafa30014, 0x8ca60000,
68220x8ca70004, 0x3c040001, 0xc002b3b, 0x24846894,
68230x8f42020c, 0x24420001, 0xaf42020c, 0x8f42020c,
68240x1021, 0xaf9000e8, 0xaf9000e4, 0x8fbf001c,
68250x8fb00018, 0x3e00008, 0x27bd0020, 0x3e00008,
68260x0, 0x8f8400e0, 0x8f8800c4, 0x8f8300e8,
68270x2402fff8, 0x823824, 0xe32023, 0x2c821000,
68280x50400001, 0x24841000, 0x420c2, 0x801821,
68290x8f440258, 0x8f45025c, 0x1021, 0xa32821,
68300xa3302b, 0x822021, 0x862021, 0xaf440258,
68310xaf45025c, 0x8f8300c8, 0x8f420148, 0x1032023,
68320x82102b, 0x14400004, 0x801821, 0x8f420148,
68330x822021, 0x801821, 0x8f440250, 0x8f450254,
68340x1021, 0xa32821, 0xa3302b, 0x822021,
68350x862021, 0xaf440250, 0xaf450254, 0xaf8800c8,
68360xaf8700e4, 0xaf8700e8, 0x3e00008, 0x0,
68370x27bdff30, 0x240a0001, 0xafbf00c8, 0xafbe00c4,
68380xafb500c0, 0xafb300bc, 0xafb200b8, 0xafb100b4,
68390xafb000b0, 0xa3a00097, 0xafa00044, 0xafaa005c,
68400x934205c4, 0xa7a0008e, 0x1040000a, 0xa7a00086,
68410x8f4b00c4, 0xafab0064, 0x8f4a00c0, 0xafaa006c,
68420x8f4b00cc, 0xafab0074, 0x8f4a00c8, 0x10000129,
68430xafaa007c, 0x8f420114, 0x40f809, 0x0,
68440x403021, 0x10c0034f, 0x0, 0x8cc20000,
68450x8cc30004, 0xafa20020, 0xafa30024, 0x8fab0024,
68460x8faa0020, 0x3162ffff, 0x2442fffc, 0xafa2006c,
68470x3c020006, 0x2c21024, 0xafab007c, 0x14400015,
68480xafaa0064, 0x91420000, 0x30420001, 0x10400011,
68490x2402ffff, 0x8d430000, 0x14620004, 0x3402ffff,
68500x95430004, 0x1062000b, 0x0, 0xc0024bb,
68510x8fa40064, 0x304200ff, 0x14400006, 0x0,
68520x8f420118, 0x40f809, 0x0, 0x1000032d,
68530x0, 0x8fa20024, 0x3c03ffbf, 0x3463ffff,
68540x431024, 0x3c03ffff, 0x431824, 0x14600003,
68550xafa20024, 0x10000040, 0x1821, 0x3c020080,
68560x621024, 0x10400007, 0x0, 0x8f42038c,
68570x24420001, 0xaf42038c, 0x8f42038c, 0x10000036,
68580x24030001, 0x8f420210, 0x24420001, 0xaf420210,
68590x8f420210, 0x3c020001, 0x621024, 0x10400006,
68600x3c020002, 0x8f4201c4, 0x24420001, 0xaf4201c4,
68610x8f4201c4, 0x3c020002, 0x621024, 0x10400006,
68620x3c020004, 0x8f42037c, 0x24420001, 0xaf42037c,
68630x8f42037c, 0x3c020004, 0x621024, 0x10400006,
68640x3c020008, 0x8f420380, 0x24420001, 0xaf420380,
68650x8f420380, 0x3c020008, 0x621024, 0x10400006,
68660x3c020010, 0x8f420384, 0x24420001, 0xaf420384,
68670x8f420384, 0x3c020010, 0x621024, 0x10400006,
68680x3c020020, 0x8f4201c0, 0x24420001, 0xaf4201c0,
68690x8f4201c0, 0x3c020020, 0x621024, 0x10400006,
68700x24030001, 0x8f420388, 0x24420001, 0xaf420388,
68710x8f420388, 0x24030001, 0x8c020260, 0x8fab006c,
68720x4b102b, 0x10400014, 0x307000ff, 0x8f4201e8,
68730x24420001, 0xaf4201e8, 0x8f4201e8, 0x8faa007c,
68740x8f8200e0, 0x354a0100, 0xafaa007c, 0xafa20010,
68750x8f8200e4, 0x24100001, 0x3c040001, 0x248468a0,
68760xafa20014, 0x8fa60020, 0x8fa70024, 0x3c050007,
68770xc002b3b, 0x34a50800, 0x12000010, 0x3c020080,
68780x2c21024, 0x1440000e, 0x32c20400, 0x8fab007c,
68790x3c020080, 0x34420100, 0x1621024, 0x10400005,
68800x0, 0x8f42020c, 0x24420001, 0xaf42020c,
68810x8f42020c, 0x100002b0, 0x8fa3006c, 0x32c20400,
68820x10400015, 0x34028100, 0x8faa0064, 0x9543000c,
68830x14620012, 0x3c020100, 0x240b0200, 0xa7ab008e,
68840x9542000e, 0x8d430008, 0x8d440004, 0x8d450000,
68850x8faa006c, 0x8fab0064, 0x254afffc, 0xafaa006c,
68860xa7a20086, 0xad63000c, 0xad640008, 0xad650004,
68870x256b0004, 0xafab0064, 0x3c020100, 0x2c21024,
68880x10400004, 0x0, 0x8faa006c, 0x254a0004,
68890xafaa006c, 0x8f4200bc, 0x5040000a, 0xafa00074,
68900x8fab006c, 0x4b102b, 0x50400006, 0xafa00074,
68910x8f4200bc, 0x1621023, 0xafa20074, 0x8f4a00bc,
68920xafaa006c, 0x8f420080, 0x8fab006c, 0x4b102b,
68930x10400056, 0x32c28000, 0x1040005e, 0x240a0003,
68940x32c21000, 0x1040005b, 0xafaa005c, 0x10000058,
68950x240b0004, 0x8f420350, 0x2403ffbf, 0x283a024,
68960x24420001, 0xaf420350, 0x1000024f, 0x8f420350,
68970x2c2b025, 0x2402ffbf, 0x282a024, 0x8f830128,
68980x3c040001, 0x248468d0, 0x26620001, 0xafa20014,
68990xafa30010, 0x8f860120, 0x8f870124, 0x3c050007,
69000xc002b3b, 0x34a52250, 0x1000023f, 0x0,
69010x2c2b025, 0x2402ffbf, 0x282a024, 0x8f830128,
69020x3c040001, 0x248468d0, 0x24020002, 0xafa20014,
69030xafa30010, 0x8f860120, 0x8f870124, 0x3c050007,
69040xc002b3b, 0x34a52450, 0x1000022f, 0x0,
69050x8ea20000, 0x8ea30004, 0x3c040001, 0x248468e8,
69060xafb00010, 0xafbe0014, 0x8ea70018, 0x34a52800,
69070xc002b3b, 0x603021, 0x10000223, 0x0,
69080xa6b1000a, 0x8f820124, 0x3c040001, 0x248468f0,
69090xafbe0014, 0xafa20010, 0x8f460044, 0x8f870120,
69100x3c050007, 0xc002b3b, 0x34a53000, 0x10000216,
69110x0, 0xa6b1000a, 0xa6b2000e, 0x8f820124,
69120x3c040001, 0x248468fc, 0xafbe0014, 0xafa20010,
69130x8f460044, 0x8f870120, 0x3c050007, 0xc002b3b,
69140x34a53200, 0x10000208, 0x0, 0x8f420084,
69150x8faa006c, 0x4a102b, 0x14400007, 0x3c020001,
69160x2c21024, 0x10400004, 0x0, 0x240b0002,
69170xafab005c, 0x8faa006c, 0x1140021b, 0x27ab0020,
69180xafab00a4, 0x3c0a001f, 0x354affff, 0xafaa009c,
69190x8fab005c, 0x240a0001, 0x556a0021, 0x240a0002,
69200x8f430054, 0x8f420050, 0x1062000b, 0x274b0054,
69210x8f5e0054, 0x3403ecc0, 0xafab004c, 0x27c20001,
69220x304201ff, 0xafa20054, 0x1e1140, 0x431021,
69230x1000006b, 0x2e2a821, 0x8f420044, 0x8faa006c,
69240x3c040001, 0x248468ac, 0xafaa0014, 0xafa20010,
69250x8f460054, 0x8f470050, 0x3c050007, 0xc002b3b,
69260x34a51300, 0x8f430350, 0x2402ffbf, 0x282a024,
69270x24630001, 0xaf430350, 0x100001d3, 0x8f420350,
69280x156a001d, 0x0, 0x8f430074, 0x8f420070,
69290x1062000a, 0x274b0074, 0x8f5e0074, 0xafab004c,
69300x27c20001, 0x304203ff, 0xafa20054, 0x1e1140,
69310x24426cc0, 0x1000004a, 0x2e2a821, 0x8f420044,
69320x8faa006c, 0x3c040001, 0x248468b8, 0x3c050007,
69330xafaa0014, 0xafa20010, 0x8f460074, 0x8f470070,
69340x34a51500, 0x240b0001, 0xc002b3b, 0xafab005c,
69350x1000ffc3, 0x0, 0x8f430064, 0x8f420060,
69360x1062001a, 0x274a0064, 0x8f5e0064, 0x8fab005c,
69370xafaa004c, 0x27c20001, 0x304200ff, 0xafa20054,
69380x24020004, 0x1562000e, 0x1e1140, 0x1e1180,
69390x24420cc0, 0x2e21021, 0xafa20044, 0x9442002a,
69400x8faa0044, 0x8fab006c, 0x4b102b, 0x10400024,
69410x25550020, 0x240a0001, 0x10000021, 0xa3aa0097,
69420x24424cc0, 0x1000001e, 0x2e2a821, 0x8f420044,
69430x8fab006c, 0x3c040001, 0x248468c4, 0xafab0014,
69440xafa20010, 0x8f460064, 0x8f470060, 0x3c050007,
69450xc002b3b, 0x34a51800, 0x3c020008, 0x2c21024,
69460x1440ff34, 0x0, 0x8f420370, 0x240a0001,
69470xafaa005c, 0x24420001, 0xaf420370, 0x1000ff90,
69480x8f420370, 0x27a30036, 0x131040, 0x621821,
69490x94620000, 0x441021, 0x10000020, 0xa4620000,
69500x8fab0064, 0xaeab0018, 0x93a20097, 0x10400072,
69510x9821, 0x8faa0044, 0x8fa4006c, 0x8fa300a4,
69520x25420020, 0xafa20028, 0x25420008, 0xafa20030,
69530x25420010, 0xafaa002c, 0xafa20034, 0x9542002a,
69540xa7a20038, 0x95420018, 0xa7a2003a, 0x9542001a,
69550xa7a2003c, 0x9542001c, 0xa7a2003e, 0x94620018,
69560x24630002, 0x822023, 0x1880ffde, 0x26730001,
69570x2e620004, 0x1440fff9, 0x0, 0x8f4200fc,
69580x26650001, 0xa2102a, 0x1440002b, 0x24030001,
69590x8f83012c, 0x10600023, 0x0, 0x8f820124,
69600x431023, 0x22143, 0x58800001, 0x24840040,
69610x8f820128, 0x431023, 0x21943, 0x58600001,
69620x24630040, 0x64102a, 0x54400001, 0x602021,
69630xaf4400fc, 0x8f4200fc, 0xa2102a, 0x10400011,
69640x24030001, 0x10000015, 0x306200ff, 0x8fab0064,
69650x96070018, 0xafab0010, 0x8e220008, 0x3c040001,
69660x248468dc, 0x8c430004, 0x8c420000, 0x34a52400,
69670x2403021, 0xc002b3b, 0xafa30014, 0x1000002b,
69680x0, 0x8f420334, 0x1821, 0x24420001,
69690xaf420334, 0x8f420334, 0x306200ff, 0x5040fedc,
69700x3c020800, 0x12600021, 0x9021, 0x8fb100a4,
69710x2208021, 0x8e220008, 0x96070018, 0x8fa60064,
69720x8c440000, 0x8c450004, 0x240a0001, 0xafaa0010,
69730xafbe0014, 0x8f420008, 0xafa20018, 0x8f42010c,
69740x40f809, 0x0, 0x1040ffd8, 0x3c050007,
69750x96020018, 0x8fab0064, 0x8faa009c, 0x1625821,
69760x14b102b, 0x10400004, 0xafab0064, 0x8f420148,
69770x1625823, 0xafab0064, 0x26100002, 0x26520001,
69780x253102b, 0x1440ffe3, 0x26310004, 0x8fb0006c,
69790x10000036, 0x97b10038, 0x8f4200fc, 0x24050002,
69800xa2102a, 0x1440001b, 0x24030001, 0x8f83012c,
69810x10600013, 0x0, 0x8f820124, 0x431023,
69820x22143, 0x58800001, 0x24840040, 0x8f820128,
69830x431023, 0x21943, 0x58600001, 0x24630040,
69840x64102a, 0x54400001, 0x602021, 0xaf4400fc,
69850x8f4200fc, 0xa2102a, 0x14400006, 0x24030001,
69860x8f420334, 0x1821, 0x24420001, 0xaf420334,
69870x8f420334, 0x306200ff, 0x1040fea5, 0x3c020800,
69880x96b1000a, 0x8fb0006c, 0x3223ffff, 0x70102b,
69890x54400001, 0x608021, 0x8ea40000, 0x8ea50004,
69900x240b0001, 0xafab0010, 0xafbe0014, 0x8f420008,
69910x8fa60064, 0xafa20018, 0x8f42010c, 0x40f809,
69920x2003821, 0x1040fea2, 0x3c050007, 0x96a3000e,
69930x97aa008e, 0x11400007, 0x609021, 0x934205c4,
69940x14400004, 0x0, 0x97ab0086, 0x6a1825,
69950xa6ab0016, 0x8faa007c, 0x3c02ffff, 0x1421024,
69960x10400003, 0xa1402, 0x34630400, 0xa6a20014,
69970x8fab006c, 0x560b0072, 0xa6a3000e, 0x34620004,
69980xa6a2000e, 0x8faa0074, 0x16a1021, 0xa6a2000a,
69990x8f430044, 0x8f4401a0, 0x8f4501a4, 0x34028000,
70000xafa20010, 0x8f420044, 0x2a03021, 0x24070020,
70010xafa20014, 0x8f42000c, 0x31940, 0x604821,
70020xafa20018, 0x8f42010c, 0x4021, 0xa92821,
70030xa9182b, 0x882021, 0x40f809, 0x832021,
70040x5040fe7f, 0xa6b2000e, 0x8f420368, 0xafa0006c,
70050xa34005c4, 0x2442ffff, 0xaf420368, 0x8fab005c,
70060x240a0001, 0x8f420368, 0x156a0006, 0x240a0002,
70070x8f42035c, 0x2442ffff, 0xaf42035c, 0x1000000c,
70080x8f42035c, 0x156a0006, 0x0, 0x8f420364,
70090x2442ffff, 0xaf420364, 0x10000005, 0x8f420364,
70100x8f420360, 0x2442ffff, 0xaf420360, 0x8f420360,
70110x8faa0054, 0x8fab004c, 0xad6a0000, 0x8f420044,
70120x8f440088, 0x8f430078, 0x24420001, 0x441024,
70130x24630001, 0xaf420044, 0xaf430078, 0x8c020240,
70140x62182b, 0x14600075, 0x24070008, 0x8f440168,
70150x8f45016c, 0x8f430044, 0x8f48000c, 0x8f860120,
70160x24020040, 0xafa20010, 0xafa30014, 0xafa80018,
70170x8f42010c, 0x40f809, 0x24c6001c, 0x14400011,
70180x240b0001, 0x3c010001, 0x370821, 0xa02b40f2,
70190x8f820124, 0xafa20010, 0x8f820128, 0x3c040001,
70200x2484688c, 0xafa20014, 0x8f460044, 0x8f870120,
70210x3c050009, 0xc002b3b, 0x34a51300, 0x1000000b,
70220x0, 0x8f420304, 0x24420001, 0xaf420304,
70230x8f420304, 0x8f420044, 0xaf42007c, 0x3c010001,
70240x370821, 0xa02040f2, 0xaf400078, 0x8f420318,
70250x24420001, 0xaf420318, 0x10000048, 0x8f420318,
70260xa6b0000a, 0x8f430044, 0x8f4401a0, 0x8f4501a4,
70270x34028000, 0xafa20010, 0x8f420044, 0x2a03021,
70280x24070020, 0xafa20014, 0x8f42000c, 0x31940,
70290x604821, 0xafa20018, 0x8f42010c, 0x4021,
70300xa92821, 0xa9182b, 0x882021, 0x40f809,
70310x832021, 0x1040fe1f, 0x240a0001, 0xa34a05c4,
70320x8fab006c, 0x8faa0064, 0x1705823, 0xafab006c,
70330x8fab009c, 0x1505021, 0x16a102b, 0x10400004,
70340xafaa0064, 0x8f420148, 0x1425023, 0xafaa0064,
70350x8f420368, 0x2442ffff, 0xaf420368, 0x8faa005c,
70360x240b0001, 0x8f420368, 0x154b0006, 0x240b0002,
70370x8f42035c, 0x2442ffff, 0xaf42035c, 0x1000000c,
70380x8f42035c, 0x114b0006, 0x0, 0x8f420360,
70390x2442ffff, 0xaf420360, 0x10000005, 0x8f420360,
70400x8f420364, 0x2442ffff, 0xaf420364, 0x8f420364,
70410x8fab0054, 0x8faa004c, 0xad4b0000, 0x8f420044,
70420x8f440088, 0x8f430078, 0x24420001, 0x441024,
70430x24630001, 0xaf420044, 0xaf430078, 0x8faa006c,
70440x1540fe0b, 0x0, 0x8fab006c, 0x1160001e,
70450x0, 0x934205c4, 0x10400009, 0x0,
70460x8faa0064, 0xaf4a00c4, 0xaf4b00c0, 0x8fab007c,
70470xaf4b00c8, 0x8faa0074, 0x1000000e, 0xaf4a00cc,
70480x97ab008e, 0x1160000b, 0x34038100, 0x8fa20020,
70490x8c46000c, 0xa443000c, 0x97aa0086, 0x8c440004,
70500x8c450008, 0xa44a000e, 0xac440000, 0xac450004,
70510xac460008, 0x8f42034c, 0x24420001, 0xaf42034c,
70520x10000010, 0x8f42034c, 0x8fab007c, 0x3164ffff,
70530x2484fffc, 0x801821, 0x8f440250, 0x8f450254,
70540x8f460118, 0x1021, 0xa32821, 0xa3382b,
70550x822021, 0x872021, 0xaf440250, 0xc0f809,
70560xaf450254, 0x8fbf00c8, 0x8fbe00c4, 0x8fb500c0,
70570x8fb300bc, 0x8fb200b8, 0x8fb100b4, 0x8fb000b0,
70580x3e00008, 0x27bd00d0, 0x3e00008, 0x0,
70590x27bdff38, 0x240b0001, 0xafbf00c0, 0xafbe00bc,
70600xafb500b8, 0xafb300b4, 0xafb200b0, 0xafb100ac,
70610xafb000a8, 0xa3a00087, 0xafa00044, 0xafab005c,
70620x934205c4, 0xa7a00076, 0x10400007, 0xa7a0007e,
70630x8f4c00c0, 0xafac0064, 0x8f4b00c8, 0x8f5e00c4,
70640x10000130, 0xafab006c, 0x8f420114, 0x40f809,
70650x0, 0x403021, 0x10c002a1, 0x0,
70660x8cc20000, 0x8cc30004, 0xafa20020, 0xafa30024,
70670x8fac0024, 0x8fbe0020, 0x3182ffff, 0x2442fffc,
70680xafa20064, 0x3c020006, 0x2c21024, 0x14400015,
70690xafac006c, 0x93c20000, 0x30420001, 0x10400011,
70700x2402ffff, 0x8fc30000, 0x14620004, 0x3402ffff,
70710x97c30004, 0x1062000b, 0x0, 0xc0024bb,
70720x3c02021, 0x304200ff, 0x14400006, 0x0,
70730x8f420118, 0x40f809, 0x0, 0x10000280,
70740x0, 0x8fa20024, 0x3c03ffbf, 0x3463ffff,
70750x431024, 0x3c03ffff, 0x431824, 0x14600003,
70760xafa20024, 0x10000040, 0x8021, 0x3c020080,
70770x621024, 0x10400007, 0x0, 0x8f42038c,
70780x24420001, 0xaf42038c, 0x8f42038c, 0x10000036,
70790x24100001, 0x8f420210, 0x24420001, 0xaf420210,
70800x8f420210, 0x3c020001, 0x621024, 0x10400006,
70810x3c020002, 0x8f4201c4, 0x24420001, 0xaf4201c4,
70820x8f4201c4, 0x3c020002, 0x621024, 0x10400006,
70830x3c020004, 0x8f42037c, 0x24420001, 0xaf42037c,
70840x8f42037c, 0x3c020004, 0x621024, 0x10400006,
70850x3c020008, 0x8f420380, 0x24420001, 0xaf420380,
70860x8f420380, 0x3c020008, 0x621024, 0x10400006,
70870x3c020010, 0x8f420384, 0x24420001, 0xaf420384,
70880x8f420384, 0x3c020010, 0x621024, 0x10400006,
70890x3c020020, 0x8f4201c0, 0x24420001, 0xaf4201c0,
70900x8f4201c0, 0x3c020020, 0x621024, 0x10400006,
70910x24100001, 0x8f420388, 0x24420001, 0xaf420388,
70920x8f420388, 0x24100001, 0x8c020260, 0x8fab0064,
70930x4b102b, 0x10400015, 0x320200ff, 0x8f4201e8,
70940x24420001, 0xaf4201e8, 0x8f4201e8, 0x8fac006c,
70950x8f8200e0, 0x358c0100, 0xafac006c, 0xafa20010,
70960x8f8200e4, 0x24100001, 0x3c040001, 0x248468a0,
70970xafa20014, 0x8fa60020, 0x8fa70024, 0x3c050007,
70980xc002b3b, 0x34a53600, 0x320200ff, 0x10400010,
70990x3c020080, 0x2c21024, 0x1440000e, 0x32c20400,
71000x8fab006c, 0x3c020080, 0x34420100, 0x1621024,
71010x10400005, 0x0, 0x8f42020c, 0x24420001,
71020xaf42020c, 0x8f42020c, 0x10000202, 0x8fa30064,
71030x32c20400, 0x10400012, 0x34028100, 0x97c3000c,
71040x1462000f, 0x0, 0x240c0200, 0xa7ac0076,
71050x97c2000e, 0x8fc30008, 0x8fc40004, 0x8fab0064,
71060x8fc50000, 0x256bfffc, 0xafab0064, 0xa7a2007e,
71070xafc3000c, 0xafc40008, 0xafc50004, 0x27de0004,
71080x8fa70064, 0x320200ff, 0x14400034, 0x3c020100,
71090x97c4000c, 0x2c8305dd, 0x38828870, 0x2c420001,
71100x621825, 0x10600015, 0x2821, 0x32c20800,
71110x10400015, 0x24020800, 0x97c30014, 0x14620012,
71120x3402aaaa, 0x97c3000e, 0x14620007, 0x2021,
71130x97c30010, 0x24020300, 0x14620004, 0x801021,
71140x97c20012, 0x2c440001, 0x801021, 0x54400006,
71150x24050016, 0x10000004, 0x0, 0x24020800,
71160x50820001, 0x2405000e, 0x10a00013, 0x3c52021,
71170x24830009, 0x3c02001f, 0x3442ffff, 0x43102b,
71180x10400003, 0x0, 0x8f420148, 0x621823,
71190x90620000, 0x38430006, 0x2c630001, 0x38420011,
71200x2c420001, 0x621825, 0x10600004, 0x3c020100,
71210x94820002, 0x453821, 0x3c020100, 0x2c21024,
71220x5040000e, 0xafa70064, 0x8fac0064, 0x10ec0008,
71230x3c050007, 0x3c040001, 0x24846908, 0x8fa60064,
71240x34a54000, 0xafa00010, 0xc002b3b, 0xafa00014,
71250x8fab0064, 0x256b0004, 0xafab0064, 0x8f420080,
71260x8fac0064, 0x4c102b, 0x1040002c, 0x32c28000,
71270x10400034, 0x240b0003, 0x32c21000, 0x10400031,
71280xafab005c, 0x1000002e, 0x240c0004, 0x8f420350,
71290x2403ffbf, 0x283a024, 0x24420001, 0xaf420350,
71300x10000173, 0x8f420350, 0x3c020800, 0x2c2b025,
71310x2402ffbf, 0x282a024, 0x8f830128, 0x3c040001,
71320x248468d0, 0x26620001, 0xafa20014, 0xafa30010,
71330x8f860120, 0x8f870124, 0x3c050007, 0xc002b3b,
71340x34a55300, 0x10000162, 0x0, 0x8ea20000,
71350x8ea30004, 0x3c040001, 0x248468e8, 0xafb00010,
71360xafb10014, 0x8ea70018, 0x34a55900, 0xc002b3b,
71370x603021, 0x10000156, 0x0, 0x8f420084,
71380x8fab0064, 0x4b102b, 0x14400007, 0x3c020001,
71390x2c21024, 0x10400004, 0x0, 0x240c0002,
71400xafac005c, 0x8fab0064, 0x11600166, 0x27ac0020,
71410xafac008c, 0x8fab005c, 0x240c0001, 0x556c0021,
71420x240c0002, 0x8f430054, 0x8f420050, 0x1062000b,
71430x274b0054, 0x8f510054, 0x3403ecc0, 0xafab004c,
71440x26220001, 0x304201ff, 0xafa20054, 0x111140,
71450x431021, 0x1000006b, 0x2e2a821, 0x8f420044,
71460x8fac0064, 0x3c040001, 0x248468ac, 0xafac0014,
71470xafa20010, 0x8f460054, 0x8f470050, 0x3c050007,
71480xc002b3b, 0x34a54300, 0x8f430350, 0x2402ffbf,
71490x282a024, 0x24630001, 0xaf430350, 0x10000124,
71500x8f420350, 0x156c001d, 0x0, 0x8f430074,
71510x8f420070, 0x1062000a, 0x274b0074, 0x8f510074,
71520xafab004c, 0x26220001, 0x304203ff, 0xafa20054,
71530x111140, 0x24426cc0, 0x1000004a, 0x2e2a821,
71540x8f420044, 0x8fac0064, 0x3c040001, 0x248468b8,
71550x3c050007, 0xafac0014, 0xafa20010, 0x8f460074,
71560x8f470070, 0x34a54500, 0x240b0001, 0xc002b3b,
71570xafab005c, 0x1000ffc3, 0x0, 0x8f430064,
71580x8f420060, 0x1062001a, 0x274c0064, 0x8f510064,
71590x8fab005c, 0xafac004c, 0x26220001, 0x304200ff,
71600xafa20054, 0x24020004, 0x1562000e, 0x111140,
71610x111180, 0x24420cc0, 0x2e21021, 0xafa20044,
71620x9442002a, 0x8fac0044, 0x8fab0064, 0x4b102b,
71630x10400024, 0x25950020, 0x240c0001, 0x10000021,
71640xa3ac0087, 0x24424cc0, 0x1000001e, 0x2e2a821,
71650x8f420044, 0x8fab0064, 0x3c040001, 0x248468c4,
71660xafab0014, 0xafa20010, 0x8f460064, 0x8f470060,
71670x3c050007, 0xc002b3b, 0x34a54800, 0x3c020008,
71680x2c21024, 0x1440ff61, 0x0, 0x8f420370,
71690x240c0001, 0xafac005c, 0x24420001, 0xaf420370,
71700x1000ff90, 0x8f420370, 0x27a30036, 0x131040,
71710x621821, 0x94620000, 0x441021, 0x1000001f,
71720xa4620000, 0xaebe0018, 0x93a20087, 0x10400084,
71730x9821, 0x8fab0044, 0x8fa40064, 0x8fa3008c,
71740x25620020, 0xafa20028, 0x25620008, 0xafa20030,
71750x25620010, 0xafab002c, 0xafa20034, 0x9562002a,
71760xa7a20038, 0x95620018, 0xa7a2003a, 0x9562001a,
71770xa7a2003c, 0x9562001c, 0xa7a2003e, 0x94620018,
71780x24630002, 0x822023, 0x1880ffdf, 0x26730001,
71790x2e620004, 0x1440fff9, 0x0, 0x8f4200fc,
71800x262102a, 0x14400030, 0x24030001, 0x8f83012c,
71810x10600028, 0x0, 0x8f820124, 0x431023,
71820x22143, 0x58800001, 0x24840040, 0x8f820128,
71830x431023, 0x21943, 0x58600001, 0x24630040,
71840x64102a, 0x54400001, 0x602021, 0xaf4400fc,
71850x8f4200fc, 0x262102a, 0x10400016, 0x24030001,
71860x1000001a, 0x306200ff, 0x8fac008c, 0x101040,
71870x4c1021, 0x94470018, 0x101080, 0x4c1021,
71880xafbe0010, 0x8c420008, 0x3c040001, 0x248468dc,
71890x3c050007, 0x8c430004, 0x8c420000, 0x34a55500,
71900x2003021, 0xc002b3b, 0xafa30014, 0x10000039,
71910x0, 0x8f420334, 0x1821, 0x24420001,
71920xaf420334, 0x8f420334, 0x306200ff, 0x1040ff06,
71930x8021, 0x8f430008, 0x2402fbff, 0x1260002d,
71940x625024, 0x3c0b4000, 0x22b4025, 0x8fb1008c,
71950x2669ffff, 0x2209021, 0x8e420008, 0x96270018,
71960x8c440000, 0x8c450004, 0x56090004, 0x240b0001,
71970x240c0002, 0x10000002, 0xafac0010, 0xafab0010,
71980x16000004, 0xafa80014, 0x8f420008, 0x10000002,
71990xafa20018, 0xafaa0018, 0x8f42010c, 0x3c03021,
72000xafa80098, 0xafa9009c, 0x40f809, 0xafaa00a0,
72010x8fa80098, 0x8fa9009c, 0x8faa00a0, 0x1040ffc2,
72020x3c02001f, 0x96230018, 0x3442ffff, 0x3c3f021,
72030x5e102b, 0x10400003, 0x26310002, 0x8f420148,
72040x3c2f023, 0x26100001, 0x213102b, 0x1440ffda,
72050x26520004, 0x8fb00064, 0x1000001a, 0x0,
72060x96a3000a, 0x8fb00064, 0x70102b, 0x54400001,
72070x608021, 0x8ea40000, 0x8ea50004, 0x8fab005c,
72080x240c0002, 0xafac0010, 0x934305c4, 0xb1700,
72090x10600003, 0x2223025, 0x3c020800, 0xc23025,
72100xafa60014, 0x8f420008, 0xafa20018, 0x8f42010c,
72110x3c03021, 0x40f809, 0x2003821, 0x1040fecb,
72120x3c050007, 0x97ac0076, 0x11800007, 0x96a3000e,
72130x934205c4, 0x14400004, 0x0, 0x97ab007e,
72140x6c1825, 0xa6ab0016, 0x8fac006c, 0x3c02ffff,
72150x1821024, 0x10400003, 0xc1402, 0x34630400,
72160xa6a20014, 0xa6b0000a, 0x8fab0064, 0x560b0006,
72170x3d0f021, 0x34620004, 0xafa00064, 0xa6a2000e,
72180x1000000d, 0xa34005c4, 0x8fac0064, 0x3c02001f,
72190x3442ffff, 0x5e102b, 0x1906023, 0xafac0064,
72200xa6a3000e, 0x240b0001, 0x10400003, 0xa34b05c4,
72210x8f420148, 0x3c2f023, 0x8fab0054, 0x8fac004c,
72220xad8b0000, 0x8fac0064, 0x1580feba, 0x0,
72230x8fab0064, 0x1160001b, 0x0, 0x934205c4,
72240x10400006, 0x0, 0xaf5e00c4, 0xaf4b00c0,
72250x8fac006c, 0x1000000e, 0xaf4c00c8, 0x97ab0076,
72260x1160000b, 0x34038100, 0x8fa20020, 0x8c46000c,
72270xa443000c, 0x97ac007e, 0x8c440004, 0x8c450008,
72280xa44c000e, 0xac440000, 0xac450004, 0xac460008,
72290x8f42034c, 0x24420001, 0xaf42034c, 0x10000010,
72300x8f42034c, 0x8fab006c, 0x3164ffff, 0x2484fffc,
72310x801821, 0x8f440250, 0x8f450254, 0x8f460118,
72320x1021, 0xa32821, 0xa3382b, 0x822021,
72330x872021, 0xaf440250, 0xc0f809, 0xaf450254,
72340x8fbf00c0, 0x8fbe00bc, 0x8fb500b8, 0x8fb300b4,
72350x8fb200b0, 0x8fb100ac, 0x8fb000a8, 0x3e00008,
72360x27bd00c8, 0x3e00008, 0x0, 0x27bdffd8,
72370xafbf0024, 0xafb00020, 0x8f43004c, 0x8f420048,
72380x10620034, 0x0, 0x8f430048, 0x8f42004c,
72390x622023, 0x4820001, 0x24840200, 0x8f430054,
72400x8f42004c, 0x43102b, 0x14400004, 0x24020200,
72410x8f43004c, 0x10000005, 0x431023, 0x8f420054,
72420x8f43004c, 0x431023, 0x2442ffff, 0x405021,
72430x8a102a, 0x54400001, 0x805021, 0x8f49004c,
72440x8f48004c, 0x8f440188, 0x8f45018c, 0x8f46004c,
72450x24071000, 0xafa70010, 0x84140, 0x1001821,
72460x12a4821, 0x313001ff, 0xafb00014, 0x8f470014,
72470x1021, 0x63140, 0xafa70018, 0xa32821,
72480xa3382b, 0x822021, 0x872021, 0x3402ecc0,
72490xc23021, 0x8f420108, 0x2e63021, 0x40f809,
72500xa3940, 0x54400001, 0xaf50004c, 0x8f43004c,
72510x8f420048, 0x14620018, 0x0, 0x8f420000,
72520x10400007, 0x0, 0xaf80004c, 0x8f82004c,
72530x1040fffd, 0x0, 0x10000005, 0x0,
72540xaf800048, 0x8f820048, 0x1040fffd, 0x0,
72550x8f820060, 0x2403fdff, 0x431024, 0xaf820060,
72560x8f420000, 0x10400003, 0x0, 0x10000002,
72570xaf80004c, 0xaf800048, 0x8fbf0024, 0x8fb00020,
72580x3e00008, 0x27bd0028, 0x3e00008, 0x0,
72590x27bdffd8, 0xafbf0024, 0xafb00020, 0x8f43005c,
72600x8f420058, 0x10620049, 0x0, 0x8f430058,
72610x8f42005c, 0x622023, 0x4820001, 0x24840100,
72620x8f430064, 0x8f42005c, 0x43102b, 0x14400004,
72630x24020100, 0x8f43005c, 0x10000005, 0x431023,
72640x8f420064, 0x8f43005c, 0x431023, 0x2442ffff,
72650x403821, 0x87102a, 0x54400001, 0x803821,
72660x8f42005c, 0x471021, 0x305000ff, 0x32c21000,
72670x10400015, 0x24082000, 0x8f49005c, 0x8f440190,
72680x8f450194, 0x8f46005c, 0x73980, 0xafa80010,
72690xafb00014, 0x8f480014, 0x94980, 0x1201821,
72700x1021, 0xa32821, 0xa3482b, 0x822021,
72710x892021, 0x63180, 0xafa80018, 0x8f420108,
72720x10000014, 0x24c60cc0, 0x8f49005c, 0x8f440190,
72730x8f450194, 0x8f46005c, 0x73940, 0xafa80010,
72740xafb00014, 0x8f480014, 0x94940, 0x1201821,
72750x1021, 0xa32821, 0xa3482b, 0x822021,
72760x892021, 0x63140, 0xafa80018, 0x8f420108,
72770x24c64cc0, 0x40f809, 0x2e63021, 0x54400001,
72780xaf50005c, 0x8f43005c, 0x8f420058, 0x14620018,
72790x0, 0x8f420000, 0x10400007, 0x0,
72800xaf80004c, 0x8f82004c, 0x1040fffd, 0x0,
72810x10000005, 0x0, 0xaf800048, 0x8f820048,
72820x1040fffd, 0x0, 0x8f820060, 0x2403feff,
72830x431024, 0xaf820060, 0x8f420000, 0x10400003,
72840x0, 0x10000002, 0xaf80004c, 0xaf800048,
72850x8fbf0024, 0x8fb00020, 0x3e00008, 0x27bd0028,
72860x3e00008, 0x0, 0x27bdffd8, 0xafbf0024,
72870xafb00020, 0x8f43006c, 0x8f420068, 0x10620033,
72880x0, 0x8f430068, 0x8f42006c, 0x622023,
72890x4820001, 0x24840400, 0x8f430074, 0x8f42006c,
72900x43102b, 0x14400004, 0x24020400, 0x8f43006c,
72910x10000005, 0x431023, 0x8f420074, 0x8f43006c,
72920x431023, 0x2442ffff, 0x405021, 0x8a102a,
72930x54400001, 0x805021, 0x8f49006c, 0x8f48006c,
72940x8f440198, 0x8f45019c, 0x8f46006c, 0x24074000,
72950xafa70010, 0x84140, 0x1001821, 0x12a4821,
72960x313003ff, 0xafb00014, 0x8f470014, 0x1021,
72970x63140, 0x24c66cc0, 0xafa70018, 0xa32821,
72980xa3382b, 0x822021, 0x872021, 0x8f420108,
72990x2e63021, 0x40f809, 0xa3940, 0x54400001,
73000xaf50006c, 0x8f43006c, 0x8f420068, 0x14620018,
73010x0, 0x8f420000, 0x10400007, 0x0,
73020xaf80004c, 0x8f82004c, 0x1040fffd, 0x0,
73030x10000005, 0x0, 0xaf800048, 0x8f820048,
73040x1040fffd, 0x0, 0x8f820060, 0x2403f7ff,
73050x431024, 0xaf820060, 0x8f420000, 0x10400003,
73060x0, 0x10000002, 0xaf80004c, 0xaf800048,
73070x8fbf0024, 0x8fb00020, 0x3e00008, 0x27bd0028,
73080x3e00008, 0x0, 0x8f4200fc, 0x3c030001,
73090x8f4400f8, 0x346330c8, 0x24420001, 0xaf4200fc,
73100x8f850128, 0x2e31021, 0x54820004, 0x24820008,
73110x3c020001, 0x34422ec8, 0x2e21021, 0x401821,
73120xaf4300f8, 0xac600000, 0x8f4200f4, 0x14620004,
73130x3c020001, 0x24a20020, 0x1000000f, 0xaf820128,
73140x8f4300f8, 0x344230c8, 0x2e21021, 0x54620004,
73150x24620008, 0x3c020001, 0x34422ec8, 0x2e21021,
73160x401821, 0x8c620004, 0x21140, 0xa21021,
73170xaf820128, 0xac600000, 0x8ca30018, 0x30620070,
73180x1040002d, 0x30620020, 0x10400004, 0x3c020010,
73190x2c21024, 0x1040000d, 0x0, 0x30620040,
0x10400004, 0x3c020020, 0x2c21024, 0x10400007,
0x0, 0x30620010, 0x1040001f, 0x3c020040,
0x2c21024, 0x1440001c, 0x0, 0x8f820040,
0x30420001, 0x14400008, 0x2021, 0x8c030104,
0x24020001, 0x50620005, 0x24040001, 0x8c020264,
0x10400003, 0x801021, 0x24040001, 0x801021,
0x10400006, 0x0, 0x8f42030c, 0x24420001,
0xaf42030c, 0x10000008, 0x8f42030c, 0x8f820044,
0x34420004, 0xaf820044, 0x8f420308, 0x24420001,
0xaf420308, 0x8f420308, 0x3e00008, 0x0,
0x3e00008, 0x0, 0x27bdff98, 0xafbf0060,
0xafbe005c, 0xafb50058, 0xafb30054, 0xafb20050,
0xafb1004c, 0xafb00048, 0x8f4200fc, 0x24420001,
0xaf4200fc, 0x8f880128, 0x25020020, 0xaf820128,
0x8d030018, 0x30620070, 0x1040002e, 0x30620020,
0x10400004, 0x3c020010, 0x2c21024, 0x1040000d,
0x0, 0x30620040, 0x10400004, 0x3c020020,
0x2c21024, 0x10400007, 0x0, 0x30620010,
0x104001a9, 0x3c020040, 0x2c21024, 0x144001a6,
0x0, 0x8f820040, 0x30420001, 0x14400008,
0x2021, 0x8c030104, 0x24020001, 0x50620005,
0x24040001, 0x8c020264, 0x10400003, 0x801021,
0x24040001, 0x801021, 0x10400006, 0x0,
0x8f42030c, 0x24420001, 0xaf42030c, 0x10000192,
0x8f42030c, 0x8f820044, 0x34420004, 0xaf820044,
0x8f420308, 0x24420001, 0xaf420308, 0x1000018a,
0x8f420308, 0x30620002, 0x1040014b, 0x3c020800,
0x8d1e001c, 0x1e5702, 0xafaa0034, 0x950a0016,
0x3c22024, 0xafaa0024, 0x8faa0034, 0x24020001,
0x15420006, 0x33deffff, 0x1e1140, 0x3403ecc0,
0x431021, 0x10000010, 0x2e2a821, 0x24020002,
0x15420005, 0x24020003, 0x1e1140, 0x24426cc0,
0x10000009, 0x2e2a821, 0x15420005, 0x1e1180,
0x1e1140, 0x24424cc0, 0x10000003, 0x2e2a821,
0x571021, 0x24550ce0, 0x96a2000e, 0x304afffc,
0x30420400, 0x10400003, 0xafaa002c, 0x100000e1,
0x8821, 0x10800004, 0x8821, 0x97b10026,
0x100000dd, 0xa6b10012, 0x8eb30018, 0x966a000c,
0xa7aa003e, 0x97a5003e, 0x2ca305dd, 0x38a28870,
0x2c420001, 0x621825, 0x10600015, 0x2021,
0x32c20800, 0x10400015, 0x24020800, 0x96630014,
0x14620012, 0x3402aaaa, 0x9663000e, 0x14620007,
0x2821, 0x96630010, 0x24020300, 0x14620004,
0xa01021, 0x96620012, 0x2c450001, 0xa01021,
0x54400006, 0x24040016, 0x10000004, 0x0,
0x24020800, 0x50a20001, 0x2404000e, 0x108000b9,
0x2649021, 0x92420000, 0x3042000f, 0x28080,
0x32c20100, 0x10400020, 0x2501821, 0x3c020020,
0x43102b, 0x1440000e, 0x2402021, 0x2821,
0x94820000, 0x24840002, 0xa22821, 0x83102b,
0x1440fffb, 0x30a2ffff, 0x51c02, 0x622821,
0x51c02, 0x30a2ffff, 0x10000009, 0x622821,
0x8f470148, 0x8f420110, 0x102842, 0x3c060020,
0x40f809, 0xafa80040, 0x3045ffff, 0x8fa80040,
0x50a00001, 0x3405ffff, 0x8faa002c, 0x354a0002,
0x10000002, 0xafaa002c, 0x2821, 0x32c20080,
0x10400090, 0xa6a50010, 0x26430009, 0x3c02001f,
0x3442ffff, 0x43102b, 0x10400003, 0x0,
0x8f420148, 0x621823, 0x90660000, 0x30c200ff,
0x38430006, 0x2c630001, 0x38420011, 0x2c420001,
0x621825, 0x1060007f, 0x24020800, 0x8821,
0x97a3003e, 0x1462000f, 0x2602021, 0x96710000,
0x96620002, 0x96630004, 0x96640006, 0x2228821,
0x2238821, 0x2248821, 0x96620008, 0x9663000a,
0x9664000c, 0x2228821, 0x2238821, 0x10000007,
0x2248821, 0x94820000, 0x24840002, 0x2228821,
0x92102b, 0x1440fffb, 0x0, 0x111c02,
0x3222ffff, 0x628821, 0x111c02, 0x3222ffff,
0x628821, 0x32c20200, 0x10400003, 0x26440006,
0x1000003e, 0x8021, 0x3c05001f, 0x34a5ffff,
0xa4102b, 0x10400003, 0x0, 0x8f420148,
0x822023, 0x94820000, 0x30421fff, 0x10400004,
0x2644000c, 0x96420002, 0x10000030, 0x508023,
0x96420002, 0x26430014, 0x508023, 0x3c020020,
0x43102b, 0x1440000a, 0xd08021, 0x9642000c,
0x2028021, 0x9642000e, 0x96430010, 0x96440012,
0x2028021, 0x2038021, 0x10000020, 0x2048021,
0xa4102b, 0x10400003, 0x0, 0x8f420148,
0x822023, 0x94820000, 0x24840002, 0x2028021,
0xa4102b, 0x10400003, 0x0, 0x8f420148,
0x822023, 0x94820000, 0x24840002, 0x2028021,
0xa4102b, 0x10400003, 0x0, 0x8f420148,
0x822023, 0x94820000, 0x24840002, 0x2028021,
0xa4102b, 0x10400003, 0x0, 0x8f420148,
0x822023, 0x94820000, 0x2028021, 0x3c020100,
0x2c21024, 0x1040000e, 0x0, 0x8faa002c,
0x31420004, 0x1040000a, 0x0, 0x9504000e,
0x2642021, 0xc003eec, 0x2484fffc, 0x3042ffff,
0x2228821, 0x111c02, 0x3222ffff, 0x628821,
0x8faa0024, 0x1518823, 0x111402, 0x2228821,
0x2308821, 0x111402, 0x2228821, 0x3231ffff,
0x52200001, 0x3411ffff, 0x8faa002c, 0x354a0001,
0xafaa002c, 0xa6b10012, 0x97aa002e, 0xa6aa000e,
0x8faa002c, 0x31420004, 0x10400002, 0x24091000,
0x34098000, 0x8f480044, 0x8f4401a0, 0x8f4501a4,
0xafa90010, 0x8f490044, 0x84140, 0x1001821,
0xafa90014, 0x8f48000c, 0x2a03021, 0x24070020,
0xafa80018, 0x8f48010c, 0x1021, 0xa32821,
0xa3482b, 0x822021, 0x100f809, 0x892021,
0x1440000b, 0x0, 0x8f820128, 0x3c040001,
0x24846914, 0xafbe0014, 0xafa20010, 0x8f860124,
0x8f870120, 0x3c050007, 0xc002b3b, 0x34a59920,
0x8f420368, 0x2442ffff, 0xaf420368, 0x8f420044,
0x8f430088, 0x24420001, 0x431024, 0xaf420044,
0x8faa0034, 0x8f440368, 0x24020001, 0x15420006,
0x24020002, 0x8f42035c, 0x2442ffff, 0xaf42035c,
0x10000049, 0x8f42035c, 0x15420006, 0x0,
0x8f420364, 0x2442ffff, 0xaf420364, 0x10000042,
0x8f420364, 0x8f420360, 0x2442ffff, 0xaf420360,
0x1000003d, 0x8f420360, 0x30621000, 0x10400005,
0x30628000, 0x8f420078, 0x24420001, 0x10000036,
0xaf420078, 0x10400034, 0x0, 0x8f420078,
0x24420001, 0xaf420078, 0x8c030240, 0x43102b,
0x1440002d, 0x24070008, 0x8f440168, 0x8f45016c,
0x8f430044, 0x8f48000c, 0x8f860120, 0x24020040,
0xafa20010, 0xafa30014, 0xafa80018, 0x8f42010c,
0x40f809, 0x24c6001c, 0x14400011, 0x24020001,
0x3c010001, 0x370821, 0xa02240f2, 0x8f820124,
0xafa20010, 0x8f820128, 0x3c040001, 0x2484688c,
0xafa20014, 0x8f460044, 0x8f870120, 0x3c050009,
0xc002b3b, 0x34a51300, 0x1000000b, 0x0,
0x8f420304, 0x24420001, 0xaf420304, 0x8f420304,
0x8f420044, 0xaf42007c, 0x3c010001, 0x370821,
0xa02040f2, 0xaf400078, 0x8f420318, 0x24420001,
0xaf420318, 0x8f420318, 0x8fbf0060, 0x8fbe005c,
0x8fb50058, 0x8fb30054, 0x8fb20050, 0x8fb1004c,
0x8fb00048, 0x3e00008, 0x27bd0068, 0x3e00008,
0x0, 0x0, 0x0, 0x8f42013c,
0xaf8200c0, 0x8f42013c, 0xaf8200c4, 0x8f42013c,
0xaf8200c8, 0x8f420138, 0xaf8200d0, 0x8f420138,
0xaf8200d4, 0x8f420138, 0x3e00008, 0xaf8200d8,
0x27bdffe0, 0x27840208, 0x24050200, 0xafbf0018,
0xc002bbf, 0x24060008, 0x8c020204, 0xc004012,
0xaf820210, 0x3c020001, 0x8c426d94, 0x30420002,
0x1040000e, 0x2021, 0x8c060248, 0x24020002,
0x3c010001, 0xac226d98, 0xc005104, 0x24050002,
0x2021, 0x8c060248, 0x24020001, 0x3c010001,
0xac226d98, 0x10000011, 0x24050001, 0x8c060248,
0x24020004, 0x3c010001, 0xac226d98, 0xc005104,
0x24050004, 0x3c020001, 0x8c426d94, 0x30420001,
0x10400008, 0x24020001, 0x3c010001, 0xac226d98,
0x2021, 0x24050001, 0x3c06601b, 0xc005104,
0x0, 0x3c040001, 0x248469d0, 0x8f420150,
0x8f430154, 0x3c050008, 0x8f460158, 0x21640,
0x31940, 0x34630403, 0x431025, 0x633c0,
0x461025, 0xaf82021c, 0xafa00010, 0xafa00014,
0x8f86021c, 0x34a50200, 0xc002b3b, 0x3821,
0x3c010001, 0xac206d90, 0x3c010001, 0xac206da8,
0x8fbf0018, 0x3e00008, 0x27bd0020, 0x27bdffe0,
0x3c050008, 0x34a50300, 0xafbf0018, 0xafa00010,
0xafa00014, 0x8f860200, 0x3c040001, 0x248469dc,
0xc002b3b, 0x3821, 0x8f420410, 0x24420001,
0xaf420410, 0x8f420410, 0x8fbf0018, 0x3e00008,
0x27bd0020, 0x27bdffd8, 0xafbf0020, 0xafb1001c,
0xafb00018, 0x8f4203a4, 0x24420001, 0xaf4203a4,
0x8f4203a4, 0x8f900220, 0x8f8200e0, 0xafa20010,
0x8f8200e4, 0xafa20014, 0x8f8600c4, 0x8f8700c8,
0x3c040001, 0x248469e8, 0xc002b3b, 0x2002821,
0x3c044000, 0x2041024, 0x504000b4, 0x3c040100,
0x8f4203bc, 0x24420001, 0xaf4203bc, 0x8f4203bc,
0x8f8700c4, 0x8f8300c8, 0x8f420148, 0x671823,
0x43102b, 0x10400003, 0x0, 0x8f420148,
0x621821, 0x10600005, 0x0, 0x8f42014c,
0x43102b, 0x1040000b, 0x0, 0x8f8200e0,
0x8f430124, 0xaf42011c, 0xaf430114, 0x8f820220,
0x3c0308ff, 0x3463fffb, 0x431024, 0x100000ce,
0x441025, 0x8f820220, 0x3c0308ff, 0x3463ffff,
0x431024, 0x34420004, 0xaf820220, 0x8f8200e0,
0x8f430124, 0xaf42011c, 0xaf430114, 0x8f8600c8,
0x8f840120, 0x8f830124, 0x10000005, 0x2821,
0x14620002, 0x24620020, 0x27624800, 0x401821,
0x1064000c, 0x30a200ff, 0x8c620018, 0x30420003,
0x1040fff7, 0x27624fe0, 0x8f4203d0, 0x24050001,
0x24420001, 0xaf4203d0, 0x8f4203d0, 0x8c660008,
0x30a200ff, 0x14400058, 0x0, 0x934205c4,
0x14400055, 0x0, 0x8f8700c4, 0x8f8800e0,
0x8f8400e4, 0x2402fff8, 0x1024024, 0x1041023,
0x218c3, 0x4620001, 0x24630200, 0x10600005,
0x24020001, 0x10620009, 0x0, 0x1000001f,
0x0, 0x8f4203c0, 0xe03021, 0x24420001,
0xaf4203c0, 0x10000040, 0x8f4203c0, 0x8f4203c4,
0x24420001, 0xaf4203c4, 0x8c860000, 0x8f420148,
0x8f4303c4, 0xe61823, 0x43102b, 0x10400004,
0x2c62233f, 0x8f420148, 0x621821, 0x2c62233f,
0x14400031, 0x0, 0x8f42020c, 0x24420001,
0xaf42020c, 0x8f42020c, 0xe03021, 0x24820008,
0xaf8200e4, 0x10000028, 0xaf8200e8, 0x8f4203c8,
0x24420001, 0xaf4203c8, 0x8f4203c8, 0x8c850000,
0x8f420148, 0xa71823, 0x43102b, 0x10400003,
0x0, 0x8f420148, 0x621821, 0x8f42014c,
0x43102b, 0x5440000a, 0xa03021, 0x8f42020c,
0x24420001, 0xaf42020c, 0x8f42020c, 0x24820008,
0xaf8200e4, 0x8f8400e4, 0x1488ffec, 0xaf8400e8,
0x1488000d, 0x27623000, 0x14820002, 0x2482fff8,
0x27623ff8, 0x94430006, 0x3c02001f, 0x3442ffff,
0xc33021, 0x46102b, 0x10400003, 0x0,
0x8f420148, 0xc23023, 0xaf8600c8, 0x8f8300c4,
0x8f420148, 0xc31823, 0x43102b, 0x10400003,
0x0, 0x8f420148, 0x621821, 0x10600005,
0x0, 0x8f42014c, 0x43102b, 0x50400008,
0x3c02fdff, 0x8f820220, 0x3c0308ff, 0x3463fffb,
0x431024, 0x3c034000, 0x1000003f, 0x431025,
0x8f4303cc, 0x3442ffff, 0x282a024, 0x24630001,
0xaf4303cc, 0x10000039, 0x8f4203cc, 0x2041024,
0x1040000e, 0x3c110200, 0x8f4203a8, 0x24420001,
0xaf4203a8, 0x8f4203a8, 0x8f820220, 0x3c0308ff,
0x3463ffff, 0x431024, 0x441025, 0xc003daf,
0xaf820220, 0x10000029, 0x0, 0x2111024,
0x50400008, 0x3c110400, 0x8f4203ac, 0x24420001,
0xaf4203ac, 0xc003daf, 0x8f4203ac, 0x10000019,
0x0, 0x2111024, 0x1040001c, 0x0,
0x8f830224, 0x24021402, 0x14620009, 0x3c050008,
0x3c040001, 0x248469f4, 0xafa00010, 0xafa00014,
0x8f860224, 0x34a50500, 0xc002b3b, 0x3821,
0x8f4203b0, 0x24420001, 0xaf4203b0, 0x8f4203b0,
0x8f820220, 0x2002021, 0x34420002, 0xc004e9c,
0xaf820220, 0x8f820220, 0x3c0308ff, 0x3463ffff,
0x431024, 0x511025, 0xaf820220, 0x8fbf0020,
0x8fb1001c, 0x8fb00018, 0x3e00008, 0x27bd0028,
0x3e00008, 0x0, 0x3c020001, 0x8c426da8,
0x27bdffb0, 0xafbf0048, 0xafbe0044, 0xafb50040,
0xafb3003c, 0xafb20038, 0xafb10034, 0x1040000f,
0xafb00030, 0x3c040001, 0x24846a00, 0x3c050008,
0xafa00010, 0xafa00014, 0x8f860220, 0x34a50600,
0x24020001, 0x3c010001, 0xac206da8, 0x3c010001,
0xac226d9c, 0xc002b3b, 0x3821, 0x3c037fff,
0x8c020268, 0x3463ffff, 0x3c04fdff, 0x431024,
0xac020268, 0x8f420004, 0x3484ffff, 0x30420002,
0x10400092, 0x284a024, 0x3c040600, 0x34842000,
0x8f420004, 0x2821, 0x2403fffd, 0x431024,
0xaf420004, 0xafa40020, 0x8f5e0018, 0x27aa0020,
0x240200ff, 0x13c20002, 0xafaa002c, 0x27c50001,
0x8c020228, 0xa09021, 0x1642000e, 0x1e38c0,
0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
0x8c020228, 0x3c040001, 0x24846998, 0x3c050009,
0xafa00014, 0xafa20010, 0x8fa60020, 0x1000006d,
0x34a50500, 0xf71021, 0x8fa30020, 0x8fa40024,
0xac4304c0, 0xac4404c4, 0x8f830054, 0x8f820054,
0x247003e8, 0x2021023, 0x2c4203e9, 0x1040001b,
0x9821, 0xe08821, 0x263504c0, 0x8f440178,
0x8f45017c, 0x2201821, 0x240a0004, 0xafaa0010,
0xafb20014, 0x8f48000c, 0x1021, 0x2f53021,
0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
0xa3482b, 0x822021, 0x100f809, 0x892021,
0x54400006, 0x24130001, 0x8f820054, 0x2021023,
0x2c4203e9, 0x1440ffe9, 0x0, 0x326200ff,
0x54400017, 0xaf520018, 0x8f420378, 0x24420001,
0xaf420378, 0x8f420378, 0x8f820120, 0x8faa002c,
0xafa20010, 0x8f820124, 0x3c040001, 0x248469a4,
0x3c050009, 0xafa20014, 0x8d460000, 0x10000035,
0x34a50600, 0x8f420308, 0x24130001, 0x24420001,
0xaf420308, 0x8f420308, 0x1000001e, 0x326200ff,
0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
0x2c4203e9, 0x10400016, 0x9821, 0x3c150020,
0x24110010, 0x8f42000c, 0x8f440160, 0x8f450164,
0x8f860120, 0xafb10010, 0xafb20014, 0x551025,
0xafa20018, 0x8f42010c, 0x24070008, 0x40f809,
0x24c6001c, 0x1440ffe3, 0x0, 0x8f820054,
0x2021023, 0x2c4203e9, 0x1440ffee, 0x0,
0x326200ff, 0x14400011, 0x0, 0x8f420378,
0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
0x8faa002c, 0xafa20010, 0x8f820124, 0x3c040001,
0x248469ac, 0x3c050009, 0xafa20014, 0x8d460000,
0x34a50700, 0xc002b3b, 0x3c03821, 0x8f4202ec,
0x24420001, 0xaf4202ec, 0x8f4202ec, 0x8fbf0048,
0x8fbe0044, 0x8fb50040, 0x8fb3003c, 0x8fb20038,
0x8fb10034, 0x8fb00030, 0x3e00008, 0x27bd0050,
0x3c020001, 0x8c426da8, 0x27bdffe0, 0x1440000d,
0xafbf0018, 0x3c040001, 0x24846a0c, 0x3c050008,
0xafa00010, 0xafa00014, 0x8f860220, 0x34a50700,
0x24020001, 0x3c010001, 0xac226da8, 0xc002b3b,
0x3821, 0x3c020004, 0x2c21024, 0x10400007,
0x0, 0x8f820220, 0x3c0308ff, 0x3463ffff,
0x431024, 0x34420008, 0xaf820220, 0x3c050001,
0x8ca56d98, 0x24020001, 0x14a20007, 0x2021,
0xc00529b, 0x24050001, 0xac02026c, 0x8c03026c,
0x10000006, 0x3c020007, 0xc00529b, 0x2021,
0xac020268, 0x8c030268, 0x3c020007, 0x621824,
0x3c020002, 0x5062000d, 0x3c0205f5, 0x43102b,
0x14400006, 0x3c020004, 0x3c020001, 0x10620009,
0x3c020098, 0x1000000b, 0x0, 0x14620009,
0x3c023b9a, 0x10000004, 0x3442ca00, 0x10000002,
0x3442e100, 0x34429680, 0xaf4201fc, 0x8f4201fc,
0xaee20064, 0x8fbf0018, 0x3e00008, 0x27bd0020,
0x0, 0x0, 0x0, 0x86102b,
0x50400001, 0x872023, 0xc41023, 0x24843,
0x125102b, 0x1040001b, 0x91040, 0x824021,
0x88102b, 0x10400007, 0x1821, 0x94820000,
0x24840002, 0x621821, 0x88102b, 0x1440fffb,
0x0, 0x602021, 0xc73023, 0xa91023,
0x21040, 0xc22821, 0xc5102b, 0x10400007,
0x1821, 0x94c20000, 0x24c60002, 0x621821,
0xc5102b, 0x1440fffb, 0x0, 0x1000000d,
0x832021, 0x51040, 0x822821, 0x85102b,
0x10400007, 0x1821, 0x94820000, 0x24840002,
0x621821, 0x85102b, 0x1440fffb, 0x0,
0x602021, 0x41c02, 0x3082ffff, 0x622021,
0x41c02, 0x3082ffff, 0x622021, 0x3e00008,
0x3082ffff, 0x3e00008, 0x0, 0x802821,
0x30a20001, 0x1040002b, 0x3c03001f, 0x3463ffff,
0x24a20004, 0x62102b, 0x54400007, 0x65102b,
0x90a20001, 0x90a40003, 0x90a30000, 0x90a50002,
0x1000002a, 0x441021, 0x10400003, 0x0,
0x8f420148, 0xa22823, 0x90a40000, 0x24a50001,
0x65102b, 0x10400003, 0x0, 0x8f420148,
0xa22823, 0x90a20000, 0x24a50001, 0x21200,
0x822021, 0x65102b, 0x10400003, 0x0,
0x8f420148, 0xa22823, 0x90a20000, 0x24a50001,
0x822021, 0x65102b, 0x10400003, 0x0,
0x8f420148, 0xa22823, 0x90a20000, 0x1000002d,
0x21200, 0x3463ffff, 0x24a20004, 0x62102b,
0x5440000a, 0x65102b, 0x90a20000, 0x90a40002,
0x90a30001, 0x90a50003, 0x441021, 0x21200,
0x651821, 0x10000020, 0x432021, 0x10400003,
0x0, 0x8f420148, 0xa22823, 0x90a20000,
0x24a50001, 0x22200, 0x65102b, 0x10400003,
0x0, 0x8f420148, 0xa22823, 0x90a20000,
0x24a50001, 0x822021, 0x65102b, 0x10400003,
0x0, 0x8f420148, 0xa22823, 0x90a20000,
0x24a50001, 0x21200, 0x822021, 0x65102b,
0x10400003, 0x0, 0x8f420148, 0xa22823,
0x90a20000, 0x822021, 0x41c02, 0x3082ffff,
0x622021, 0x41c02, 0x3082ffff, 0x622021,
0x3e00008, 0x3082ffff, 0x0, 0x8f820220,
0x34420002, 0xaf820220, 0x3c020002, 0x8c428ff8,
0x30424000, 0x10400054, 0x24040001, 0x8f820200,
0x24067fff, 0x8f830200, 0x30450002, 0x2402fffd,
0x621824, 0xaf830200, 0xaf840204, 0x8f830054,
0x8f820054, 0x10000002, 0x24630001, 0x8f820054,
0x621023, 0x2c420002, 0x1440fffc, 0x0,
0x8f820224, 0x1444004d, 0x42040, 0xc4102b,
0x1040fff1, 0x0, 0x8f820200, 0x451025,
0xaf820200, 0x8f820220, 0x34428000, 0xaf820220,
0x8f830054, 0x8f820054, 0x10000002, 0x24630001,
0x8f820054, 0x621023, 0x2c420002, 0x1440fffc,
0x0, 0x8f820220, 0x3c030004, 0x431024,
0x1440000f, 0x0, 0x8f820220, 0x3c03ffff,
0x34637fff, 0x431024, 0xaf820220, 0x8f830054,
0x8f820054, 0x10000002, 0x24630001, 0x8f820054,
0x621023, 0x2c420002, 0x1440fffc, 0x0,
0x8f820220, 0x3c030004, 0x431024, 0x1440000d,
0x0, 0x8f820220, 0x34428000, 0xaf820220,
0x8f830054, 0x8f820054, 0x10000002, 0x24630001,
0x8f820054, 0x621023, 0x2c420002, 0x1440fffc,
0x0, 0x8f820220, 0x3c030004, 0x431024,
0x1040001b, 0x1021, 0x8f830220, 0x24020001,
0x10000015, 0x3c04f700, 0x8f820220, 0x3c04f700,
0x441025, 0xaf820220, 0x8f820220, 0x2403fffd,
0x431024, 0xaf820220, 0x8f820220, 0x3c030300,
0x431024, 0x14400003, 0x0, 0x10000008,
0x1021, 0x8f820220, 0x34420002, 0xaf820220,
0x8f830220, 0x24020001, 0x641825, 0xaf830220,
0x3e00008, 0x0, 0x2021, 0x3c050100,
0x24020001, 0xaf80021c, 0xaf820200, 0xaf820220,
0x27625000, 0xaf8200c0, 0x27625000, 0xaf8200c4,
0x27625000, 0xaf8200c8, 0x27625000, 0xaf8200d0,
0x27625000, 0xaf8200d4, 0x27625000, 0xaf8200d8,
0x27623000, 0xaf8200e0, 0x27623000, 0xaf8200e4,
0x27623000, 0xaf8200e8, 0x27622800, 0xaf8200f0,
0x27622800, 0xaf8200f4, 0x27622800, 0xaf8200f8,
0x418c0, 0x24840001, 0x3631021, 0xac453004,
0x3631021, 0xac403000, 0x28820200, 0x1440fff9,
0x418c0, 0x2021, 0x418c0, 0x24840001,
0x3631021, 0xac402804, 0x3631021, 0xac402800,
0x28820100, 0x1440fff9, 0x418c0, 0xaf80023c,
0x24030080, 0x24040100, 0xac600000, 0x24630004,
0x64102b, 0x5440fffd, 0xac600000, 0x8f830040,
0x3c02f000, 0x621824, 0x3c025000, 0x1062000c,
0x43102b, 0x14400006, 0x3c026000, 0x3c024000,
0x10620008, 0x24020800, 0x10000008, 0x0,
0x10620004, 0x24020800, 0x10000004, 0x0,
0x24020700, 0x3c010001, 0xac226dac, 0x3e00008,
0x0, 0x3c020001, 0x8c426dbc, 0x27bdffd0,
0xafbf002c, 0xafb20028, 0xafb10024, 0xafb00020,
0x3c010001, 0x10400005, 0xac206d94, 0xc004d9e,
0x0, 0x3c010001, 0xac206dbc, 0x8f830054,
0x8f820054, 0x10000002, 0x24630064, 0x8f820054,
0x621023, 0x2c420065, 0x1440fffc, 0x0,
0xc004db9, 0x0, 0x24040001, 0x2821,
0x27a60018, 0x34028000, 0xc0045be, 0xa7a20018,
0x8f830054, 0x8f820054, 0x10000002, 0x24630064,
0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
0x24040001, 0x24050001, 0xc00457c, 0x27a60018,
0x8f830054, 0x8f820054, 0x10000002, 0x24630064,
0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
0x24040001, 0x24050001, 0xc00457c, 0x27a60018,
0x8f830054, 0x8f820054, 0x10000002, 0x24630064,
0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
0x24040001, 0x3c060001, 0x24c66f24, 0xc00457c,
0x24050002, 0x8f830054, 0x8f820054, 0x10000002,
0x24630064, 0x8f820054, 0x621023, 0x2c420065,
0x1440fffc, 0x24040001, 0x24050003, 0x3c100001,
0x26106f26, 0xc00457c, 0x2003021, 0x97a60018,
0x3c070001, 0x94e76f24, 0x3c040001, 0x24846ae0,
0xafa00014, 0x96020000, 0x3c05000d, 0x34a50100,
0xc002b3b, 0xafa20010, 0x97a20018, 0x1040004d,
0x24036040, 0x96020000, 0x3042fff0, 0x1443000c,
0x24020020, 0x3c030001, 0x94636f24, 0x1462000b,
0x24027830, 0x24020003, 0x3c010001, 0xac226d94,
0x24020005, 0x3c010001, 0x1000003f, 0xac226f34,
0x3c030001, 0x94636f24, 0x24027830, 0x1462000c,
0x24030010, 0x3c020001, 0x94426f26, 0x3042fff0,
0x14430007, 0x24020003, 0x3c010001, 0xac226d94,
0x24020006, 0x3c010001, 0x1000002f, 0xac226f34,
0x3c020001, 0x8c426d94, 0x3c030001, 0x94636f24,
0x34420001, 0x3c010001, 0xac226d94, 0x24020015,
0x1462000b, 0x0, 0x3c020001, 0x94426f26,
0x3042fff0, 0x3843f420, 0x2c630001, 0x3842f430,
0x2c420001, 0x621825, 0x1460001b, 0x24020003,
0x3c030001, 0x94636f24, 0x24027810, 0x14620016,
0x24020002, 0x3c020001, 0x94426f26, 0x3042fff0,
0x14400011, 0x24020002, 0x1000000f, 0x24020004,
0x3c020001, 0x8c426d94, 0x34420008, 0x3c010001,
0xac226d94, 0x1000005e, 0x24020004, 0x3c020001,
0x8c426d94, 0x34420004, 0x3c010001, 0x100000af,
0xac226d94, 0x24020001, 0x3c010001, 0xac226f40,
0x3c020001, 0x8c426d94, 0x30420002, 0x144000b2,
0x3c09fff0, 0x24020e00, 0xaf820238, 0x8f840054,
0x8f820054, 0x24030008, 0x3c010001, 0xac236d98,
0x10000002, 0x248401f4, 0x8f820054, 0x821023,
0x2c4201f5, 0x1440fffc, 0x3c0200c8, 0x344201fb,
0xaf820238, 0x8f830054, 0x8f820054, 0x10000002,
0x246301f4, 0x8f820054, 0x621023, 0x2c4201f5,
0x1440fffc, 0x8021, 0x24120001, 0x24110009,
0xc004482, 0x0, 0x3c010001, 0xac326db4,
0xc004547, 0x0, 0x3c020001, 0x8c426db4,
0x1451fffb, 0x3c0200c8, 0x344201f6, 0xaf820238,
0x8f830054, 0x8f820054, 0x10000002, 0x2463000a,
0x8f820054, 0x621023, 0x2c42000b, 0x1440fffc,
0x0, 0x8f820220, 0x24040001, 0x34420002,
0xaf820220, 0x8f830200, 0x24057fff, 0x2402fffd,
0x621824, 0xaf830200, 0xaf840204, 0x8f830054,
0x8f820054, 0x10000002, 0x24630001, 0x8f820054,
0x621023, 0x2c420002, 0x1440fffc, 0x0,
0x8f820224, 0x14440005, 0x34028000, 0x42040,
0xa4102b, 0x1040fff0, 0x34028000, 0x1082ffa0,
0x26100001, 0x2e020014, 0x1440ffcd, 0x24020004,
0x3c010001, 0xac226d98, 0x8021, 0x24120009,
0x3c11ffff, 0x36313f7f, 0xc004482, 0x0,
0x24020001, 0x3c010001, 0xac226db4, 0xc004547,
0x0, 0x3c020001, 0x8c426db4, 0x1452fffb,
0x0, 0x8f820044, 0x511024, 0x34425080,
0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
0x2463000a, 0x8f820054, 0x621023, 0x2c42000b,
0x1440fffc, 0x0, 0x8f820044, 0x511024,
0x3442f080, 0xaf820044, 0x8f830054, 0x8f820054,
0x10000002, 0x2463000a, 0x8f820054, 0x621023,
0x2c42000b, 0x1440fffc, 0x0, 0x8f820220,
0x3c03f700, 0x431025, 0xaf820220, 0x8f830054,
0x8f820054, 0x10000002, 0x24630064, 0x8f820054,
0x621023, 0x2c420065, 0x1440fffc, 0x0,
0x8f820220, 0x24040001, 0x34420002, 0xaf820220,
0x8f830200, 0x24057fff, 0x2402fffd, 0x621824,
0xaf830200, 0xaf840204, 0x8f830054, 0x8f820054,
0x10000002, 0x24630001, 0x8f820054, 0x621023,
0x2c420002, 0x1440fffc, 0x0, 0x8f820224,
0x14440005, 0x34028000, 0x42040, 0xa4102b,
0x1040fff0, 0x34028000, 0x1082ff50, 0x26100001,
0x2e020064, 0x1440ffb0, 0x0, 0x3c020001,
0x8c426d94, 0x30420004, 0x14400007, 0x3c09fff0,
0x8f820044, 0x3c03ffff, 0x34633f7f, 0x431024,
0xaf820044, 0x3c09fff0, 0x3529bdc0, 0x3c060001,
0x8cc66d94, 0x3c040001, 0x24846ae0, 0x24020001,
0x3c010001, 0xac226d9c, 0x8f820054, 0x3c070001,
0x8ce76f40, 0x3c030001, 0x94636f24, 0x3c080001,
0x95086f26, 0x3c05000d, 0x34a50100, 0x3c010001,
0xac206d98, 0x491021, 0x3c010001, 0xac226f30,
0xafa30010, 0xc002b3b, 0xafa80014, 0x8fbf002c,
0x8fb20028, 0x8fb10024, 0x8fb00020, 0x3e00008,
0x27bd0030, 0x27bdffe8, 0x3c050001, 0x8ca56d98,
0x24060004, 0x24020001, 0x14a20014, 0xafbf0010,
0x3c020002, 0x8c428ffc, 0x30428000, 0x10400005,
0x3c04000f, 0x3c030001, 0x8c636f40, 0x10000005,
0x34844240, 0x3c040004, 0x3c030001, 0x8c636f40,
0x348493e0, 0x24020005, 0x14620016, 0x0,
0x3c04003d, 0x10000013, 0x34840900, 0x3c020002,
0x8c428ff8, 0x30428000, 0x10400005, 0x3c04001e,
0x3c030001, 0x8c636f40, 0x10000005, 0x34848480,
0x3c04000f, 0x3c030001, 0x8c636f40, 0x34844240,
0x24020005, 0x14620003, 0x0, 0x3c04007a,
0x34841200, 0x3c020001, 0x8c426f30, 0x8f830054,
0x441021, 0x431023, 0x44102b, 0x1440004c,
0x0, 0x3c020001, 0x8c426da0, 0x14400048,
0x0, 0x3c010001, 0x10c00025, 0xac206db0,
0x3c090001, 0x8d296d94, 0x24070001, 0x3c044000,
0x3c080002, 0x25088ffc, 0x250afffc, 0x52842,
0x14a00002, 0x24c6ffff, 0x24050008, 0xa91024,
0x10400010, 0x0, 0x14a70008, 0x0,
0x8d020000, 0x441024, 0x1040000a, 0x0,
0x3c010001, 0x10000007, 0xac256db0, 0x8d420000,
0x441024, 0x10400003, 0x0, 0x3c010001,
0xac276db0, 0x3c020001, 0x8c426db0, 0x6182b,
0x2c420001, 0x431024, 0x5440ffe5, 0x52842,
0x8f820054, 0x3c030001, 0x8c636db0, 0x3c010001,
0xac226f30, 0x1060003b, 0x24020005, 0x3c030001,
0x8c636f40, 0x3c010001, 0xac256d98, 0x14620012,
0x24020001, 0x3c020002, 0x8c428ff8, 0x3c032000,
0x34635000, 0x431024, 0x14400006, 0x24020001,
0x3c010001, 0xac206f1c, 0x3c010001, 0xac226d98,
0x24020001, 0x3c010001, 0xac226e24, 0x3c010001,
0xac226da4, 0x24020001, 0x3c010001, 0xac226d9c,
0x3c020001, 0x8c426db0, 0x1040001e, 0x0,
0x3c020001, 0x8c426d9c, 0x10400008, 0x24020001,
0x3c010001, 0xac206d9c, 0xaee204b8, 0x3c010001,
0xac206e1c, 0x3c010001, 0xac226dd4, 0x8ee304b8,
0x24020008, 0x10620005, 0x24020001, 0xc004239,
0x0, 0x1000000b, 0x0, 0x3c030001,
0x8c636d98, 0x10620007, 0x2402000e, 0x3c030002,
0x8c638f90, 0x10620003, 0x0, 0xc004e9c,
0x8f840220, 0x8fbf0010, 0x3e00008, 0x27bd0018,
0x27bdffe0, 0x3c03fdff, 0x3c040001, 0x8c846d98,
0x3c020001, 0x8c426dc0, 0x3463ffff, 0x283a024,
0x14820006, 0xafbf0018, 0x8ee304b8, 0x3c020001,
0x8c426dc4, 0x10620006, 0x0, 0x8ee204b8,
0x3c010001, 0xac246dc0, 0x3c010001, 0xac226dc4,
0x3c030001, 0x8c636d98, 0x24020002, 0x1062019c,
0x2c620003, 0x10400005, 0x24020001, 0x1062000a,
0x0, 0x10000226, 0x0, 0x24020004,
0x106200b6, 0x24020008, 0x1062010a, 0x24020001,
0x1000021f, 0x0, 0x8ee204b8, 0x2443ffff,
0x2c620008, 0x1040021c, 0x31080, 0x3c010001,
0x220821, 0x8c226af8, 0x400008, 0x0,
0x3c030001, 0x8c636f40, 0x24020005, 0x14620010,
0x0, 0x3c020001, 0x8c426da4, 0x10400008,
0x24020003, 0xc004482, 0x0, 0x24020002,
0xaee204b8, 0x3c010001, 0x10000002, 0xac206da4,
0xaee204b8, 0x3c010001, 0x10000203, 0xac206d30,
0xc004482, 0x0, 0x3c020001, 0x8c426da4,
0x3c010001, 0xac206d30, 0x1440017a, 0x24020002,
0x1000019d, 0x24020007, 0x3c030001, 0x8c636f40,
0x24020005, 0x14620003, 0x24020001, 0x3c010001,
0xac226dd0, 0xc0045ff, 0x0, 0x3c030001,
0x8c636dd0, 0x10000174, 0x24020011, 0x3c050001,
0x8ca56d98, 0x3c060002, 0x8cc68ffc, 0xc005104,
0x2021, 0x24020005, 0x3c010001, 0xac206da4,
0x100001e1, 0xaee204b8, 0x3c040001, 0x24846aec,
0x3c05000f, 0x34a50100, 0x3021, 0x3821,
0xafa00010, 0xc002b3b, 0xafa00014, 0x100001d6,
0x0, 0x8f820220, 0x3c030004, 0x431024,
0x14400175, 0x24020007, 0x8f830054, 0x3c020001,
0x8c426f28, 0x2463d8f0, 0x431023, 0x2c422710,
0x14400003, 0x24020001, 0x3c010001, 0xac226d9c,
0x3c020002, 0x8c428ffc, 0x30425000, 0x104001c2,
0x0, 0x8f820220, 0x30428000, 0x1040017d,
0x0, 0x10000175, 0x0, 0x3c050001,
0x8ca56d98, 0xc00529b, 0x2021, 0xc00551b,
0x2021, 0x3c030002, 0x8c638ff4, 0x46101b0,
0x24020001, 0x3c020008, 0x621024, 0x10400006,
0x0, 0x8f820214, 0x3c03ffff, 0x431024,
0x10000005, 0x3442251f, 0x8f820214, 0x3c03ffff,
0x431024, 0x3442241f, 0xaf820214, 0x8f820220,
0x3c030200, 0x34420002, 0xaf820220, 0x24020008,
0xaee204b8, 0x8f820220, 0x283a025, 0x3c030004,
0x431024, 0x14400016, 0x0, 0x3c020002,
0x8c428ffc, 0x30425000, 0x1040000d, 0x0,
0x8f820220, 0x30428000, 0x10400006, 0x0,
0x8f820220, 0x3c03ffff, 0x34637fff, 0x10000003,
0x431024, 0x8f820220, 0x34428000, 0xaf820220,
0x8f820220, 0x3c03f700, 0x431025, 0xaf820220,
0x3c030001, 0x8c636f40, 0x24020005, 0x1462000a,
0x0, 0x3c020001, 0x94426f26, 0x24429fbc,
0x2c420004, 0x10400004, 0x24040018, 0x24050002,
0xc004ddb, 0x24060020, 0xc003e6d, 0x0,
0x3c010001, 0x10000170, 0xac206e20, 0x8ee204b8,
0x2443ffff, 0x2c620008, 0x1040016b, 0x31080,
0x3c010001, 0x220821, 0x8c226b18, 0x400008,
0x0, 0xc004547, 0x0, 0x3c030001,
0x8c636db4, 0x100000e8, 0x24020009, 0x3c020002,
0x8c428ff8, 0x30424000, 0x10400004, 0x0,
0x8f820044, 0x10000006, 0x3442f080, 0x8f820044,
0x3c03ffff, 0x34633f7f, 0x431024, 0x3442a080,
0xaf820044, 0x8f830054, 0x100000ea, 0x24020004,
0x8f830054, 0x3c020001, 0x8c426f28, 0x2463d8f0,
0x431023, 0x2c422710, 0x14400147, 0x24020005,
0x100000d8, 0x0, 0x8f820220, 0x3c03f700,
0x431025, 0xaf820220, 0xaf800204, 0x3c010002,
0x100000d6, 0xac208fe0, 0x8f830054, 0x3c020001,
0x8c426f28, 0x2463fff6, 0x431023, 0x2c42000a,
0x14400135, 0x24020007, 0x100000d7, 0x0,
0xc003f50, 0x0, 0x1040012d, 0x24020001,
0x8f820214, 0x3c03ffff, 0x3c040001, 0x8c846f1c,
0x431024, 0x3442251f, 0xaf820214, 0x24020008,
0x10800005, 0xaee204b8, 0x3c020001, 0x8c426e44,
0x10400064, 0x24020001, 0x8f820220, 0x3c030008,
0x431024, 0x1040006a, 0x3c020200, 0x10000078,
0x0, 0x8ee204b8, 0x2443ffff, 0x2c620007,
0x10400115, 0x31080, 0x3c010001, 0x220821,
0x8c226b38, 0x400008, 0x0, 0xc003daf,
0x0, 0x3c010001, 0xac206d9c, 0xaf800204,
0x3c010002, 0xc004482, 0xac208fe0, 0x24020001,
0x3c010001, 0xac226db4, 0x24020002, 0x10000102,
0xaee204b8, 0xc004547, 0x0, 0x3c030001,
0x8c636db4, 0x10000084, 0x24020009, 0x3c020002,
0x8c428ff8, 0x30424000, 0x10400003, 0x3c0200c8,
0x10000002, 0x344201f6, 0x344201fe, 0xaf820238,
0x8f830054, 0x1000008b, 0x24020004, 0x8f830054,
0x3c020001, 0x8c426f28, 0x2463d8f0, 0x431023,
0x2c422710, 0x144000e8, 0x24020005, 0x10000079,
0x0, 0x8f820220, 0x3c03f700, 0x431025,
0xaf820220, 0xaf800204, 0x3c010002, 0x10000077,
0xac208fe0, 0x8f830054, 0x3c020001, 0x8c426f28,
0x2463fff6, 0x431023, 0x2c42000a, 0x144000d6,
0x24020007, 0x10000078, 0x0, 0xc003f50,
0x0, 0x104000ce, 0x24020001, 0x8f820214,
0x3c03ffff, 0x3c040001, 0x8c846f1c, 0x431024,
0x3442251f, 0xaf820214, 0x24020008, 0x1080000f,
0xaee204b8, 0x3c020001, 0x8c426e44, 0x1440000b,
0x0, 0x8f820220, 0x34420002, 0xaf820220,
0x24020001, 0x3c010002, 0xac228f90, 0xc004e9c,
0x8f840220, 0x10000016, 0x0, 0x8f820220,
0x3c030008, 0x431024, 0x14400011, 0x3c020200,
0x282a025, 0x2402000e, 0x3c010002, 0xac228f90,
0xc00551b, 0x2021, 0x8f820220, 0x34420002,
0xc003e6d, 0xaf820220, 0x3c050001, 0x8ca56d98,
0xc00529b, 0x2021, 0x100000a3, 0x0,
0x3c020001, 0x8c426e44, 0x1040009f, 0x0,
0x3c020001, 0x8c426e40, 0x2442ffff, 0x3c010001,
0xac226e40, 0x14400098, 0x24020002, 0x3c010001,
0xac206e44, 0x3c010001, 0x10000093, 0xac226e40,
0x8ee204b8, 0x2443ffff, 0x2c620007, 0x1040008e,
0x31080, 0x3c010001, 0x220821, 0x8c226b58,
0x400008, 0x0, 0x3c020001, 0x8c426da4,
0x10400018, 0x24020005, 0xc004482, 0x0,
0x24020002, 0xaee204b8, 0x3c010001, 0x1000007e,
0xac206da4, 0xc004963, 0x0, 0x3c030001,
0x8c636dd4, 0x24020006, 0x14620077, 0x24020003,
0x10000075, 0xaee204b8, 0x3c050001, 0x8ca56d98,
0x3c060002, 0x8cc68ff8, 0xc005104, 0x2021,
0x24020005, 0x1000006c, 0xaee204b8, 0x8f820220,
0x3c03f700, 0x431025, 0xaf820220, 0x8f830054,
0x24020006, 0xaee204b8, 0x3c010001, 0x10000062,
0xac236f28, 0x8f820220, 0x3c030004, 0x431024,
0x10400003, 0x24020007, 0x1000005b, 0xaee204b8,
0x8f830054, 0x3c020001, 0x8c426f28, 0x2463d8f0,
0x431023, 0x2c422710, 0x14400003, 0x24020001,
0x3c010001, 0xac226d9c, 0x3c020002, 0x8c428ff8,
0x30425000, 0x1040004c, 0x0, 0x8f820220,
0x30428000, 0x10400007, 0x0, 0x8f820220,
0x3c03ffff, 0x34637fff, 0x431024, 0x10000042,
0xaf820220, 0x8f820220, 0x34428000, 0x1000003e,
0xaf820220, 0x3c050001, 0x8ca56d98, 0xc00529b,
0x2021, 0xc00551b, 0x2021, 0x3c020002,
0x8c428ff0, 0x4410032, 0x24020001, 0x8f820214,
0x3c03ffff, 0x431024, 0x3442251f, 0xaf820214,
0x24020008, 0xaee204b8, 0x8f820220, 0x34420002,
0xaf820220, 0x8f820220, 0x3c030004, 0x431024,
0x14400016, 0x0, 0x3c020002, 0x8c428ff8,
0x30425000, 0x1040000d, 0x0, 0x8f820220,
0x30428000, 0x10400006, 0x0, 0x8f820220,
0x3c03ffff, 0x34637fff, 0x10000003, 0x431024,
0x8f820220, 0x34428000, 0xaf820220, 0x8f820220,
0x3c03f700, 0x431025, 0xaf820220, 0x3c020001,
0x94426f26, 0x24429fbc, 0x2c420004, 0x10400004,
0x24040018, 0x24050002, 0xc004ddb, 0x24060020,
0xc003e6d, 0x0, 0x10000003, 0x0,
0x3c010001, 0xac226d9c, 0x8fbf0018, 0x3e00008,
0x27bd0020, 0x8f820200, 0x8f820220, 0x8f820220,
0x34420004, 0xaf820220, 0x8f820200, 0x3c050001,
0x8ca56d98, 0x34420004, 0xaf820200, 0x24020002,
0x10a2004b, 0x2ca20003, 0x10400005, 0x24020001,
0x10a2000a, 0x0, 0x100000b1, 0x0,
0x24020004, 0x10a20072, 0x24020008, 0x10a20085,
0x3c02f0ff, 0x100000aa, 0x0, 0x8f830050,
0x3c02f0ff, 0x3442ffff, 0x3c040001, 0x8c846f40,
0x621824, 0x3c020700, 0x621825, 0x24020e00,
0x2484fffb, 0x2c840002, 0xaf830050, 0xaf850200,
0xaf850220, 0x14800006, 0xaf820238, 0x8f820044,
0x3c03ffff, 0x34633f7f, 0x431024, 0xaf820044,
0x3c030001, 0x8c636f40, 0x24020005, 0x14620004,
0x0, 0x8f820044, 0x34425000, 0xaf820044,
0x3c020001, 0x8c426d88, 0x3c030001, 0x8c636f40,
0x34420022, 0x2463fffc, 0x2c630002, 0x1460000c,
0xaf820200, 0x3c020001, 0x8c426dac, 0x3c030001,
0x8c636d90, 0x3c040001, 0x8c846d8c, 0x34428000,
0x621825, 0x641825, 0x1000000a, 0x34620002,
0x3c020001, 0x8c426d90, 0x3c030001, 0x8c636dac,
0x3c040001, 0x8c846d8c, 0x431025, 0x441025,
0x34420002, 0xaf820220, 0x1000002f, 0x24020001,
0x24020e01, 0xaf820238, 0x8f830050, 0x3c02f0ff,
0x3442ffff, 0x3c040001, 0x8c846f1c, 0x621824,
0x3c020d00, 0x621825, 0x24020001, 0xaf830050,
0xaf820200, 0xaf820220, 0x10800005, 0x3c033f00,
0x3c020001, 0x8c426d80, 0x10000004, 0x34630070,
0x3c020001, 0x8c426d80, 0x34630072, 0x431025,
0xaf820200, 0x3c030001, 0x8c636d84, 0x3c02f700,
0x621825, 0x3c020001, 0x8c426d90, 0x3c040001,
0x8c846dac, 0x3c050001, 0x8ca56f40, 0x431025,
0x441025, 0xaf820220, 0x24020005, 0x14a20006,
0x24020001, 0x8f820044, 0x2403afff, 0x431024,
0xaf820044, 0x24020001, 0x1000003d, 0xaf820238,
0x8f830050, 0x3c02f0ff, 0x3442ffff, 0x3c040001,
0x8c846f1c, 0x621824, 0x3c020a00, 0x621825,
0x24020001, 0xaf830050, 0xaf820200, 0x1080001e,
0xaf820220, 0x3c020001, 0x8c426e44, 0x1440001a,
0x3c033f00, 0x3c020001, 0x8c426d80, 0x1000001a,
0x346300e0, 0x8f830050, 0x3c040001, 0x8c846f1c,
0x3442ffff, 0x621824, 0x1080000f, 0xaf830050,
0x3c020001, 0x8c426e44, 0x1440000b, 0x3c043f00,
0x3c030001, 0x8c636d80, 0x348400e0, 0x24020001,
0xaf820200, 0xaf820220, 0x641825, 0xaf830200,
0x10000008, 0x3c05f700, 0x3c020001, 0x8c426d80,
0x3c033f00, 0x346300e2, 0x431025, 0xaf820200,
0x3c05f700, 0x34a58000, 0x3c030001, 0x8c636d84,
0x3c020001, 0x8c426d90, 0x3c040001, 0x8c846dac,
0x651825, 0x431025, 0x441025, 0xaf820220,
0x3e00008, 0x0, 0x3c030001, 0x8c636db4,
0x3c020001, 0x8c426db8, 0x10620003, 0x24020002,
0x3c010001, 0xac236db8, 0x1062001d, 0x2c620003,
0x10400025, 0x24020001, 0x14620023, 0x24020004,
0x3c030001, 0x8c636d98, 0x10620006, 0x24020008,
0x1462000c, 0x3c0200c8, 0x344201fb, 0x10000009,
0xaf820238, 0x24020e01, 0xaf820238, 0x8f820044,
0x3c03ffff, 0x34633f7f, 0x431024, 0x34420080,
0xaf820044, 0x8f830054, 0x24020002, 0x3c010001,
0xac226db4, 0x3c010001, 0x1000000b, 0xac236f2c,
0x8f830054, 0x3c020001, 0x8c426f2c, 0x2463d8f0,
0x431023, 0x2c422710, 0x14400003, 0x24020009,
0x3c010001, 0xac226db4, 0x3e00008, 0x0,
0x0, 0x0, 0x0, 0x27bdffd8,
0xafb20018, 0x809021, 0xafb3001c, 0xa09821,
0xafb10014, 0xc08821, 0xafb00010, 0x8021,
0xafbf0020, 0xa6200000, 0xc004d78, 0x24040001,
0x26100001, 0x2e020020, 0x1440fffb, 0x0,
0xc004d78, 0x2021, 0xc004d78, 0x24040001,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0x24100010, 0x2501024, 0x10400002, 0x2021,
0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
0x2501024, 0x24100010, 0x2701024, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fffa, 0x2701024, 0xc004db9, 0x34108000,
0xc004db9, 0x0, 0xc004d58, 0x0,
0x50400005, 0x108042, 0x96220000, 0x501025,
0xa6220000, 0x108042, 0x1600fff7, 0x0,
0xc004db9, 0x0, 0x8fbf0020, 0x8fb3001c,
0x8fb20018, 0x8fb10014, 0x8fb00010, 0x3e00008,
0x27bd0028, 0x27bdffd8, 0xafb10014, 0x808821,
0xafb20018, 0xa09021, 0xafb3001c, 0xc09821,
0xafb00010, 0x8021, 0xafbf0020, 0xc004d78,
0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
0x0, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0x24100010, 0x2301024, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fffa, 0x2301024, 0x24100010, 0x2501024,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x2501024, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0x34108000,
0x96620000, 0x501024, 0x10400002, 0x2021,
0x24040001, 0xc004d78, 0x108042, 0x1600fff8,
0x0, 0xc004db9, 0x0, 0x8fbf0020,
0x8fb3001c, 0x8fb20018, 0x8fb10014, 0x8fb00010,
0x3e00008, 0x27bd0028, 0x3c040001, 0x8c846dd0,
0x3c020001, 0x8c426e18, 0x27bdffd8, 0xafbf0020,
0xafb1001c, 0x10820003, 0xafb00018, 0x3c010001,
0xac246e18, 0x3c030001, 0x8c636f40, 0x24020005,
0x14620005, 0x2483ffff, 0xc004963, 0x0,
0x1000034c, 0x0, 0x2c620013, 0x10400349,
0x31080, 0x3c010001, 0x220821, 0x8c226b80,
0x400008, 0x0, 0xc004db9, 0x8021,
0x34028000, 0xa7a20010, 0x27b10010, 0xc004d78,
0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
0x0, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0x24100010, 0x32020001, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fffa, 0x32020001, 0x24100010, 0xc004d78,
0x2021, 0x108042, 0x1600fffc, 0x0,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0x34108000, 0x96220000, 0x501024, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fff8, 0x0, 0xc004db9, 0x0,
0x1000030e, 0x24020002, 0x27b10010, 0xa7a00010,
0x8021, 0xc004d78, 0x24040001, 0x26100001,
0x2e020020, 0x1440fffb, 0x0, 0xc004d78,
0x2021, 0xc004d78, 0x24040001, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0x24100010,
0x32020001, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020001,
0x24100010, 0xc004d78, 0x2021, 0x108042,
0x1600fffc, 0x0, 0xc004db9, 0x34108000,
0xc004db9, 0x0, 0xc004d58, 0x0,
0x50400005, 0x108042, 0x96220000, 0x501025,
0xa6220000, 0x108042, 0x1600fff7, 0x0,
0xc004db9, 0x0, 0x97a20010, 0x30428000,
0x144002dc, 0x24020003, 0x100002d8, 0x0,
0x24021200, 0xa7a20010, 0x27b10010, 0x8021,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0x24100010, 0x32020001,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020001, 0x24100010,
0xc004d78, 0x2021, 0x108042, 0x1600fffc,
0x0, 0xc004d78, 0x24040001, 0xc004d78,
0x2021, 0x34108000, 0x96220000, 0x501024,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fff8, 0x0, 0xc004db9,
0x0, 0x8f830054, 0x10000296, 0x24020004,
0x8f830054, 0x3c020001, 0x8c426f3c, 0x2463ff9c,
0x431023, 0x2c420064, 0x1440029e, 0x24020002,
0x3c030001, 0x8c636f40, 0x10620297, 0x2c620003,
0x14400296, 0x24020011, 0x24020003, 0x10620005,
0x24020004, 0x10620291, 0x2402000f, 0x1000028f,
0x24020011, 0x1000028d, 0x24020005, 0x24020014,
0xa7a20010, 0x27b10010, 0x8021, 0xc004d78,
0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
0x0, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0x24100010, 0x32020001, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fffa, 0x32020001, 0x24100010, 0x32020012,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020012, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0x34108000,
0x96220000, 0x501024, 0x10400002, 0x2021,
0x24040001, 0xc004d78, 0x108042, 0x1600fff8,
0x0, 0xc004db9, 0x0, 0x8f830054,
0x10000248, 0x24020006, 0x8f830054, 0x3c020001,
0x8c426f3c, 0x2463ff9c, 0x431023, 0x2c420064,
0x14400250, 0x24020007, 0x1000024c, 0x0,
0x24020006, 0xa7a20010, 0x27b10010, 0x8021,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0x24100010, 0x32020001,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020001, 0x24100010,
0x32020013, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020013,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0x34108000, 0x96220000, 0x501024, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fff8, 0x0, 0xc004db9, 0x0,
0x8f830054, 0x10000207, 0x24020008, 0x8f830054,
0x3c020001, 0x8c426f3c, 0x2463ff9c, 0x431023,
0x2c420064, 0x1440020f, 0x24020009, 0x1000020b,
0x0, 0x27b10010, 0xa7a00010, 0x8021,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
0xc004d78, 0x2021, 0x24100010, 0x32020001,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020001, 0x24100010,
0x32020018, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
0xc004db9, 0x34108000, 0xc004db9, 0x0,
0xc004d58, 0x0, 0x50400005, 0x108042,
0x96220000, 0x501025, 0xa6220000, 0x108042,
0x1600fff7, 0x0, 0xc004db9, 0x8021,
0x97a20010, 0x27b10010, 0x34420001, 0xa7a20010,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0x24100010, 0x32020001,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020001, 0x24100010,
0x32020018, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0x34108000, 0x96220000, 0x501024, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fff8, 0x0, 0xc004db9, 0x0,
0x8f830054, 0x10000193, 0x2402000a, 0x8f830054,
0x3c020001, 0x8c426f3c, 0x2463ff9c, 0x431023,
0x2c420064, 0x1440019b, 0x2402000b, 0x10000197,
0x0, 0x27b10010, 0xa7a00010, 0x8021,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
0xc004d78, 0x2021, 0x24100010, 0x32020001,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020001, 0x24100010,
0x32020017, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020017,
0xc004db9, 0x34108000, 0xc004db9, 0x0,
0xc004d58, 0x0, 0x50400005, 0x108042,
0x96220000, 0x501025, 0xa6220000, 0x108042,
0x1600fff7, 0x0, 0xc004db9, 0x8021,
0x97a20010, 0x27b10010, 0x34420700, 0xa7a20010,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0x24100010, 0x32020001,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020001, 0x24100010,
0x32020017, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020017,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0x34108000, 0x96220000, 0x501024, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fff8, 0x0, 0xc004db9, 0x0,
0x8f830054, 0x1000011f, 0x2402000c, 0x8f830054,
0x3c020001, 0x8c426f3c, 0x2463ff9c, 0x431023,
0x2c420064, 0x14400127, 0x24020012, 0x10000123,
0x0, 0x27b10010, 0xa7a00010, 0x8021,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
0xc004d78, 0x2021, 0x24100010, 0x32020001,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020001, 0x24100010,
0x32020014, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020014,
0xc004db9, 0x34108000, 0xc004db9, 0x0,
0xc004d58, 0x0, 0x50400005, 0x108042,
0x96220000, 0x501025, 0xa6220000, 0x108042,
0x1600fff7, 0x0, 0xc004db9, 0x8021,
0x97a20010, 0x27b10010, 0x34420010, 0xa7a20010,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0x24100010, 0x32020001,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020001, 0x24100010,
0x32020014, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020014,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0x34108000, 0x96220000, 0x501024, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fff8, 0x0, 0xc004db9, 0x0,
0x8f830054, 0x100000ab, 0x24020013, 0x8f830054,
0x3c020001, 0x8c426f3c, 0x2463ff9c, 0x431023,
0x2c420064, 0x144000b3, 0x2402000d, 0x100000af,
0x0, 0x27b10010, 0xa7a00010, 0x8021,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
0xc004d78, 0x2021, 0x24100010, 0x32020001,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020001, 0x24100010,
0x32020018, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
0xc004db9, 0x34108000, 0xc004db9, 0x0,
0xc004d58, 0x0, 0x50400005, 0x108042,
0x96220000, 0x501025, 0xa6220000, 0x108042,
0x1600fff7, 0x0, 0xc004db9, 0x8021,
0x97a20010, 0x27b10010, 0x3042fffe, 0xa7a20010,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0x24100010, 0x32020001,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020001, 0x24100010,
0x32020018, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0x34108000, 0x96220000, 0x501024, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fff8, 0x0, 0xc004db9, 0x0,
0x8f830054, 0x10000037, 0x2402000e, 0x24020840,
0xa7a20010, 0x27b10010, 0x8021, 0xc004d78,
0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
0x0, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0x24100010, 0x32020001, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fffa, 0x32020001, 0x24100010, 0x32020013,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020013, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0x34108000,
0x96220000, 0x501024, 0x10400002, 0x2021,
0x24040001, 0xc004d78, 0x108042, 0x1600fff8,
0x0, 0xc004db9, 0x0, 0x8f830054,
0x24020010, 0x3c010001, 0xac226dd0, 0x3c010001,
0x1000000c, 0xac236f3c, 0x8f830054, 0x3c020001,
0x8c426f3c, 0x2463ff9c, 0x431023, 0x2c420064,
0x14400004, 0x0, 0x24020011, 0x3c010001,
0xac226dd0, 0x8fbf0020, 0x8fb1001c, 0x8fb00018,
0x3e00008, 0x27bd0028, 0x3c030001, 0x8c636d98,
0x27bdffc8, 0x24020002, 0xafbf0034, 0xafb20030,
0xafb1002c, 0x14620004, 0xafb00028, 0x3c120002,
0x10000003, 0x8e528ff8, 0x3c120002, 0x8e528ffc,
0x3c030001, 0x8c636dd4, 0x3c020001, 0x8c426e1c,
0x50620004, 0x2463ffff, 0x3c010001, 0xac236e1c,
0x2463ffff, 0x2c620006, 0x10400377, 0x31080,
0x3c010001, 0x220821, 0x8c226bd8, 0x400008,
0x0, 0x2021, 0x2821, 0xc004ddb,
0x34068000, 0x24040010, 0x24050002, 0x24060002,
0x24020002, 0xc004ddb, 0xa7a20018, 0x24020002,
0x3c010001, 0x10000364, 0xac226dd4, 0x27b10018,
0xa7a00018, 0x8021, 0xc004d78, 0x24040001,
0x26100001, 0x2e020020, 0x1440fffb, 0x0,
0xc004d78, 0x2021, 0xc004d78, 0x24040001,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0x24100010, 0x32020001, 0x10400002, 0x2021,
0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
0x32020001, 0x24100010, 0xc004d78, 0x2021,
0x108042, 0x1600fffc, 0x0, 0xc004db9,
0x34108000, 0xc004db9, 0x0, 0xc004d58,
0x0, 0x50400005, 0x108042, 0x96220000,
0x501025, 0xa6220000, 0x108042, 0x1600fff7,
0x0, 0xc004db9, 0x0, 0x97a20018,
0x30428000, 0x14400004, 0x24020003, 0x3c010001,
0xac226dd4, 0x24020003, 0x3c010001, 0x1000032a,
0xac226dd4, 0x24040010, 0x24050002, 0x24060002,
0x24020002, 0xc004ddb, 0xa7a20018, 0x3c030001,
0x8c636e20, 0x24020001, 0x146201e1, 0x8021,
0x27b10018, 0xa7a00018, 0xc004d78, 0x24040001,
0x26100001, 0x2e020020, 0x1440fffb, 0x0,
0xc004d78, 0x2021, 0xc004d78, 0x24040001,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0x24100010, 0x32020001, 0x10400002, 0x2021,
0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
0x32020001, 0x24100010, 0x32020018, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fffa, 0x32020018, 0xc004db9, 0x34108000,
0xc004db9, 0x0, 0xc004d58, 0x0,
0x50400005, 0x108042, 0x96220000, 0x501025,
0xa6220000, 0x108042, 0x1600fff7, 0x0,
0xc004db9, 0x8021, 0x27b10018, 0xa7a00018,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
0xc004d78, 0x2021, 0x24100010, 0x32020001,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x32020001, 0x24100010,
0x32020018, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
0xc004db9, 0x34108000, 0xc004db9, 0x0,
0xc004d58, 0x0, 0x50400005, 0x108042,
0x96220000, 0x501025, 0xa6220000, 0x108042,
0x1600fff7, 0x0, 0xc004db9, 0x8021,
0x24040018, 0x2821, 0xc004ddb, 0x24060404,
0xa7a0001a, 0xc004d78, 0x24040001, 0x26100001,
0x2e020020, 0x1440fffb, 0x0, 0xc004d78,
0x2021, 0xc004d78, 0x24040001, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0x24100010,
0x32020001, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020001,
0x24100010, 0x32020018, 0x10400002, 0x2021,
0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
0x32020018, 0xc004db9, 0x34108000, 0xc004db9,
0x0, 0xc004d58, 0x0, 0x50400005,
0x108042, 0x97a2001a, 0x501025, 0xa7a2001a,
0x108042, 0x1600fff7, 0x0, 0xc004db9,
0x8021, 0xa7a0001a, 0xc004d78, 0x24040001,
0x26100001, 0x2e020020, 0x1440fffb, 0x0,
0xc004d78, 0x2021, 0xc004d78, 0x24040001,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0x24100010, 0x32020001, 0x10400002, 0x2021,
0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
0x32020001, 0x24100010, 0x32020018, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fffa, 0x32020018, 0xc004db9, 0x34108000,
0xc004db9, 0x0, 0xc004d58, 0x0,
0x50400005, 0x108042, 0x97a2001a, 0x501025,
0xa7a2001a, 0x108042, 0x1600fff7, 0x0,
0xc004db9, 0x8021, 0xa7a0001c, 0xc004d78,
0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
0x0, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0xc004d78, 0x24040001, 0xc004d78,
0x2021, 0x24100010, 0xc004d78, 0x2021,
0x108042, 0x1600fffc, 0x0, 0x24100010,
0x3202001e, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x3202001e,
0xc004db9, 0x34108000, 0xc004db9, 0x0,
0xc004d58, 0x0, 0x50400005, 0x108042,
0x97a2001c, 0x501025, 0xa7a2001c, 0x108042,
0x1600fff7, 0x0, 0xc004db9, 0x8021,
0xa7a0001c, 0xc004d78, 0x24040001, 0x26100001,
0x2e020020, 0x1440fffb, 0x0, 0xc004d78,
0x2021, 0xc004d78, 0x24040001, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0x24100010,
0xc004d78, 0x2021, 0x108042, 0x1600fffc,
0x0, 0x24100010, 0x3202001e, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fffa, 0x3202001e, 0xc004db9, 0x34108000,
0xc004db9, 0x0, 0xc004d58, 0x0,
0x50400005, 0x108042, 0x97a2001c, 0x501025,
0xa7a2001c, 0x108042, 0x1600fff7, 0x0,
0xc004db9, 0x8021, 0x24020002, 0xa7a2001e,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0x24100010, 0xc004d78,
0x2021, 0x108042, 0x1600fffc, 0x0,
0x24100010, 0x3202001e, 0x10400002, 0x2021,
0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
0x3202001e, 0xc004d78, 0x24040001, 0xc004d78,
0x2021, 0x34108000, 0x97a2001e, 0x501024,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fff8, 0x0, 0xc004db9,
0x8021, 0xa7a00020, 0xc004d78, 0x24040001,
0x26100001, 0x2e020020, 0x1440fffb, 0x0,
0xc004d78, 0x2021, 0xc004d78, 0x24040001,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0x24100010, 0xc004d78, 0x2021, 0x108042,
0x1600fffc, 0x0, 0x24100010, 0x3202001e,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x3202001e, 0xc004db9,
0x34108000, 0xc004db9, 0x0, 0xc004d58,
0x0, 0x50400005, 0x108042, 0x97a20020,
0x501025, 0xa7a20020, 0x108042, 0x1600fff7,
0x0, 0xc004db9, 0x8021, 0xa7a00020,
0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
0x1440fffb, 0x0, 0xc004d78, 0x2021,
0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
0xc004d78, 0x2021, 0x24100010, 0xc004d78,
0x2021, 0x108042, 0x1600fffc, 0x0,
0x24100010, 0x3202001e, 0x10400002, 0x2021,
0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
0x3202001e, 0xc004db9, 0x34108000, 0xc004db9,
0x0, 0xc004d58, 0x0, 0x50400005,
0x108042, 0x97a20020, 0x501025, 0xa7a20020,
0x108042, 0x1600fff7, 0x0, 0xc004db9,
0x8021, 0xa7a00022, 0xc004d78, 0x24040001,
0x26100001, 0x2e020020, 0x1440fffb, 0x0,
0xc004d78, 0x2021, 0xc004d78, 0x24040001,
0xc004d78, 0x2021, 0xc004d78, 0x24040001,
0x24100010, 0xc004d78, 0x2021, 0x108042,
0x1600fffc, 0x0, 0x24100010, 0xc004d78,
0x2021, 0x108042, 0x1600fffc, 0x0,
0xc004d78, 0x24040001, 0xc004d78, 0x2021,
0x34108000, 0x97a20022, 0x501024, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fff8, 0x0, 0xc004db9, 0x0,
0x24040018, 0x24050002, 0xc004ddb, 0x24060004,
0x3c100001, 0x8e106e24, 0x24020001, 0x1602011d,
0x0, 0x3c020001, 0x94426f26, 0x3c010001,
0xac206e24, 0x24429fbc, 0x2c420004, 0x1040000c,
0x24040009, 0x24050001, 0xc004ddb, 0x24060400,
0x24040018, 0x24050001, 0xc004ddb, 0x24060020,
0x24040018, 0x24050001, 0xc004ddb, 0x24062000,
0x3c024000, 0x2421024, 0x10400123, 0x3c022000,
0x2421024, 0x10400004, 0x0, 0x3c010001,
0x10000003, 0xac306f1c, 0x3c010001, 0xac206f1c,
0x3c030001, 0x8c636f34, 0x24020005, 0x146200f9,
0x0, 0x3c020001, 0x8c426f1c, 0x10400067,
0x3c020004, 0x2421024, 0x10400011, 0xa7a00018,
0x3c020008, 0x2421024, 0x10400002, 0x24020200,
0xa7a20018, 0x3c020010, 0x2421024, 0x10400004,
0x0, 0x97a20018, 0x34420100, 0xa7a20018,
0x97a60018, 0x24040009, 0x10000004, 0x2821,
0x24040009, 0x2821, 0x3021, 0xc004ddb,
0x0, 0x24020001, 0xa7a2001a, 0x3c020008,
0x2421024, 0x1040000c, 0x3c020002, 0x2421024,
0x10400002, 0x24020101, 0xa7a2001a, 0x3c020001,
0x2421024, 0x10400005, 0x3c020010, 0x97a2001a,
0x34420040, 0xa7a2001a, 0x3c020010, 0x2421024,
0x1040000e, 0x3c020002, 0x2421024, 0x10400005,
0x3c020001, 0x97a2001a, 0x34420080, 0xa7a2001a,
0x3c020001, 0x2421024, 0x10400005, 0x3c0300a0,
0x97a2001a, 0x34420020, 0xa7a2001a, 0x3c0300a0,
0x2431024, 0x54430004, 0x3c020020, 0x97a2001a,
0x1000000c, 0x34420400, 0x2421024, 0x50400004,
0x3c020080, 0x97a2001a, 0x10000006, 0x34420800,
0x2421024, 0x10400004, 0x0, 0x97a2001a,
0x34420c00, 0xa7a2001a, 0x97a6001a, 0x24040004,
0xc004ddb, 0x2821, 0x3c020004, 0x2421024,
0x10400004, 0xa7a0001c, 0x32425000, 0x14400004,
0x0, 0x32424000, 0x10400005, 0x2021,
0xc004cf9, 0x2402021, 0x10000096, 0x0,
0x97a6001c, 0x2821, 0x34c61200, 0xc004ddb,
0xa7a6001c, 0x1000008f, 0x0, 0x2421024,
0x10400004, 0xa7a00018, 0x32425000, 0x14400004,
0x0, 0x32424000, 0x10400005, 0x3c020010,
0xc004cf9, 0x2402021, 0x10000019, 0xa7a0001a,
0x2421024, 0x10400004, 0x0, 0x97a20018,
0x10000004, 0xa7a20018, 0x97a20018, 0x34420100,
0xa7a20018, 0x3c020001, 0x2421024, 0x10400004,
0x0, 0x97a20018, 0x10000004, 0xa7a20018,
0x97a20018, 0x34422000, 0xa7a20018, 0x97a60018,
0x2021, 0xc004ddb, 0x2821, 0xa7a0001a,
0x8021, 0xc004d78, 0x24040001, 0x26100001,
0x2e020020, 0x1440fffb, 0x0, 0xc004d78,
0x2021, 0xc004d78, 0x24040001, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0x24100010,
0x32020001, 0x10400002, 0x2021, 0x24040001,
0xc004d78, 0x108042, 0x1600fffa, 0x32020001,
0x24100010, 0xc004d78, 0x2021, 0x108042,
0x1600fffc, 0x0, 0xc004db9, 0x34108000,
0xc004db9, 0x0, 0xc004d58, 0x0,
0x50400005, 0x108042, 0x97a2001a, 0x501025,
0xa7a2001a, 0x108042, 0x1600fff7, 0x0,
0xc004db9, 0x8021, 0xa7a0001a, 0xc004d78,
0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
0x0, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0xc004d78, 0x24040001, 0xc004d78,
0x2021, 0x24100010, 0x32020001, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fffa, 0x32020001, 0x24100010, 0xc004d78,
0x2021, 0x108042, 0x1600fffc, 0x0,
0xc004db9, 0x34108000, 0xc004db9, 0x0,
0xc004d58, 0x0, 0x50400005, 0x108042,
0x97a2001a, 0x501025, 0xa7a2001a, 0x108042,
0x1600fff7, 0x0, 0xc004db9, 0x0,
0x3c040001, 0x24846bcc, 0x97a60018, 0x97a7001a,
0x3c020001, 0x8c426d98, 0x3c030001, 0x8c636f1c,
0x3c05000d, 0x34a50205, 0xafa20010, 0xc002b3b,
0xafa30014, 0x8f830054, 0x24020004, 0x3c010001,
0xac226dd4, 0x3c010001, 0x10000017, 0xac236f38,
0x8f830054, 0x3c020001, 0x8c426f38, 0x2463ff9c,
0x431023, 0x2c420064, 0x1440000f, 0x0,
0x8f820220, 0x24030005, 0x3c010001, 0xac236dd4,
0x3c03f700, 0x431025, 0x10000007, 0xaf820220,
0x24020006, 0x3c010001, 0xac226dd4, 0x24020011,
0x3c010001, 0xac226dd0, 0x8fbf0034, 0x8fb20030,
0x8fb1002c, 0x8fb00028, 0x3e00008, 0x27bd0038,
0x27bdffd8, 0xafb00018, 0x808021, 0xafb1001c,
0x8821, 0x32024000, 0x10400013, 0xafbf0020,
0x3c020010, 0x2021024, 0x2c420001, 0x21023,
0x30434100, 0x3c020001, 0x2021024, 0x14400006,
0x34714000, 0x3c020002, 0x2021024, 0x14400002,
0x34716000, 0x34714040, 0x2021, 0x2821,
0x10000036, 0x2203021, 0x32021000, 0x10400035,
0x2021, 0x2821, 0xc004ddb, 0x24060040,
0x24040018, 0x2821, 0xc004ddb, 0x24060c00,
0x24040017, 0x2821, 0xc004ddb, 0x24060400,
0x24040016, 0x2821, 0xc004ddb, 0x24060006,
0x24040017, 0x2821, 0xc004ddb, 0x24062500,
0x24040016, 0x2821, 0xc004ddb, 0x24060006,
0x24040017, 0x2821, 0xc004ddb, 0x24064600,
0x24040016, 0x2821, 0xc004ddb, 0x24060006,
0x24040017, 0x2821, 0xc004ddb, 0x24066700,
0x24040016, 0x2821, 0xc004ddb, 0x24060006,
0x2404001f, 0x2821, 0xc004ddb, 0x24060010,
0x24040009, 0x2821, 0xc004ddb, 0x24061500,
0x24040009, 0x2821, 0x24061d00, 0xc004ddb,
0x0, 0x3c040001, 0x24846bf0, 0x3c05000e,
0x34a50100, 0x2003021, 0x2203821, 0xafa00010,
0xc002b3b, 0xafa00014, 0x8fbf0020, 0x8fb1001c,
0x8fb00018, 0x3e00008, 0x27bd0028, 0x8f850044,
0x8f820044, 0x3c030001, 0x431025, 0x3c030008,
0xaf820044, 0x8f840054, 0x8f820054, 0xa32824,
0x10000002, 0x24840001, 0x8f820054, 0x821023,
0x2c420002, 0x1440fffc, 0x0, 0x8f820044,
0x3c03fffe, 0x3463ffff, 0x431024, 0xaf820044,
0x8f830054, 0x8f820054, 0x10000002, 0x24630001,
0x8f820054, 0x621023, 0x2c420002, 0x1440fffc,
0x0, 0x3e00008, 0xa01021, 0x8f830044,
0x3c02fff0, 0x3442ffff, 0x42480, 0x621824,
0x3c020002, 0x822025, 0x641825, 0xaf830044,
0x8f820044, 0x3c03fffe, 0x3463ffff, 0x431024,
0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
0x24630001, 0x8f820054, 0x621023, 0x2c420002,
0x1440fffc, 0x0, 0x8f820044, 0x3c030001,
0x431025, 0xaf820044, 0x8f830054, 0x8f820054,
0x10000002, 0x24630001, 0x8f820054, 0x621023,
0x2c420002, 0x1440fffc, 0x0, 0x3e00008,
0x0, 0x8f820044, 0x2403ff7f, 0x431024,
0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
0x24630001, 0x8f820054, 0x621023, 0x2c420002,
0x1440fffc, 0x0, 0x8f820044, 0x34420080,
0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
0x24630001, 0x8f820054, 0x621023, 0x2c420002,
0x1440fffc, 0x0, 0x3e00008, 0x0,
0x8f820044, 0x3c03fff0, 0x3463ffff, 0x431024,
0xaf820044, 0x8f820044, 0x3c030001, 0x431025,
0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
0x24630001, 0x8f820054, 0x621023, 0x2c420002,
0x1440fffc, 0x0, 0x8f820044, 0x3c03fffe,
0x3463ffff, 0x431024, 0xaf820044, 0x8f830054,
0x8f820054, 0x10000002, 0x24630001, 0x8f820054,
0x621023, 0x2c420002, 0x1440fffc, 0x0,
0x3e00008, 0x0, 0x27bdffc8, 0xafb30024,
0x809821, 0xafbe002c, 0xa0f021, 0xafb20020,
0xc09021, 0x33c2ffff, 0xafbf0030, 0xafb50028,
0xafb1001c, 0xafb00018, 0x14400034, 0xa7b20010,
0x3271ffff, 0x27b20010, 0x8021, 0xc004d78,
0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
0x0, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0x24100010, 0x32020001, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fffa, 0x32020001, 0x24100010, 0x2301024,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x2301024, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0x34108000,
0x96420000, 0x501024, 0x10400002, 0x2021,
0x24040001, 0xc004d78, 0x108042, 0x12000075,
0x0, 0x1000fff6, 0x0, 0x3275ffff,
0x27b10010, 0xa7a00010, 0x8021, 0xc004d78,
0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
0x0, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0xc004d78, 0x24040001, 0xc004d78,
0x2021, 0x24100010, 0x32020001, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fffa, 0x32020001, 0x24100010, 0x2b01024,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x2b01024, 0xc004db9,
0x34108000, 0xc004db9, 0x0, 0xc004d58,
0x0, 0x50400005, 0x108042, 0x96220000,
0x501025, 0xa6220000, 0x108042, 0x1600fff7,
0x0, 0xc004db9, 0x0, 0x33c5ffff,
0x24020001, 0x54a20004, 0x24020002, 0x97a20010,
0x10000006, 0x521025, 0x14a20006, 0x3271ffff,
0x97a20010, 0x121827, 0x431024, 0xa7a20010,
0x3271ffff, 0x27b20010, 0x8021, 0xc004d78,
0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
0x0, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0xc004d78,
0x24040001, 0x24100010, 0x32020001, 0x10400002,
0x2021, 0x24040001, 0xc004d78, 0x108042,
0x1600fffa, 0x32020001, 0x24100010, 0x2301024,
0x10400002, 0x2021, 0x24040001, 0xc004d78,
0x108042, 0x1600fffa, 0x2301024, 0xc004d78,
0x24040001, 0xc004d78, 0x2021, 0x34108000,
0x96420000, 0x501024, 0x10400002, 0x2021,
0x24040001, 0xc004d78, 0x108042, 0x1600fff8,
0x0, 0xc004db9, 0x0, 0x8fbf0030,
0x8fbe002c, 0x8fb50028, 0x8fb30024, 0x8fb20020,
0x8fb1001c, 0x8fb00018, 0x3e00008, 0x27bd0038,
0x0, 0x0, 0x0, 0x27bdffe8,
0xafbf0010, 0x8ee304b8, 0x24020008, 0x146201e0,
0x0, 0x3c020001, 0x8c426f1c, 0x14400005,
0x0, 0xc003daf, 0x8f840224, 0x100001d8,
0x0, 0x8f820220, 0x3c030008, 0x431024,
0x10400026, 0x24020001, 0x8f840224, 0x8f820220,
0x3c030400, 0x431024, 0x10400006, 0x0,
0x3c010002, 0xac208fa0, 0x3c010002, 0x1000000b,
0xac208fc0, 0x3c030002, 0x24638fa0, 0x8c620000,
0x24420001, 0xac620000, 0x2c420002, 0x14400003,
0x24020001, 0x3c010002, 0xac228fc0, 0x3c020002,
0x8c428fc0, 0x10400006, 0x30820040, 0x10400004,
0x24020001, 0x3c010002, 0x10000003, 0xac228fc4,
0x3c010002, 0xac208fc4, 0x3c010002, 0xac248f9c,
0x3c010002, 0x1000000b, 0xac208fd0, 0x3c010002,
0xac228fd0, 0x3c010002, 0xac208fc0, 0x3c010002,
0xac208fa0, 0x3c010002, 0xac208fc4, 0x3c010002,
0xac208f9c, 0x3c030002, 0x8c638f90, 0x3c020002,
0x8c428f94, 0x50620004, 0x2463ffff, 0x3c010002,
0xac238f94, 0x2463ffff, 0x2c62000e, 0x10400194,
0x31080, 0x3c010001, 0x220821, 0x8c226c00,
0x400008, 0x0, 0x24020002, 0x3c010002,
0xac208fc0, 0x3c010002, 0xac208fa0, 0x3c010002,
0xac208f9c, 0x3c010002, 0xac208fc4, 0x3c010002,
0xac208fb8, 0x3c010002, 0xac208fb0, 0xaf800224,
0x3c010002, 0xac228f90, 0x3c020002, 0x8c428fd0,
0x1440004f, 0x3c02fdff, 0x3442ffff, 0xc003daf,
86490x282a024, 0xaf800204, 0x8f820200, 0x2403fffd,
86500x431024, 0xaf820200, 0x3c010002, 0xac208fe0,
86510x8f830054, 0x3c020002, 0x8c428fb8, 0x24040001,
86520x3c010002, 0xac248fcc, 0x24420001, 0x3c010002,
86530xac228fb8, 0x2c420004, 0x3c010002, 0xac238fb4,
86540x14400006, 0x24020003, 0x3c010001, 0xac246d9c,
86550x3c010002, 0x1000015e, 0xac208fb8, 0x3c010002,
86560x1000015b, 0xac228f90, 0x8f830054, 0x3c020002,
86570x8c428fb4, 0x2463d8f0, 0x431023, 0x2c422710,
86580x14400003, 0x24020004, 0x3c010002, 0xac228f90,
86590x3c020002, 0x8c428fd0, 0x14400021, 0x3c02fdff,
86600x3442ffff, 0x1000014a, 0x282a024, 0x3c040001,
86610x8c846f20, 0x3c010002, 0xc005084, 0xac208fa8,
86620x3c020002, 0x8c428fdc, 0xaf820204, 0x3c020002,
86630x8c428fd0, 0x14400012, 0x3c03fdff, 0x8f820204,
86640x3463ffff, 0x30420030, 0x1440012f, 0x283a024,
86650x3c030002, 0x8c638fdc, 0x24020005, 0x3c010002,
86660xac228f90, 0x3c010002, 0x10000131, 0xac238fe0,
86670x3c020002, 0x8c428fd0, 0x10400010, 0x3c02fdff,
86680x3c020001, 0x8c426e3c, 0x24420001, 0x3c010001,
86690xac226e3c, 0x2c420002, 0x14400125, 0x24020001,
86700x3c010001, 0xac226e44, 0x3c010001, 0xac206e3c,
86710x3c010001, 0x1000011e, 0xac226d9c, 0x3c030002,
86720x8c638fc0, 0x3442ffff, 0x10600119, 0x282a024,
86730x3c020002, 0x8c428f9c, 0x10400115, 0x0,
86740x3c010002, 0xac228fc8, 0x24020003, 0x3c010002,
86750xac228fa0, 0x100000b8, 0x24020006, 0x3c010002,
86760xac208fa8, 0x8f820204, 0x34420040, 0xaf820204,
86770x3c020002, 0x8c428fe0, 0x24030007, 0x3c010002,
86780xac238f90, 0x34420040, 0x3c010002, 0xac228fe0,
86790x3c020002, 0x8c428fc0, 0x10400005, 0x0,
86800x3c020002, 0x8c428f9c, 0x104000f0, 0x24020002,
86810x3c050002, 0x24a58fa0, 0x8ca20000, 0x2c424e21,
86820x104000ea, 0x24020002, 0x3c020002, 0x8c428fc4,
86830x104000ef, 0x2404ffbf, 0x3c020002, 0x8c428f9c,
86840x3c030002, 0x8c638fc8, 0x441024, 0x641824,
86850x10430004, 0x24020001, 0x3c010002, 0x100000e4,
86860xac228f90, 0x24020003, 0xaca20000, 0x24020008,
86870x3c010002, 0xac228f90, 0x3c020002, 0x8c428fcc,
86880x1040000c, 0x24020001, 0x3c040002, 0xc005091,
86890x8c848f9c, 0x3c020002, 0x8c428fe8, 0x14400005,
86900x24020001, 0x3c020002, 0x8c428fe4, 0x10400006,
86910x24020001, 0x3c010001, 0xac226d9c, 0x3c010002,
86920x100000cb, 0xac208fb8, 0x3c020002, 0x8c428fb0,
86930x3c030002, 0x8c638f9c, 0x2c420001, 0x210c0,
86940x30630008, 0x3c010002, 0xac228fb0, 0x3c010002,
86950xac238fac, 0x8f830054, 0x24020009, 0x3c010002,
86960xac228f90, 0x3c010002, 0x100000b9, 0xac238fb4,
86970x8f830054, 0x3c020002, 0x8c428fb4, 0x2463d8f0,
86980x431023, 0x2c422710, 0x1440009f, 0x0,
86990x3c020002, 0x8c428fc0, 0x10400005, 0x0,
87000x3c020002, 0x8c428f9c, 0x104000a0, 0x24020002,
87010x3c030002, 0x24638fa0, 0x8c620000, 0x2c424e21,
87020x1040009a, 0x24020002, 0x3c020002, 0x8c428fcc,
87030x1040000e, 0x0, 0x3c020002, 0x8c428f9c,
87040x3c010002, 0xac208fcc, 0x30420080, 0x1040002f,
87050x2402000c, 0x8f820204, 0x30420080, 0x1440000c,
87060x24020003, 0x10000029, 0x2402000c, 0x3c020002,
87070x8c428f9c, 0x30420080, 0x14400005, 0x24020003,
87080x8f820204, 0x30420080, 0x1040001f, 0x24020003,
87090xac620000, 0x2402000a, 0x3c010002, 0xac228f90,
87100x3c040002, 0x24848fd8, 0x8c820000, 0x3c030002,
87110x8c638fb0, 0x431025, 0xaf820204, 0x8c830000,
87120x3c040002, 0x8c848fb0, 0x2402000b, 0x3c010002,
87130xac228f90, 0x641825, 0x3c010002, 0xac238fe0,
87140x3c050002, 0x24a58fa0, 0x8ca20000, 0x2c424e21,
87150x10400066, 0x24020002, 0x3c020002, 0x8c428fd0,
87160x10400005, 0x0, 0x2402000c, 0x3c010002,
87170x10000067, 0xac228f90, 0x3c020002, 0x8c428fc0,
87180x10400063, 0x0, 0x3c040002, 0x8c848f9c,
87190x10800055, 0x30820008, 0x3c030002, 0x8c638fac,
87200x1062005b, 0x24020003, 0x3c010002, 0xac248fc8,
87210xaca20000, 0x24020006, 0x3c010002, 0x10000054,
87220xac228f90, 0x8f820200, 0x34420002, 0xaf820200,
87230x8f830054, 0x2402000d, 0x3c010002, 0xac228f90,
87240x3c010002, 0xac238fb4, 0x8f830054, 0x3c020002,
87250x8c428fb4, 0x2463d8f0, 0x431023, 0x2c422710,
87260x14400031, 0x0, 0x3c020002, 0x8c428fd0,
87270x10400020, 0x2402000e, 0x3c030002, 0x8c638fe4,
87280x3c010002, 0x14600015, 0xac228f90, 0xc003e6d,
87290x0, 0x3c050001, 0x8ca56d98, 0xc00529b,
87300x2021, 0x3c030001, 0x8c636d98, 0x24020004,
87310x14620005, 0x2403fffb, 0x3c020001, 0x8c426d94,
87320x10000003, 0x2403fff7, 0x3c020001, 0x8c426d94,
87330x431024, 0x3c010001, 0xac226d94, 0x8f830224,
87340x3c020200, 0x3c010002, 0xac238fec, 0x10000020,
87350x282a025, 0x3c020002, 0x8c428fc0, 0x10400005,
87360x0, 0x3c020002, 0x8c428f9c, 0x1040000f,
87370x24020002, 0x3c020002, 0x8c428fa0, 0x2c424e21,
87380x1040000a, 0x24020002, 0x3c020002, 0x8c428fc0,
87390x1040000f, 0x0, 0x3c020002, 0x8c428f9c,
87400x1440000b, 0x0, 0x24020002, 0x3c010002,
87410x10000007, 0xac228f90, 0x3c020002, 0x8c428fc0,
87420x10400003, 0x0, 0xc003daf, 0x0,
87430x8f820220, 0x3c03f700, 0x431025, 0xaf820220,
87440x8fbf0010, 0x3e00008, 0x27bd0018, 0x3c030002,
87450x24638fe8, 0x8c620000, 0x10400005, 0x34422000,
87460x3c010002, 0xac228fdc, 0x10000003, 0xac600000,
87470x3c010002, 0xac248fdc, 0x3e00008, 0x0,
87480x27bdffe0, 0x30820030, 0xafbf0018, 0x3c010002,
87490xac228fe4, 0x14400067, 0x3c02ffff, 0x34421f0e,
87500x821024, 0x14400061, 0x24020030, 0x30822000,
87510x1040005d, 0x30838000, 0x31a02, 0x30820001,
87520x21200, 0x3c040001, 0x8c846f20, 0x621825,
87530x331c2, 0x3c030001, 0x24636e48, 0x30828000,
87540x21202, 0x30840001, 0x42200, 0x441025,
87550x239c2, 0x61080, 0x431021, 0x471021,
87560x90430000, 0x24020001, 0x10620025, 0x0,
87570x10600007, 0x24020002, 0x10620013, 0x24020003,
87580x1062002c, 0x3c05000f, 0x10000037, 0x0,
87590x8f820200, 0x2403feff, 0x431024, 0xaf820200,
87600x8f820220, 0x3c03fffe, 0x3463ffff, 0x431024,
87610xaf820220, 0x3c010002, 0xac209004, 0x3c010002,
87620x10000034, 0xac20900c, 0x8f820200, 0x34420100,
87630xaf820200, 0x8f820220, 0x3c03fffe, 0x3463ffff,
87640x431024, 0xaf820220, 0x24020100, 0x3c010002,
87650xac229004, 0x3c010002, 0x10000026, 0xac20900c,
87660x8f820200, 0x2403feff, 0x431024, 0xaf820200,
87670x8f820220, 0x3c030001, 0x431025, 0xaf820220,
87680x3c010002, 0xac209004, 0x3c010002, 0x10000019,
87690xac23900c, 0x8f820200, 0x34420100, 0xaf820200,
87700x8f820220, 0x3c030001, 0x431025, 0xaf820220,
87710x24020100, 0x3c010002, 0xac229004, 0x3c010002,
87720x1000000c, 0xac23900c, 0x34a5ffff, 0x3c040001,
87730x24846c38, 0xafa30010, 0xc002b3b, 0xafa00014,
87740x10000004, 0x0, 0x24020030, 0x3c010002,
87750xac228fe8, 0x8fbf0018, 0x3e00008, 0x27bd0020,
87760x0, 0x0, 0x0, 0x27bdffc8,
87770xafb20028, 0x809021, 0xafb3002c, 0xa09821,
87780xafb00020, 0xc08021, 0x3c040001, 0x24846c50,
87790x3c050009, 0x3c020001, 0x8c426d98, 0x34a59001,
87800x2403021, 0x2603821, 0xafbf0030, 0xafb10024,
87810xa7a0001a, 0xafb00014, 0xc002b3b, 0xafa20010,
87820x24020002, 0x12620083, 0x2e620003, 0x10400005,
87830x24020001, 0x1262000a, 0x0, 0x10000173,
87840x0, 0x24020004, 0x126200f8, 0x24020008,
87850x126200f7, 0x3c02ffec, 0x1000016c, 0x0,
87860x3c020001, 0x8c426d94, 0x30420002, 0x14400004,
87870x128940, 0x3c02fffb, 0x3442ffff, 0x2028024,
87880x3c010002, 0x310821, 0xac308ffc, 0x3c024000,
87890x2021024, 0x1040004e, 0x1023c2, 0x30840030,
87900x101382, 0x3042001c, 0x3c030001, 0x24636dd8,
87910x431021, 0x823821, 0x3c020020, 0x2021024,
87920x10400006, 0x24020100, 0x3c010002, 0x310821,
87930xac229000, 0x10000005, 0x3c020080, 0x3c010002,
87940x310821, 0xac209000, 0x3c020080, 0x2021024,
87950x10400006, 0x121940, 0x3c020001, 0x3c010002,
87960x230821, 0x10000005, 0xac229008, 0x121140,
87970x3c010002, 0x220821, 0xac209008, 0x94e40000,
87980x3c030001, 0x8c636f40, 0x24020005, 0x10620010,
87990xa7a40018, 0x32024000, 0x10400002, 0x34824000,
88000xa7a20018, 0x24040001, 0x94e20002, 0x24050004,
88010x24e60002, 0x34420001, 0xc0045be, 0xa4e20002,
88020x24040001, 0x2821, 0xc0045be, 0x27a60018,
88030x3c020001, 0x8c426d98, 0x24110001, 0x3c010001,
88040xac316da4, 0x14530004, 0x32028000, 0xc003daf,
88050x0, 0x32028000, 0x1040011c, 0x0,
88060xc003daf, 0x0, 0x3c030001, 0x8c636f40,
88070x24020005, 0x10620115, 0x24020002, 0x3c010001,
88080xac316d9c, 0x3c010001, 0x10000110, 0xac226d98,
88090x24040001, 0x24050004, 0x27b0001a, 0xc0045be,
88100x2003021, 0x24040001, 0x2821, 0xc0045be,
88110x2003021, 0x3c020002, 0x511021, 0x8c428ff4,
88120x3c040001, 0x8c846d98, 0x3c03bfff, 0x3463ffff,
88130x3c010001, 0xac336da4, 0x431024, 0x3c010002,
88140x310821, 0x109300f7, 0xac228ff4, 0x100000f7,
88150x0, 0x3c022000, 0x2021024, 0x10400005,
88160x24020001, 0x3c010001, 0xac226f1c, 0x10000004,
88170x128940, 0x3c010001, 0xac206f1c, 0x128940,
88180x3c010002, 0x310821, 0xac308ff8, 0x3c024000,
88190x2021024, 0x14400014, 0x0, 0x3c020001,
88200x8c426f1c, 0x10400006, 0x24040004, 0x24050001,
88210xc004ddb, 0x24062000, 0x24020001, 0xaee204b8,
88220x3c020002, 0x511021, 0x8c428ff0, 0x3c03bfff,
88230x3463ffff, 0x431024, 0x3c010002, 0x310821,
88240x100000d0, 0xac228ff0, 0x3c020001, 0x8c426f1c,
88250x10400028, 0x3c0300a0, 0x2031024, 0x5443000d,
88260x3c020020, 0x3c020001, 0x8c426f20, 0x24030100,
88270x3c010002, 0x310821, 0xac239004, 0x3c030001,
88280x3c010002, 0x310821, 0xac23900c, 0x10000015,
88290x34420400, 0x2021024, 0x10400008, 0x24030100,
88300x3c020001, 0x8c426f20, 0x3c010002, 0x310821,
88310xac239004, 0x1000000b, 0x34420800, 0x3c020080,
88320x2021024, 0x1040002e, 0x3c030001, 0x3c020001,
88330x8c426f20, 0x3c010002, 0x310821, 0xac23900c,
88340x34420c00, 0x3c010001, 0xac226f20, 0x10000025,
88350x24040001, 0x3c020020, 0x2021024, 0x10400006,
88360x24020100, 0x3c010002, 0x310821, 0xac229004,
88370x10000005, 0x3c020080, 0x3c010002, 0x310821,
88380xac209004, 0x3c020080, 0x2021024, 0x10400007,
88390x121940, 0x3c020001, 0x3c010002, 0x230821,
88400xac22900c, 0x10000006, 0x24040001, 0x121140,
88410x3c010002, 0x220821, 0xac20900c, 0x24040001,
88420x2821, 0x27b0001e, 0xc00457c, 0x2003021,
88430x24040001, 0x2821, 0xc00457c, 0x2003021,
88440x24040001, 0x24050001, 0x27b0001c, 0xc00457c,
88450x2003021, 0x24040001, 0x24050001, 0xc00457c,
88460x2003021, 0x10000077, 0x0, 0x3c02ffec,
88470x3442ffff, 0x2028024, 0x3c020008, 0x2028025,
88480x121140, 0x3c010002, 0x220821, 0xac308ff8,
88490x3c022000, 0x2021024, 0x10400009, 0x0,
88500x3c020001, 0x8c426e44, 0x14400005, 0x24020001,
88510x3c010001, 0xac226f1c, 0x10000004, 0x3c024000,
88520x3c010001, 0xac206f1c, 0x3c024000, 0x2021024,
88530x1440001d, 0x24020e01, 0x3c030001, 0x8c636f1c,
88540xaf820238, 0x3c010001, 0xac206db0, 0x10600005,
88550x24022020, 0x3c010001, 0xac226f20, 0x24020001,
88560xaee204b8, 0x3c04bfff, 0x121940, 0x3c020002,
88570x431021, 0x8c428ff0, 0x3c050001, 0x8ca56d98,
88580x3484ffff, 0x441024, 0x3c010002, 0x230821,
88590xac228ff0, 0x24020001, 0x10a20044, 0x0,
88600x10000040, 0x0, 0x3c020001, 0x8c426f1c,
88610x1040001c, 0x24022000, 0x3c010001, 0xac226f20,
88620x3c0300a0, 0x2031024, 0x14430005, 0x121140,
88630x3402a000, 0x3c010001, 0x1000002d, 0xac226f20,
88640x3c030002, 0x621821, 0x8c638ff8, 0x3c020020,
88650x621024, 0x10400004, 0x24022001, 0x3c010001,
88660x10000023, 0xac226f20, 0x3c020080, 0x621024,
88670x1040001f, 0x3402a001, 0x3c010001, 0x1000001c,
88680xac226f20, 0x3c020020, 0x2021024, 0x10400007,
88690x121940, 0x24020100, 0x3c010002, 0x230821,
88700xac229004, 0x10000006, 0x3c020080, 0x121140,
88710x3c010002, 0x220821, 0xac209004, 0x3c020080,
88720x2021024, 0x10400006, 0x121940, 0x3c020001,
88730x3c010002, 0x230821, 0x10000005, 0xac22900c,
88740x121140, 0x3c010002, 0x220821, 0xac20900c,
88750x3c030001, 0x8c636d98, 0x24020001, 0x10620003,
88760x0, 0xc003daf, 0x0, 0x8fbf0030,
88770x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
88780x3e00008, 0x27bd0038, 0x27bdffb0, 0xafb3003c,
88790x9821, 0xafb50040, 0xa821, 0xafb10034,
88800x8821, 0x24020002, 0xafbf0048, 0xafbe0044,
88810xafb20038, 0xafb00030, 0xafa4002c, 0xa7a0001a,
88820xa7a00018, 0xa7a00020, 0xa7a0001e, 0xa7a00022,
88830x10a20130, 0xa7a0001c, 0x2ca20003, 0x10400005,
88840x24020001, 0x10a2000a, 0x3c024000, 0x1000025d,
88850x2201021, 0x24020004, 0x10a2020a, 0x24020008,
88860x10a20208, 0x2201021, 0x10000256, 0x0,
88870x8fa8002c, 0x88140, 0x3c030002, 0x701821,
88880x8c638ffc, 0x621024, 0x14400009, 0x24040001,
88890x3c027fff, 0x3442ffff, 0x628824, 0x3c010002,
88900x300821, 0xac318ff4, 0x10000246, 0x2201021,
88910x24050001, 0xc00457c, 0x27a60018, 0x24040001,
88920x24050001, 0xc00457c, 0x27a60018, 0x97a20018,
88930x30420004, 0x104000d9, 0x3c114000, 0x3c020001,
88940x8c426f40, 0x2443ffff, 0x2c620006, 0x104000d9,
88950x31080, 0x3c010001, 0x220821, 0x8c226c68,
88960x400008, 0x0, 0x24040001, 0x24050011,
88970x27b0001a, 0xc00457c, 0x2003021, 0x24040001,
88980x24050011, 0xc00457c, 0x2003021, 0x97a3001a,
88990x30624000, 0x10400002, 0x3c150010, 0x3c150008,
89000x30628000, 0x104000aa, 0x3c130001, 0x100000a8,
89010x3c130002, 0x24040001, 0x24050014, 0x27b0001a,
89020xc00457c, 0x2003021, 0x24040001, 0x24050014,
89030xc00457c, 0x2003021, 0x97a3001a, 0x30621000,
89040x10400002, 0x3c150010, 0x3c150008, 0x30620800,
89050x10400097, 0x3c130001, 0x10000095, 0x3c130002,
89060x24040001, 0x24050019, 0x27b0001c, 0xc00457c,
89070x2003021, 0x24040001, 0x24050019, 0xc00457c,
89080x2003021, 0x97a2001c, 0x30430700, 0x24020400,
89090x10620027, 0x28620401, 0x1040000e, 0x24020200,
89100x1062001f, 0x28620201, 0x10400005, 0x24020100,
89110x5062001e, 0x3c130001, 0x1000001e, 0x24040001,
89120x24020300, 0x50620019, 0x3c130002, 0x10000019,
89130x24040001, 0x24020600, 0x1062000d, 0x28620601,
89140x10400005, 0x24020500, 0x5062000b, 0x3c130002,
89150x10000010, 0x24040001, 0x24020700, 0x1462000d,
89160x24040001, 0x3c130004, 0x1000000a, 0x3c150008,
89170x10000006, 0x3c130004, 0x10000005, 0x3c150008,
89180x3c130001, 0x10000002, 0x3c150008, 0x3c150010,
89190x24040001, 0x24050018, 0x27b0001e, 0xc00457c,
89200x2003021, 0x24040001, 0x24050018, 0xc00457c,
89210x2003021, 0x8fa8002c, 0x97a7001e, 0x81140,
89220x3c060002, 0xc23021, 0x8cc68ff4, 0x97a20022,
89230x3c100001, 0x26106c5c, 0x2002021, 0xafa20010,
89240x97a2001c, 0x3c05000c, 0x34a50303, 0xc002b3b,
89250xafa20014, 0x3c020004, 0x16620010, 0x3c020001,
89260x8f840054, 0x24030001, 0x24020002, 0x3c010001,
89270xac236d9c, 0x3c010001, 0xac226d98, 0x3c010001,
89280xac236da4, 0x3c010001, 0xac236e24, 0x3c010001,
89290xac246f30, 0x1000004f, 0x2b38825, 0x16620039,
89300x3c028000, 0x3c020001, 0x8c426e20, 0x1440001e,
89310x24040018, 0x2021, 0x2821, 0xc004ddb,
89320x34068000, 0x8f830054, 0x8f820054, 0x2b38825,
89330x10000002, 0x24630032, 0x8f820054, 0x621023,
89340x2c420033, 0x1440fffc, 0x0, 0x8f830054,
89350x24020001, 0x3c010001, 0xac226e20, 0x3c010001,
89360xac226d9c, 0x3c010001, 0xac226d98, 0x3c010001,
89370xac226da4, 0x3c010001, 0xac226e24, 0x3c010001,
89380x1000002c, 0xac236f30, 0x2821, 0xc004ddb,
89390x24060404, 0x2021, 0x2405001e, 0x27a60018,
89400x24020002, 0xc0045be, 0xa7a20018, 0x2021,
89410x2821, 0x27a60018, 0xc0045be, 0xa7a00018,
89420x24040018, 0x24050002, 0xc004ddb, 0x24060004,
89430x3c028000, 0x2221025, 0x2b31825, 0x10000015,
89440x438825, 0x2221025, 0x2751825, 0x438825,
89450x2002021, 0x97a6001c, 0x3c070001, 0x8ce76d98,
89460x3c05000c, 0x34a50326, 0xafb30010, 0xc002b3b,
89470xafb10014, 0x10000007, 0x0, 0x3c110002,
89480x2308821, 0x8e318ffc, 0x3c027fff, 0x3442ffff,
89490x2228824, 0x3c020001, 0x8c426da8, 0x1040001e,
89500x0, 0x3c020001, 0x8c426f1c, 0x10400002,
89510x3c022000, 0x2228825, 0x8fa8002c, 0x81140,
89520x3c010002, 0x220821, 0x8c229000, 0x10400003,
89530x3c020020, 0x10000005, 0x2228825, 0x3c02ffdf,
89540x3442ffff, 0x2228824, 0x8fa8002c, 0x81140,
89550x3c010002, 0x220821, 0x8c229008, 0x10400003,
89560x3c020080, 0x10000004, 0x2228825, 0x3c02ff7f,
89570x3442ffff, 0x2228824, 0x8fa8002c, 0x81140,
89580x3c010002, 0x220821, 0xac318ff4, 0x10000135,
89590x2201021, 0x8fa8002c, 0x8f140, 0x3c030002,
89600x7e1821, 0x8c638ff8, 0x3c024000, 0x621024,
89610x14400009, 0x24040001, 0x3c027fff, 0x3442ffff,
89620x628824, 0x3c010002, 0x3e0821, 0xac318ff0,
89630x10000124, 0x2201021, 0x2821, 0xc00457c,
89640x27a60018, 0x24040001, 0x2821, 0xc00457c,
89650x27a60018, 0x24040001, 0x24050001, 0x27b20020,
89660xc00457c, 0x2403021, 0x24040001, 0x24050001,
89670xc00457c, 0x2403021, 0x24040001, 0x24050004,
89680x27b1001e, 0xc00457c, 0x2203021, 0x24040001,
89690x24050004, 0xc00457c, 0x2203021, 0x24040001,
89700x24050005, 0x27b00022, 0xc00457c, 0x2003021,
89710x24040001, 0x24050005, 0xc00457c, 0x2003021,
89720x24040001, 0x24050010, 0xc00457c, 0x27a60018,
89730x24040001, 0x24050010, 0xc00457c, 0x27a60018,
89740x24040001, 0x2405000a, 0xc00457c, 0x2403021,
89750x24040001, 0x2405000a, 0xc00457c, 0x2403021,
89760x24040001, 0x24050018, 0xc00457c, 0x2203021,
89770x24040001, 0x24050018, 0xc00457c, 0x2203021,
89780x24040001, 0x24050001, 0xc00457c, 0x27a60018,
89790x24040001, 0x24050001, 0xc00457c, 0x27a60018,
89800x97a20018, 0x30420004, 0x10400066, 0x3c114000,
89810x3c030001, 0x8c636f34, 0x24020005, 0x14620067,
89820x24040001, 0x24050019, 0x27b0001c, 0xc00457c,
89830x2003021, 0x24040001, 0x24050019, 0xc00457c,
89840x2003021, 0x97a2001c, 0x30430700, 0x24020400,
89850x10620027, 0x28620401, 0x1040000e, 0x24020200,
89860x1062001f, 0x28620201, 0x10400005, 0x24020100,
89870x5062001e, 0x3c130001, 0x1000001e, 0x3c020004,
89880x24020300, 0x50620019, 0x3c130002, 0x10000019,
89890x3c020004, 0x24020600, 0x1062000d, 0x28620601,
89900x10400005, 0x24020500, 0x5062000b, 0x3c130002,
89910x10000010, 0x3c020004, 0x24020700, 0x1462000d,
89920x3c020004, 0x3c130004, 0x1000000a, 0x3c150008,
89930x10000006, 0x3c130004, 0x10000005, 0x3c150008,
89940x3c130001, 0x10000002, 0x3c150008, 0x3c150010,
89950x3c020004, 0x12620017, 0x3c028000, 0x8f820054,
89960x24100001, 0x3c010001, 0xac306d9c, 0x3c010001,
89970xac306d98, 0x3c010001, 0xac306da4, 0x3c010001,
89980xac306e24, 0x3c010001, 0xac226f30, 0x3c020001,
89990x16620022, 0x2758825, 0x2021, 0x2821,
90000xc004ddb, 0x34068000, 0x3c010001, 0x1000001b,
90010xac306e20, 0x2221025, 0x2b31825, 0x438825,
90020x97a6001c, 0x3c020001, 0x8c426f1c, 0x3c070001,
90030x8ce76d98, 0x3c040001, 0x24846c5c, 0xafa20010,
90040x97a2001e, 0x3c05000c, 0x34a50323, 0x3c010001,
90050xac206e20, 0xc002b3b, 0xafa20014, 0x10000007,
90060x0, 0x3c110002, 0x23e8821, 0x8e318ff0,
90070x3c027fff, 0x3442ffff, 0x2228824, 0x3c020001,
90080x8c426da8, 0x10400069, 0x0, 0x3c020001,
90090x8c426f1c, 0x10400002, 0x3c022000, 0x2228825,
90100x8fa8002c, 0x81140, 0x3c010002, 0x220821,
90110x8c229004, 0x10400003, 0x3c020020, 0x10000005,
90120x2228825, 0x3c02ffdf, 0x3442ffff, 0x2228824,
90130x8fa8002c, 0x81140, 0x3c010002, 0x220821,
90140x8c22900c, 0x10400003, 0x3c020080, 0x1000004f,
90150x2228825, 0x3c02ff7f, 0x3442ffff, 0x1000004b,
90160x2228824, 0x8fa8002c, 0x82940, 0x3c030002,
90170x651821, 0x8c638ff8, 0x3c024000, 0x621024,
90180x14400008, 0x3c027fff, 0x3442ffff, 0x628824,
90190x3c010002, 0x250821, 0xac318ff0, 0x10000041,
90200x2201021, 0x3c020001, 0x8c426da8, 0x10400034,
90210x3c11c00c, 0x3c020001, 0x8c426e44, 0x3c04c00c,
90220x34842000, 0x3c030001, 0x8c636f1c, 0x2102b,
90230x21023, 0x441024, 0x10600003, 0x518825,
90240x3c022000, 0x2228825, 0x3c020002, 0x451021,
90250x8c429004, 0x10400003, 0x3c020020, 0x10000004,
90260x2228825, 0x3c02ffdf, 0x3442ffff, 0x2228824,
90270x8fa8002c, 0x81140, 0x3c010002, 0x220821,
90280x8c22900c, 0x10400003, 0x3c020080, 0x10000004,
90290x2228825, 0x3c02ff7f, 0x3442ffff, 0x2228824,
90300x3c020001, 0x8c426e30, 0x10400002, 0x3c020800,
90310x2228825, 0x3c020001, 0x8c426e34, 0x10400002,
90320x3c020400, 0x2228825, 0x3c020001, 0x8c426e38,
90330x10400006, 0x3c020100, 0x10000004, 0x2228825,
90340x3c027fff, 0x3442ffff, 0x628824, 0x8fa8002c,
90350x81140, 0x3c010002, 0x220821, 0xac318ff0,
90360x2201021, 0x8fbf0048, 0x8fbe0044, 0x8fb50040,
90370x8fb3003c, 0x8fb20038, 0x8fb10034, 0x8fb00030,
90380x3e00008, 0x27bd0050, 0x27bdffd0, 0xafb20028,
90390x809021, 0xafbf002c, 0xafb10024, 0xafb00020,
90400x8f840200, 0x3c100001, 0x8e106d98, 0x8f860220,
90410x24020002, 0x1202005c, 0x2e020003, 0x10400005,
90420x24020001, 0x1202000a, 0x121940, 0x1000010c,
90430x0, 0x24020004, 0x120200bf, 0x24020008,
90440x120200be, 0x128940, 0x10000105, 0x0,
90450x3c050002, 0xa32821, 0x8ca58ffc, 0x3c100002,
90460x2038021, 0x8e108ff4, 0x3c024000, 0xa21024,
90470x10400038, 0x3c020008, 0x2021024, 0x10400020,
90480x34840002, 0x3c020002, 0x431021, 0x8c429000,
90490x10400005, 0x34840020, 0x34840100, 0x3c020020,
90500x10000006, 0x2028025, 0x2402feff, 0x822024,
90510x3c02ffdf, 0x3442ffff, 0x2028024, 0x121140,
90520x3c010002, 0x220821, 0x8c229008, 0x10400005,
90530x3c020001, 0xc23025, 0x3c020080, 0x10000016,
90540x2028025, 0x3c02fffe, 0x3442ffff, 0xc23024,
90550x3c02ff7f, 0x3442ffff, 0x1000000f, 0x2028024,
90560x2402fedf, 0x822024, 0x3c02fffe, 0x3442ffff,
90570xc23024, 0x3c02ff5f, 0x3442ffff, 0x2028024,
90580x3c010002, 0x230821, 0xac209000, 0x3c010002,
90590x230821, 0xac209008, 0xaf840200, 0xaf860220,
90600x8f820220, 0x34420002, 0xaf820220, 0x1000000a,
90610x121140, 0x3c02bfff, 0x3442ffff, 0x8f830200,
90620x2028024, 0x2402fffd, 0x621824, 0xc003daf,
90630xaf830200, 0x121140, 0x3c010002, 0x220821,
90640x100000b7, 0xac308ff4, 0x3c020001, 0x8c426f1c,
90650x10400069, 0x24050004, 0x24040001, 0xc00457c,
90660x27a60018, 0x24040001, 0x24050005, 0xc00457c,
90670x27a6001a, 0x97a30018, 0x97a2001a, 0x3c040001,
90680x24846e48, 0x30630c00, 0x31a82, 0x30420c00,
90690x21282, 0xa7a2001a, 0x21080, 0x441021,
90700x431021, 0xa7a30018, 0x90480000, 0x24020001,
90710x3103ffff, 0x10620029, 0x28620002, 0x10400005,
90720x0, 0x10600009, 0x0, 0x1000003d,
90730x0, 0x10700013, 0x24020003, 0x1062002c,
90740x0, 0x10000037, 0x0, 0x8f820200,
90750x2403feff, 0x431024, 0xaf820200, 0x8f820220,
90760x3c03fffe, 0x3463ffff, 0x431024, 0xaf820220,
90770x3c010002, 0xac209004, 0x3c010002, 0x10000032,
90780xac20900c, 0x8f820200, 0x34420100, 0xaf820200,
90790x8f820220, 0x3c03fffe, 0x3463ffff, 0x431024,
90800xaf820220, 0x24020100, 0x3c010002, 0xac229004,
90810x3c010002, 0x10000024, 0xac20900c, 0x8f820200,
90820x2403feff, 0x431024, 0xaf820200, 0x8f820220,
90830x3c030001, 0x431025, 0xaf820220, 0x3c010002,
90840xac209004, 0x3c010002, 0x10000017, 0xac23900c,
90850x8f820200, 0x34420100, 0xaf820200, 0x8f820220,
90860x3c030001, 0x431025, 0xaf820220, 0x24020100,
90870x3c010002, 0xac229004, 0x3c010002, 0x1000000a,
90880xac23900c, 0x3c040001, 0x24846c80, 0x97a6001a,
90890x97a70018, 0x3c050001, 0x34a5ffff, 0xafa80010,
90900xc002b3b, 0xafa00014, 0x8f820200, 0x34420002,
90910x1000004b, 0xaf820200, 0x128940, 0x3c050002,
90920xb12821, 0x8ca58ff8, 0x3c100002, 0x2118021,
90930x8e108ff0, 0x3c024000, 0xa21024, 0x14400010,
90940x0, 0x3c020001, 0x8c426f1c, 0x14400005,
90950x3c02bfff, 0x8f820200, 0x34420002, 0xaf820200,
90960x3c02bfff, 0x3442ffff, 0xc003daf, 0x2028024,
90970x3c010002, 0x310821, 0x10000031, 0xac308ff0,
90980x3c020001, 0x8c426f1c, 0x10400005, 0x3c020020,
90990x3c020001, 0x8c426e44, 0x10400025, 0x3c020020,
91000xa21024, 0x10400007, 0x34840020, 0x24020100,
91010x3c010002, 0x310821, 0xac229004, 0x10000006,
91020x34840100, 0x3c010002, 0x310821, 0xac209004,
91030x2402feff, 0x822024, 0x3c020080, 0xa21024,
91040x10400007, 0x121940, 0x3c020001, 0x3c010002,
91050x230821, 0xac22900c, 0x10000008, 0xc23025,
91060x121140, 0x3c010002, 0x220821, 0xac20900c,
91070x3c02fffe, 0x3442ffff, 0xc23024, 0xaf840200,
91080xaf860220, 0x8f820220, 0x34420002, 0xaf820220,
91090x121140, 0x3c010002, 0x220821, 0xac308ff0,
91100x8fbf002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
91110x3e00008, 0x27bd0030, 0x0, 0x1821,
91120x308400ff, 0x2405ffdf, 0x2406ffbf, 0x641007,
91130x30420001, 0x10400004, 0x0, 0x8f820044,
91140x10000003, 0x34420040, 0x8f820044, 0x461024,
91150xaf820044, 0x8f820044, 0x34420020, 0xaf820044,
91160x8f820044, 0x451024, 0xaf820044, 0x24630001,
91170x28620008, 0x5440ffee, 0x641007, 0x3e00008,
91180x0, 0x2c820008, 0x1040001b, 0x0,
91190x2405ffdf, 0x2406ffbf, 0x41880, 0x3c020001,
91200x24426e60, 0x621821, 0x24640004, 0x90620000,
91210x10400004, 0x0, 0x8f820044, 0x10000003,
91220x34420040, 0x8f820044, 0x461024, 0xaf820044,
91230x8f820044, 0x34420020, 0xaf820044, 0x8f820044,
91240x451024, 0xaf820044, 0x24630001, 0x64102b,
91250x1440ffee, 0x0, 0x3e00008, 0x0,
91260x0, 0x0, 0x0, 0x8f8400c4,
91270x8f8600e0, 0x8f8700e4, 0x2402fff8, 0xc22824,
91280x10e5001a, 0x27623ff8, 0x14e20002, 0x24e80008,
91290x27683000, 0x55050004, 0x8d0a0000, 0x30c20004,
91300x14400012, 0x805021, 0x8ce90000, 0x8f42013c,
91310x1494823, 0x49182b, 0x94eb0006, 0x10600002,
91320x25630050, 0x494821, 0x123182b, 0x50400003,
91330x8f4201fc, 0x3e00008, 0xe01021, 0xaf8800e8,
91340x24420001, 0xaf4201fc, 0xaf8800e4, 0x3e00008,
91350x1021, 0x3e00008, 0x0, 0x8f8300e4,
91360x27623ff8, 0x10620004, 0x24620008, 0xaf8200e8,
91370x3e00008, 0xaf8200e4, 0x27623000, 0xaf8200e8,
91380x3e00008, 0xaf8200e4, 0x3e00008, 0x0,
91390x0, 0x0, 0x0, 0x8f880120,
91400x27624fe0, 0x8f830128, 0x15020002, 0x25090020,
91410x27694800, 0x11230012, 0x8fa20010, 0xad040000,
91420xad050004, 0xad060008, 0xa507000e, 0x8fa30014,
91430xad020018, 0x8fa20018, 0xad03001c, 0x25030016,
91440xad020010, 0xad030014, 0xaf890120, 0x8f4300fc,
91450x24020001, 0x2463ffff, 0x3e00008, 0xaf4300fc,
91460x8f430324, 0x1021, 0x24630001, 0x3e00008,
91470xaf430324, 0x3e00008, 0x0, 0x8f880100,
91480x276247e0, 0x8f830108, 0x15020002, 0x25090020,
91490x27694000, 0x1123000f, 0x8fa20010, 0xad040000,
91500xad050004, 0xad060008, 0xa507000e, 0x8fa30014,
91510xad020018, 0x8fa20018, 0xad03001c, 0x25030016,
91520xad020010, 0xad030014, 0xaf890100, 0x3e00008,
91530x24020001, 0x8f430328, 0x1021, 0x24630001,
91540x3e00008, 0xaf430328, 0x3e00008, 0x0,
91550x0, 0x0, 0x0, 0x0 };
static u32 tigon2FwRodata[(MAX_RODATA_LEN/4) + 1] __devinitdata = {
0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x6e2f6677, 0x6d61696e, 0x2e632c76, 0x20312e31,
0x2e322e34, 0x35203139, 0x39392f30, 0x312f3234,
0x2030303a, 0x31303a35, 0x35207368, 0x75616e67,
0x20457870, 0x20240000, 0x65767452, 0x6e674600,
0x51657674, 0x46000000, 0x51657674, 0x505f4600,
0x4d657674, 0x526e6746, 0x0, 0x4d516576,
0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
0x51725072, 0x6f644600, 0x6261644d, 0x656d537a,
0x0, 0x68775665, 0x72000000, 0x62616448,
0x77566572, 0x0, 0x2a2a4441, 0x574e5f41,
0x0, 0x74785278, 0x4266537a, 0x0,
0x62664174, 0x6e4d726b, 0x0, 0x7265645a,
0x6f6e6531, 0x0, 0x70636943, 0x6f6e6600,
0x67656e43, 0x6f6e6600, 0x2a646d61, 0x5244666c,
0x0, 0x2a50414e, 0x49432a00, 0x2e2e2f2e,
0x2e2f2e2e, 0x2f2e2e2f, 0x2e2e2f73, 0x72632f6e,
0x69632f66, 0x77322f63, 0x6f6d6d6f, 0x6e2f6677,
0x6d61696e, 0x2e630000, 0x72636246, 0x6c616773,
0x0, 0x62616452, 0x78526362, 0x0,
0x676c6f62, 0x466c6773, 0x0, 0x2b5f6469,
0x73705f6c, 0x6f6f7000, 0x2b65765f, 0x68616e64,
0x6c657200, 0x63616e74, 0x31446d61, 0x0,
0x2b715f64, 0x6d615f74, 0x6f5f6e69, 0x635f636b,
0x73756d00, 0x2b685f73, 0x656e645f, 0x64617461,
0x5f726561, 0x64795f63, 0x6b73756d, 0x0,
0x2b685f64, 0x6d615f72, 0x645f6173, 0x73697374,
0x5f636b73, 0x756d0000, 0x74436b73, 0x6d4f6e00,
0x2b715f64, 0x6d615f74, 0x6f5f6e69, 0x63000000,
0x2b685f73, 0x656e645f, 0x64617461, 0x5f726561,
0x64790000, 0x2b685f64, 0x6d615f72, 0x645f6173,
0x73697374, 0x0, 0x74436b73, 0x6d4f6666,
0x0, 0x2b685f73, 0x656e645f, 0x62645f72,
0x65616479, 0x0, 0x68737453, 0x52696e67,
0x0, 0x62616453, 0x52696e67, 0x0,
0x6e696353, 0x52696e67, 0x0, 0x77446d61,
0x416c6c41, 0x0, 0x2b715f64, 0x6d615f74,
0x6f5f686f, 0x73745f63, 0x6b73756d, 0x0,
0x2b685f6d, 0x61635f72, 0x785f636f, 0x6d705f63,
0x6b73756d, 0x0, 0x2b685f64, 0x6d615f77,
0x725f6173, 0x73697374, 0x5f636b73, 0x756d0000,
0x72436b73, 0x6d4f6e00, 0x2b715f64, 0x6d615f74,
0x6f5f686f, 0x73740000, 0x2b685f6d, 0x61635f72,
0x785f636f, 0x6d700000, 0x2b685f64, 0x6d615f77,
0x725f6173, 0x73697374, 0x0, 0x72436b73,
0x6d4f6666, 0x0, 0x2b685f72, 0x6563765f,
0x62645f72, 0x65616479, 0x0, 0x2b685f72,
0x6563765f, 0x6a756d62, 0x6f5f6264, 0x5f726561,
0x64790000, 0x2b685f72, 0x6563765f, 0x6d696e69,
0x5f62645f, 0x72656164, 0x79000000, 0x2b6d685f,
0x636f6d6d, 0x616e6400, 0x2b685f74, 0x696d6572,
0x0, 0x2b685f64, 0x6f5f7570, 0x64617465,
0x5f74785f, 0x636f6e73, 0x0, 0x2b685f64,
0x6f5f7570, 0x64617465, 0x5f72785f, 0x70726f64,
0x0, 0x2b636b73, 0x756d3136, 0x0,
0x2b706565, 0x6b5f6d61, 0x635f7278, 0x5f776100,
0x2b706565, 0x6b5f6d61, 0x635f7278, 0x0,
0x2b646571, 0x5f6d6163, 0x5f727800, 0x2b685f6d,
0x61635f72, 0x785f6174, 0x746e0000, 0x62616452,
0x6574537a, 0x0, 0x72784264, 0x4266537a,
0x0, 0x2b6e756c, 0x6c5f6861, 0x6e646c65,
0x72000000, 0x66774f70, 0x4661696c, 0x0,
0x2b685f75, 0x70646174, 0x655f6c65, 0x64340000,
0x2b685f75, 0x70646174, 0x655f6c65, 0x64360000,
0x2b685f75, 0x70646174, 0x655f6c65, 0x64320000,
0x696e7453, 0x74617465, 0x0, 0x2a2a696e,
0x69744370, 0x0, 0x23736372, 0x65616d00,
0x69537461, 0x636b4572, 0x0, 0x70726f62,
0x654d656d, 0x0, 0x2a2a4441, 0x574e5f42,
0x0, 0x2b73775f, 0x646d615f, 0x61737369,
0x73745f70, 0x6c75735f, 0x74696d65, 0x72000000,
0x2b267072, 0x656c6f61, 0x645f7772, 0x5f646573,
0x63720000, 0x2b267072, 0x656c6f61, 0x645f7264,
0x5f646573, 0x63720000, 0x2b685f68, 0x665f7469,
0x6d657200, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x6e2f7469, 0x6d65722e, 0x632c7620, 0x312e312e,
0x322e3335, 0x20313939, 0x392f3031, 0x2f323720,
0x31393a30, 0x393a3530, 0x20686179, 0x65732045,
0x78702024, 0x0, 0x65767452, 0x6e674600,
0x51657674, 0x46000000, 0x51657674, 0x505f4600,
0x4d657674, 0x526e6746, 0x0, 0x4d516576,
0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
0x51725072, 0x6f644600, 0x542d446d, 0x61526432,
0x0, 0x542d446d, 0x61526431, 0x0,
0x542d446d, 0x61526442, 0x0, 0x542d446d,
0x61577232, 0x0, 0x542d446d, 0x61577231,
0x0, 0x542d446d, 0x61577242, 0x0,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x6e2f636f, 0x6d6d616e, 0x642e632c, 0x7620312e,
0x312e322e, 0x32382031, 0x3939392f, 0x30312f32,
0x30203139, 0x3a34393a, 0x34392073, 0x6875616e,
0x67204578, 0x70202400, 0x65767452, 0x6e674600,
0x51657674, 0x46000000, 0x51657674, 0x505f4600,
0x4d657674, 0x526e6746, 0x0, 0x4d516576,
0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
0x51725072, 0x6f644600, 0x3f48636d, 0x644d6278,
0x0, 0x3f636d64, 0x48737453, 0x0,
0x3f636d64, 0x4d634d64, 0x0, 0x3f636d64,
0x50726f6d, 0x0, 0x3f636d64, 0x4c696e6b,
0x0, 0x3f636d64, 0x45727200, 0x86ac,
0x8e5c, 0x8e5c, 0x8de4, 0x8b78,
0x8e30, 0x8e5c, 0x8790, 0x8800,
0x8990, 0x8a68, 0x8a34, 0x8e5c,
0x8870, 0x8b24, 0x8e5c, 0x8b34,
0x87b4, 0x8824, 0x0, 0x0,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x6e2f6d63, 0x6173742e, 0x632c7620, 0x312e312e,
0x322e3820, 0x31393938, 0x2f31322f, 0x30382030,
0x323a3336, 0x3a333620, 0x73687561, 0x6e672045,
0x78702024, 0x0, 0x65767452, 0x6e674600,
0x51657674, 0x46000000, 0x51657674, 0x505f4600,
0x4d657674, 0x526e6746, 0x0, 0x4d516576,
0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
0x51725072, 0x6f644600, 0x6164644d, 0x63447570,
0x0, 0x6164644d, 0x6346756c, 0x0,
0x64656c4d, 0x634e6f45, 0x0, 0x0,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x6e2f646d, 0x612e632c, 0x7620312e, 0x312e322e,
0x32342031, 0x3939382f, 0x31322f32, 0x31203030,
0x3a33333a, 0x30392073, 0x6875616e, 0x67204578,
0x70202400, 0x65767452, 0x6e674600, 0x51657674,
0x46000000, 0x51657674, 0x505f4600, 0x4d657674,
0x526e6746, 0x0, 0x4d516576, 0x74460000,
0x4d516576, 0x505f4600, 0x5173436f, 0x6e495f46,
0x0, 0x5173436f, 0x6e734600, 0x51725072,
0x6f644600, 0x7377446d, 0x614f6666, 0x0,
0x31446d61, 0x4f6e0000, 0x7377446d, 0x614f6e00,
0x2372446d, 0x6141544e, 0x0, 0x72446d61,
0x41544e30, 0x0, 0x72446d61, 0x41544e31,
0x0, 0x72446d61, 0x34476200, 0x2a50414e,
0x49432a00, 0x2e2e2f2e, 0x2e2f2e2e, 0x2f2e2e2f,
0x2e2e2f73, 0x72632f6e, 0x69632f66, 0x77322f63,
0x6f6d6d6f, 0x6e2f646d, 0x612e6300, 0x2377446d,
0x6141544e, 0x0, 0x77446d61, 0x41544e30,
0x0, 0x77446d61, 0x41544e31, 0x0,
0x77446d61, 0x34476200, 0x0, 0x0,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x6e2f7472, 0x6163652e, 0x632c7620, 0x312e312e,
0x322e3520, 0x31393938, 0x2f30392f, 0x33302031,
0x383a3530, 0x3a323820, 0x73687561, 0x6e672045,
0x78702024, 0x0, 0x0, 0x0,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x6e2f6461, 0x74612e63, 0x2c762031, 0x2e312e32,
0x2e313220, 0x31393939, 0x2f30312f, 0x32302031,
0x393a3439, 0x3a353120, 0x73687561, 0x6e672045,
0x78702024, 0x0, 0x46575f56, 0x45525349,
0x4f4e3a20, 0x23312046, 0x72692041, 0x70722037,
0x2031373a, 0x35373a35, 0x32205044, 0x54203230,
0x30300000, 0x46575f43, 0x4f4d5049, 0x4c455f54,
0x494d453a, 0x2031373a, 0x35373a35, 0x32000000,
0x46575f43, 0x4f4d5049, 0x4c455f42, 0x593a2064,
0x65767263, 0x73000000, 0x46575f43, 0x4f4d5049,
0x4c455f48, 0x4f53543a, 0x20636f6d, 0x70757465,
0x0, 0x46575f43, 0x4f4d5049, 0x4c455f44,
0x4f4d4149, 0x4e3a2065, 0x6e672e61, 0x6374656f,
0x6e2e636f, 0x6d000000, 0x46575f43, 0x4f4d5049,
0x4c45523a, 0x20676363, 0x20766572, 0x73696f6e,
0x20322e37, 0x2e320000, 0x0, 0x12041100,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x6e2f6d65, 0x6d2e632c, 0x7620312e, 0x312e322e,
0x35203139, 0x39382f30, 0x392f3330, 0x2031383a,
0x35303a30, 0x38207368, 0x75616e67, 0x20457870,
0x20240000, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x6e2f7365, 0x6e642e63, 0x2c762031, 0x2e312e32,
0x2e343420, 0x31393938, 0x2f31322f, 0x32312030,
0x303a3333, 0x3a313820, 0x73687561, 0x6e672045,
0x78702024, 0x0, 0x65767452, 0x6e674600,
0x51657674, 0x46000000, 0x51657674, 0x505f4600,
0x4d657674, 0x526e6746, 0x0, 0x4d516576,
0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
0x51725072, 0x6f644600, 0x69736e74, 0x54637055,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x6e2f7265, 0x63762e63, 0x2c762031, 0x2e312e32,
0x2e353320, 0x31393939, 0x2f30312f, 0x31362030,
0x323a3535, 0x3a343320, 0x73687561, 0x6e672045,
0x78702024, 0x0, 0x65767452, 0x6e674600,
0x51657674, 0x46000000, 0x51657674, 0x505f4600,
0x4d657674, 0x526e6746, 0x0, 0x4d516576,
0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
0x51725072, 0x6f644600, 0x724d6163, 0x43686b30,
0x0, 0x72784672, 0x6d324c67, 0x0,
0x72784e6f, 0x53744264, 0x0, 0x72784e6f,
0x4d694264, 0x0, 0x72784e6f, 0x4a6d4264,
0x0, 0x7278436b, 0x446d6146, 0x0,
0x72785144, 0x6d457846, 0x0, 0x72785144,
0x6d614600, 0x72785144, 0x4c426446, 0x0,
0x72785144, 0x6d426446, 0x0, 0x72784372,
0x63506164, 0x0, 0x72536d51, 0x446d6146,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x6e2f6d61, 0x632e632c, 0x7620312e, 0x312e322e,
0x32322031, 0x3939382f, 0x31322f30, 0x38203032,
0x3a33363a, 0x33302073, 0x6875616e, 0x67204578,
0x70202400, 0x65767452, 0x6e674600, 0x51657674,
0x46000000, 0x51657674, 0x505f4600, 0x4d657674,
0x526e6746, 0x0, 0x4d516576, 0x74460000,
0x4d516576, 0x505f4600, 0x5173436f, 0x6e495f46,
0x0, 0x5173436f, 0x6e734600, 0x51725072,
0x6f644600, 0x6d616354, 0x68726573, 0x0,
0x23744d61, 0x6341544e, 0x0, 0x23724d61,
0x6341544e, 0x0, 0x72656d41, 0x73737274,
0x0, 0x6c696e6b, 0x444f574e, 0x0,
0x6c696e6b, 0x55500000, 0x0, 0x0,
0x0, 0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x6e2f636b, 0x73756d2e, 0x632c7620, 0x312e312e,
0x322e3920, 0x31393939, 0x2f30312f, 0x31342030,
0x303a3033, 0x3a343820, 0x73687561, 0x6e672045,
0x78702024, 0x0, 0x65767452, 0x6e674600,
0x51657674, 0x46000000, 0x51657674, 0x505f4600,
0x4d657674, 0x526e6746, 0x0, 0x4d516576,
0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
0x51725072, 0x6f644600, 0x0, 0x0,
0x0, 0x50726f62, 0x65506879, 0x0,
0x6c6e6b41, 0x53535254, 0x0, 0x109a4,
0x10a1c, 0x10a50, 0x10a7c, 0x11050,
0x10aa8, 0x10b10, 0x111fc, 0x10dc0,
0x10c68, 0x10c80, 0x10cc4, 0x10cec,
0x10d0c, 0x10d34, 0x111fc, 0x10dc0,
0x10df8, 0x10e10, 0x10e40, 0x10e68,
0x10e88, 0x10eb0, 0x0, 0x10fdc,
0x11008, 0x1102c, 0x111fc, 0x11050,
0x11078, 0x11108, 0x0, 0x0,
0x0, 0x1186c, 0x1193c, 0x11a14,
0x11ae4, 0x11b40, 0x11c1c, 0x11c44,
0x11d20, 0x11d48, 0x11ef0, 0x11f18,
0x120c0, 0x122b8, 0x1254c, 0x12460,
0x1254c, 0x12578, 0x120e8, 0x12290,
0x7273745f, 0x676d6969, 0x0, 0x12608,
0x12640, 0x12728, 0x13374, 0x133b4,
0x133cc, 0x7365746c, 0x6f6f7000, 0x0,
0x0, 0x13bbc, 0x13bfc, 0x13c8c,
0x13cd0, 0x13d34, 0x13dc0, 0x13df4,
0x13e7c, 0x13f14, 0x13fe4, 0x14024,
0x140a8, 0x140cc, 0x141dc, 0x646f4261,
0x73655067, 0x0, 0x0, 0x0,
0x0, 0x73746d61, 0x634c4e4b, 0x0,
0x6765746d, 0x636c6e6b, 0x0, 0x14ed8,
0x14ed8, 0x14b8c, 0x14bd8, 0x14c24,
0x14ed8, 0x7365746d, 0x61636163, 0x74000000,
0x0, 0x0 };
static u32 tigon2FwData[(MAX_DATA_LEN/4) + 1] __devinitdata = {
0x1,
0x1, 0x1, 0xc001fc, 0x3ffc,
0xc00000, 0x416c7465, 0x6f6e2041, 0x63654e49,
0x43205600, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x416c7465,
0x6f6e2041, 0x63654e49, 0x43205600, 0x42424242,
0x0, 0x0, 0x0, 0x1ffffc,
0x1fff7c, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x60cf00,
0x60, 0xcf000000, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x3, 0x0,
0x1, 0x0, 0x0, 0x0,
0x1, 0x0, 0x1, 0x0,
0x0, 0x0, 0x0, 0x1,
0x1, 0x0, 0x0, 0x0,
0x0, 0x0, 0x1000000, 0x21000000,
0x12000140, 0x0, 0x0, 0x20000000,
0x120000a0, 0x0, 0x12000060, 0x12000180,
0x120001e0, 0x0, 0x0, 0x0,
0x1, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x2,
0x0, 0x0, 0x30001, 0x1,
0x30201, 0x0, 0x0, 0x1010101,
0x1010100, 0x10100, 0x1010001, 0x10001,
0x1000101, 0x101, 0x0, 0x0 };
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 187ac6eb6e94..7709992bb6bf 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1813,6 +1813,25 @@ static void __devinit amd8111e_probe_ext_phy(struct net_device* dev)
 	lp->ext_phy_addr = 1;
 }
 
+static const struct net_device_ops amd8111e_netdev_ops = {
+	.ndo_open = amd8111e_open,
+	.ndo_stop = amd8111e_close,
+	.ndo_start_xmit = amd8111e_start_xmit,
+	.ndo_tx_timeout = amd8111e_tx_timeout,
+	.ndo_get_stats = amd8111e_get_stats,
+	.ndo_set_multicast_list = amd8111e_set_multicast_list,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = amd8111e_set_mac_address,
+	.ndo_do_ioctl = amd8111e_ioctl,
+	.ndo_change_mtu = amd8111e_change_mtu,
+#if AMD8111E_VLAN_TAG_USED
+	.ndo_vlan_rx_register = amd8111e_vlan_rx_register,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = amd8111e_poll,
+#endif
+};
+
 static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -1872,7 +1891,6 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
 
 #if AMD8111E_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
-	dev->vlan_rx_register =amd8111e_vlan_rx_register;
 #endif
 
 	lp = netdev_priv(dev);
@@ -1901,27 +1919,16 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
 	if(dynamic_ipg[card_idx++])
 		lp->options |= OPTION_DYN_IPG_ENABLE;
 
+
 	/* Initialize driver entry points */
-	dev->open = amd8111e_open;
-	dev->hard_start_xmit = amd8111e_start_xmit;
-	dev->stop = amd8111e_close;
-	dev->get_stats = amd8111e_get_stats;
-	dev->set_multicast_list = amd8111e_set_multicast_list;
-	dev->set_mac_address = amd8111e_set_mac_address;
-	dev->do_ioctl = amd8111e_ioctl;
-	dev->change_mtu = amd8111e_change_mtu;
+	dev->netdev_ops = &amd8111e_netdev_ops;
 	SET_ETHTOOL_OPS(dev, &ops);
 	dev->irq =pdev->irq;
-	dev->tx_timeout = amd8111e_tx_timeout;
 	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
 	netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = amd8111e_poll;
-#endif
 
 #if AMD8111E_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-	dev->vlan_rx_register =amd8111e_vlan_rx_register;
 #endif
 	/* Probe the external PHY */
 	amd8111e_probe_ext_phy(dev);
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 9a0be9b2eaad..da64ba88d7f8 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -48,12 +48,18 @@ static int ipddp_mode = IPDDP_DECAP;
 
 /* Index to functions, as function prototypes. */
 static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev);
-static struct net_device_stats *ipddp_get_stats(struct net_device *dev);
 static int ipddp_create(struct ipddp_route *new_rt);
 static int ipddp_delete(struct ipddp_route *rt);
 static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt);
 static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 
+static const struct net_device_ops ipddp_netdev_ops = {
+	.ndo_start_xmit = ipddp_xmit,
+	.ndo_do_ioctl = ipddp_ioctl,
+	.ndo_change_mtu = eth_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+};
 
 static struct net_device * __init ipddp_init(void)
 {
@@ -61,7 +67,7 @@ static struct net_device * __init ipddp_init(void)
 	struct net_device *dev;
 	int err;
 
-	dev = alloc_etherdev(sizeof(struct net_device_stats));
+	dev = alloc_etherdev(0);
 	if (!dev)
 		return ERR_PTR(-ENOMEM);
 
@@ -71,9 +77,7 @@ static struct net_device * __init ipddp_init(void)
 		printk(version);
 
 	/* Initalize the device structure. */
-	dev->hard_start_xmit = ipddp_xmit;
-	dev->get_stats = ipddp_get_stats;
-	dev->do_ioctl = ipddp_ioctl;
+	dev->netdev_ops = &ipddp_netdev_ops;
 
 	dev->type = ARPHRD_IPDDP;	/* IP over DDP tunnel */
 	dev->mtu = 585;
@@ -103,13 +107,6 @@ static struct net_device * __init ipddp_init(void)
 	return dev;
 }
 
-/*
- * Get the current statistics. This may be called with the card open or closed.
- */
-static struct net_device_stats *ipddp_get_stats(struct net_device *dev)
-{
-	return netdev_priv(dev);
-}
 
 /*
  * Transmit LLAP/ELAP frame using aarp_send_ddp.
@@ -170,8 +167,8 @@ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	skb->protocol = htons(ETH_P_ATALK);	/* Protocol has changed */
 
-	((struct net_device_stats *) netdev_priv(dev))->tx_packets++;
-	((struct net_device_stats *) netdev_priv(dev))->tx_bytes += skb->len;
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
 
 	if(aarp_send_ddp(rt->dev, skb, &rt->at, NULL) < 0)
 		dev_kfree_skb(skb);
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index ea493ce23982..4317b3edb3d7 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -204,8 +204,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_id);
 static void net_rx(struct net_device *dev);
 static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode);
 static int net_close(struct net_device *dev);
-static void set_rx_mode_8002(struct net_device *dev);
-static void set_rx_mode_8012(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
 static void tx_timeout(struct net_device *dev);
 
 
@@ -242,6 +241,17 @@ static int __init atp_init(void)
 	return -ENODEV;
 }
 
+static const struct net_device_ops atp_netdev_ops = {
+	.ndo_open = net_open,
+	.ndo_stop = net_close,
+	.ndo_start_xmit = atp_send_packet,
+	.ndo_set_multicast_list = set_rx_mode,
+	.ndo_tx_timeout = tx_timeout,
+	.ndo_change_mtu = eth_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
 static int __init atp_probe1(long ioaddr)
 {
 	struct net_device *dev = NULL;
@@ -342,12 +352,7 @@ static int __init atp_probe1(long ioaddr)
 	if (dev->mem_end & 0xf)
 		net_debug = dev->mem_end & 7;
 
-	dev->open = net_open;
-	dev->stop = net_close;
-	dev->hard_start_xmit = atp_send_packet;
-	dev->set_multicast_list =
-		lp->chip_type == RTL8002 ? &set_rx_mode_8002 : &set_rx_mode_8012;
-	dev->tx_timeout = tx_timeout;
+	dev->netdev_ops = &atp_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	res = register_netdev(dev);
@@ -903,6 +908,17 @@ static void set_rx_mode_8012(struct net_device *dev)
 	write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */
 }
 
+static void set_rx_mode(struct net_device *dev)
+{
+	struct net_local *lp = netdev_priv(dev);
+
+	if (lp->chip_type == RTL8002)
+		return set_rx_mode_8002(dev);
+	else
+		return set_rx_mode_8012(dev);
+}
+
+
 static int __init atp_init_module(void) {
 	if (debug)	/* Emit version even if no cards detected. */
 		printk(KERN_INFO "%s", version);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 0e7470a201f0..6926ebedfdc9 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2108,6 +2108,22 @@ static int __devinit b44_get_invariants(struct b44 *bp)
 	return err;
 }
 
+static const struct net_device_ops b44_netdev_ops = {
+	.ndo_open = b44_open,
+	.ndo_stop = b44_close,
+	.ndo_start_xmit = b44_start_xmit,
+	.ndo_get_stats = b44_get_stats,
+	.ndo_set_multicast_list = b44_set_rx_mode,
+	.ndo_set_mac_address = b44_set_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_do_ioctl = b44_ioctl,
+	.ndo_tx_timeout = b44_tx_timeout,
+	.ndo_change_mtu = b44_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = b44_poll_controller,
+#endif
+};
+
 static int __devinit b44_init_one(struct ssb_device *sdev,
 				  const struct ssb_device_id *ent)
 {
@@ -2145,20 +2161,9 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 	bp->rx_pending = B44_DEF_RX_RING_PENDING;
 	bp->tx_pending = B44_DEF_TX_RING_PENDING;
 
-	dev->open = b44_open;
-	dev->stop = b44_close;
-	dev->hard_start_xmit = b44_start_xmit;
-	dev->get_stats = b44_get_stats;
-	dev->set_multicast_list = b44_set_rx_mode;
-	dev->set_mac_address = b44_set_mac_addr;
-	dev->do_ioctl = b44_ioctl;
-	dev->tx_timeout = b44_tx_timeout;
+	dev->netdev_ops = &b44_netdev_ops;
 	netif_napi_add(dev, &bp->napi, b44_poll, 64);
 	dev->watchdog_timeo = B44_TX_TIMEOUT;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = b44_poll_controller;
-#endif
-	dev->change_mtu = b44_change_mtu;
 	dev->irq = sdev->irq;
 	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
 
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index ef8103b3523e..4be05847f86f 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -8243,6 +8243,9 @@ static int bnx2x_set_eeprom(struct net_device *dev,
 	struct bnx2x *bp = netdev_priv(dev);
 	int rc;
 
+	if (!netif_running(dev))
+		return -EAGAIN;
+
 	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
 	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
 	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 460c2cad2755..9fb388388fb7 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4148,7 +4148,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
 
 	bond_for_each_slave(bond, slave, i) {
 		pr_debug("s %p s->p %p c_m %p\n", slave,
-			 slave->prev, slave->dev->change_mtu);
+			 slave->prev, slave->dev->netdev_ops->ndo_change_mtu);
 
 		res = dev_set_mtu(slave->dev, new_mtu);
 
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 321f43d9f0e2..840b3d1a22f5 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4977,6 +4977,22 @@ static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
 	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
 }
 
+static const struct net_device_ops cas_netdev_ops = {
+	.ndo_open = cas_open,
+	.ndo_stop = cas_close,
+	.ndo_start_xmit = cas_start_xmit,
+	.ndo_get_stats = cas_get_stats,
+	.ndo_set_multicast_list = cas_set_multicast,
+	.ndo_do_ioctl = cas_ioctl,
+	.ndo_tx_timeout = cas_tx_timeout,
+	.ndo_change_mtu = cas_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = cas_netpoll,
+#endif
+};
+
 static int __devinit cas_init_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -5166,22 +5182,13 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 	for (i = 0; i < N_RX_FLOWS; i++)
 		skb_queue_head_init(&cp->rx_flows[i]);
 
-	dev->open = cas_open;
-	dev->stop = cas_close;
-	dev->hard_start_xmit = cas_start_xmit;
-	dev->get_stats = cas_get_stats;
-	dev->set_multicast_list = cas_set_multicast;
-	dev->do_ioctl = cas_ioctl;
+	dev->netdev_ops = &cas_netdev_ops;
 	dev->ethtool_ops = &cas_ethtool_ops;
-	dev->tx_timeout = cas_tx_timeout;
 	dev->watchdog_timeo = CAS_TX_TIMEOUT;
-	dev->change_mtu = cas_change_mtu;
+
 #ifdef USE_NAPI
 	netif_napi_add(dev, &cp->napi, cas_poll, 64);
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = cas_netpoll;
-#endif
 	dev->irq = pdev->irq;
 	dev->dma = 0;
 
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index 970f820ba814..de63f1d41d32 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -378,6 +378,16 @@ static void de600_rx_intr(struct net_device *dev)
  */
 }
 
+static const struct net_device_ops de600_netdev_ops = {
+	.ndo_open		= de600_open,
+	.ndo_stop		= de600_close,
+	.ndo_start_xmit		= de600_start_xmit,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+
 static struct net_device * __init de600_probe(void)
 {
 	int i;
@@ -439,9 +449,7 @@ static struct net_device * __init de600_probe(void)
 
 	printk(", Ethernet Address: %pM\n", dev->dev_addr);
 
-	dev->open = de600_open;
-	dev->stop = de600_close;
-	dev->hard_start_xmit = &de600_start_xmit;
+	dev->netdev_ops = &de600_netdev_ops;
 
 	dev->flags&=~IFF_MULTICAST;
 
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index bdfa89403389..d52f34cc9526 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -784,6 +784,17 @@ static int adapter_init(struct net_device *dev)
 	return 0; /* all ok */
 }
 
+static const struct net_device_ops de620_netdev_ops = {
+	.ndo_open		= de620_open,
+	.ndo_stop		= de620_close,
+	.ndo_start_xmit		= de620_start_xmit,
+	.ndo_tx_timeout		= de620_timeout,
+	.ndo_set_multicast_list	= de620_set_multicast_list,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 /******************************************************************************
  *
  * Only start-up code below
@@ -861,12 +872,8 @@ struct net_device * __init de620_probe(int unit)
 	else
 		printk(" UTP)\n");
 
-	dev->open = de620_open;
-	dev->stop = de620_close;
-	dev->hard_start_xmit = de620_start_xmit;
-	dev->tx_timeout = de620_timeout;
+	dev->netdev_ops = &de620_netdev_ops;
 	dev->watchdog_timeo	= HZ*2;
-	dev->set_multicast_list = de620_set_multicast_list;
 
 	/* base_addr and irq are already set, see above! */
 
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 134b2d60b479..86bb876fb123 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -161,6 +161,7 @@
 #include <linux/skbuff.h>
 #include <linux/ethtool.h>
 #include <linux/string.h>
+#include <linux/firmware.h>
 #include <asm/unaligned.h>
 
 
@@ -174,10 +175,17 @@
 #define E100_WATCHDOG_PERIOD	(2 * HZ)
 #define E100_NAPI_WEIGHT	16
 
+#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
+#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
+#define FIRMWARE_D102E		"e100/d102e_ucode.bin"
+
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_AUTHOR(DRV_COPYRIGHT);
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
+MODULE_FIRMWARE(FIRMWARE_D101M);
+MODULE_FIRMWARE(FIRMWARE_D101S);
+MODULE_FIRMWARE(FIRMWARE_D102E);
 
 static int debug = 3;
 static int eeprom_bad_csum_allow = 0;
@@ -1049,178 +1057,6 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 	   c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
 }
 
-/********************************************************/
-/*  Micro code for 8086:1229 Rev 8                      */
-/********************************************************/
-
-/*  Parameter values for the D101M B-step  */
-#define D101M_CPUSAVER_TIMER_DWORD		78
-#define D101M_CPUSAVER_BUNDLE_DWORD		65
-#define D101M_CPUSAVER_MIN_SIZE_DWORD		126
-
-#define D101M_B_RCVBUNDLE_UCODE \
-{\
-0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
-0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
-0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
-0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
-0x00380438, 0x00000000, 0x00140000, 0x00380555, \
-0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
-0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
-0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
-0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
-0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
-0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
-0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
-0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
-0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
-0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
-0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
-0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
-0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
-0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
-0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
-0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
-0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
-0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
-0x00380559, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
-0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
-0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
-}
-
-/********************************************************/
-/*  Micro code for 8086:1229 Rev 9                      */
-/********************************************************/
-
-/*  Parameter values for the D101S  */
-#define D101S_CPUSAVER_TIMER_DWORD		78
-#define D101S_CPUSAVER_BUNDLE_DWORD		67
-#define D101S_CPUSAVER_MIN_SIZE_DWORD		128
-
-#define D101S_RCVBUNDLE_UCODE \
-{\
-0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
-0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
-0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
-0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
-0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
-0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
-0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
-0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
-0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
-0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
-0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
-0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
-0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
-0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
-0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
-0x00101313, 0x00380700, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
-0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
-0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
-0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
-0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
-0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
-0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
-0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
-0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
-0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00130831, \
-0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
-0x00041000, 0x00010004, 0x00380700  \
-}
-
-/********************************************************/
-/*  Micro code for the 8086:1229 Rev F/10               */
-/********************************************************/
-
-/*  Parameter values for the D102 E-step  */
-#define D102_E_CPUSAVER_TIMER_DWORD		42
-#define D102_E_CPUSAVER_BUNDLE_DWORD		54
-#define D102_E_CPUSAVER_MIN_SIZE_DWORD		46
-
-#define D102_E_RCVBUNDLE_UCODE \
-{\
-0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
-0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
-0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
-0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
-0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
-0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
-0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
-0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
-0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-}
-
-static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
-{
-/* *INDENT-OFF* */
-	static struct {
-		u32 ucode[UCODE_SIZE + 1];
-		u8 mac;
-		u8 timer_dword;
-		u8 bundle_dword;
-		u8 min_size_dword;
-	} ucode_opts[] = {
-		{ D101M_B_RCVBUNDLE_UCODE,
-		  mac_82559_D101M,
-		  D101M_CPUSAVER_TIMER_DWORD,
-		  D101M_CPUSAVER_BUNDLE_DWORD,
-		  D101M_CPUSAVER_MIN_SIZE_DWORD },
-		{ D101S_RCVBUNDLE_UCODE,
-		  mac_82559_D101S,
-		  D101S_CPUSAVER_TIMER_DWORD,
-		  D101S_CPUSAVER_BUNDLE_DWORD,
-		  D101S_CPUSAVER_MIN_SIZE_DWORD },
-		{ D102_E_RCVBUNDLE_UCODE,
-		  mac_82551_F,
-		  D102_E_CPUSAVER_TIMER_DWORD,
-		  D102_E_CPUSAVER_BUNDLE_DWORD,
-		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
-		{ D102_E_RCVBUNDLE_UCODE,
-		  mac_82551_10,
-		  D102_E_CPUSAVER_TIMER_DWORD,
-		  D102_E_CPUSAVER_BUNDLE_DWORD,
-		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
-		{ {0}, 0, 0, 0, 0}
-	}, *opts;
-/* *INDENT-ON* */
-
 /*************************************************************************
 *  CPUSaver parameters
 *
@@ -1280,42 +1116,101 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb
 #define BUNDLEMAX (u16)6
 #define INTDELAY (u16)1536 /* 0x600 */
 
+/* Initialize firmware */
+static const struct firmware *e100_request_firmware(struct nic *nic)
+{
+	const char *fw_name;
+	const struct firmware *fw;
+	u8 timer, bundle, min_size;
+	int err;
+
 	/* do not load u-code for ICH devices */
 	if (nic->flags & ich)
-		goto noloaducode;
+		return NULL;
 
 	/* Search for ucode match against h/w revision */
-	for (opts = ucode_opts; opts->mac; opts++) {
-		int i;
-		u32 *ucode = opts->ucode;
-		if (nic->mac != opts->mac)
-			continue;
-
-		/* Insert user-tunable settings */
-		ucode[opts->timer_dword] &= 0xFFFF0000;
-		ucode[opts->timer_dword] |= INTDELAY;
-		ucode[opts->bundle_dword] &= 0xFFFF0000;
-		ucode[opts->bundle_dword] |= BUNDLEMAX;
-		ucode[opts->min_size_dword] &= 0xFFFF0000;
-		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;
-
-		for (i = 0; i < UCODE_SIZE; i++)
-			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
-		cb->command = cpu_to_le16(cb_ucode | cb_el);
-		return;
-	}
-
-noloaducode:
-	cb->command = cpu_to_le16(cb_nop | cb_el);
-}
+	if (nic->mac == mac_82559_D101M)
+		fw_name = FIRMWARE_D101M;
+	else if (nic->mac == mac_82559_D101S)
+		fw_name = FIRMWARE_D101S;
+	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
+		fw_name = FIRMWARE_D102E;
+	else /* No ucode on other devices */
+		return NULL;
+
+	err = request_firmware(&fw, fw_name, &nic->pdev->dev);
+	if (err) {
+		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
+			fw_name, err);
+		return ERR_PTR(err);
+	}
+	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
+	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
+	if (fw->size != UCODE_SIZE * 4 + 3) {
+		DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
+			fw_name, fw->size);
+		release_firmware(fw);
+		return ERR_PTR(-EINVAL);
+	}
 
-static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
-	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
-{
+	/* Read timer, bundle and min_size from end of firmware blob */
+	timer = fw->data[UCODE_SIZE * 4];
+	bundle = fw->data[UCODE_SIZE * 4 + 1];
+	min_size = fw->data[UCODE_SIZE * 4 + 2];
+
+	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
+	    min_size >= UCODE_SIZE) {
+		DPRINTK(PROBE, ERR,
+			"\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
+			fw_name, timer, bundle, min_size);
+		release_firmware(fw);
+		return ERR_PTR(-EINVAL);
+	}
+	/* OK, firmware is validated and ready to use... */
+	return fw;
+}
+
+static void e100_setup_ucode(struct nic *nic, struct cb *cb,
+			     struct sk_buff *skb)
+{
+	const struct firmware *fw = (void *)skb;
+	u8 timer, bundle, min_size;
+
+	/* It's not a real skb; we just abused the fact that e100_exec_cb
+	   will pass it through to here... */
+	cb->skb = NULL;
+
+	/* firmware is stored as little endian already */
+	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
+
+	/* Read timer, bundle and min_size from end of firmware blob */
+	timer = fw->data[UCODE_SIZE * 4];
+	bundle = fw->data[UCODE_SIZE * 4 + 1];
+	min_size = fw->data[UCODE_SIZE * 4 + 2];
+
+	/* Insert user-tunable settings in cb->u.ucode */
+	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
+	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
+	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
+	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
+	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
+	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
+
+	cb->command = cpu_to_le16(cb_ucode | cb_el);
+}
+
+static inline int e100_load_ucode_wait(struct nic *nic)
+{
+	const struct firmware *fw;
 	int err = 0, counter = 50;
 	struct cb *cb = nic->cb_to_clean;
 
-	if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
+	fw = e100_request_firmware(nic);
+	/* If it's NULL, then no ucode is required */
+	if (!fw || IS_ERR(fw))
+		return PTR_ERR(fw);
+
+	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
 		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
 
 	/* must restart cuc */
@@ -1435,7 +1330,7 @@ static int e100_hw_init(struct nic *nic)
 		return err;
 	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
 		return err;
-	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
+	if ((err = e100_load_ucode_wait(nic)))
 		return err;
 	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
 		return err;
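The e100 change above removes the three microcode arrays from the driver and fetches them at runtime with request_firmware(); the blob format is UCODE_SIZE little-endian 32-bit words followed by three bytes giving the dword offsets of the timer, bundle and min-size tunables, and the driver rejects anything else. A condensed sketch of that load-and-validate flow, with the surrounding device hypothetical:

	#include <linux/firmware.h>

	static int foo_load_ucode(struct device *dev)
	{
		const struct firmware *fw;
		int err;

		err = request_firmware(&fw, "e100/d101m_ucode.bin", dev);
		if (err)
			return err;	/* no file, or userspace timed out */

		/* UCODE_SIZE words of microcode plus three offset bytes */
		if (fw->size != UCODE_SIZE * 4 + 3) {
			release_firmware(fw);
			return -EINVAL;
		}

		/* copy fw->data into the command block, then patch the
		 * tunables at the offsets stored in the trailing bytes */

		release_firmware(fw);
		return 0;
	}

Bounds-checking the three trailing offsets against UCODE_SIZE, as the patch does, matters: they index into the command block, so a corrupt blob could otherwise scribble outside it.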
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 2a33a613d9e6..8fe9dcaa7538 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -214,7 +214,7 @@ u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
 			     u64 *qp_handle, struct h_epas *h_epas)
 {
 	u64 hret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	u64 allocate_controls =
 	    EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
@@ -312,7 +312,7 @@ u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
 			     u64 *cq_handle, struct h_epas *epas)
 {
 	u64 hret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
 				 outs,
@@ -374,7 +374,7 @@ u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
 			     struct ehea_eq_attr *eq_attr, u64 *eq_handle)
 {
 	u64 hret, allocate_controls;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	/* resource type */
 	allocate_controls =
@@ -407,7 +407,7 @@ u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
 			  u16 *out_swr, u16 *out_rwr)
 {
 	u64 hret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
 				 outs,
@@ -449,7 +449,7 @@ u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
 			struct ehea_mr *mr)
 {
 	u64 hret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	hret = ehea_plpar_hcall9(H_REGISTER_SMR,
 				 outs,
@@ -468,7 +468,7 @@ u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
 
 u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
 {
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
 				 outs,
@@ -493,7 +493,7 @@ u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
 			     const u32 pd, u64 *mr_handle, u32 *lkey)
 {
 	u64 hret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
 				 outs,
@@ -564,7 +564,7 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
 			    const u8 cb_cat, const u64 select_mask,
 			    void *cb_addr)
 {
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 	u64 port_info;
 	u64 arr_index = 0;
 	u64 cb_logaddr = virt_to_abs(cb_addr);
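All eight ehea hunks fix the same type mismatch: ehea_plpar_hcall9() ends in plpar_hcall9(), whose return buffer is an array of unsigned long, not u64. The two types are the same width on 64-bit PowerPC, which is why this ever worked, but the pointer types differ and the declaration should match the interface it feeds. In outline, assuming the pSeries hypervisor-call glue as declared in this kernel:

	/* return-buffer contract of the hcall glue (asm/hvcall.h) */
	#define PLPAR_HCALL9_BUFSIZE 9
	long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);

	/* hence the output buffer wants the exact parameter type */
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];	/* not u64 */

The ibmveth hunks below make the matching fix on the caller side, turning u64 locals and ULL constants into unsigned long and UL.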
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index cefe1d98f93e..fc6cc038c7b8 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1531,6 +1531,17 @@ static int enc28j60_chipset_init(struct net_device *dev)
 	return enc28j60_hw_init(priv);
 }
 
+static const struct net_device_ops enc28j60_netdev_ops = {
+	.ndo_open		= enc28j60_net_open,
+	.ndo_stop		= enc28j60_net_close,
+	.ndo_start_xmit		= enc28j60_send_packet,
+	.ndo_set_multicast_list	= enc28j60_set_multicast_list,
+	.ndo_set_mac_address	= enc28j60_set_mac_address,
+	.ndo_tx_timeout		= enc28j60_tx_timeout,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit enc28j60_probe(struct spi_device *spi)
 {
 	struct net_device *dev;
@@ -1585,12 +1596,7 @@ static int __devinit enc28j60_probe(struct spi_device *spi)
 
 	dev->if_port = IF_PORT_10BASET;
 	dev->irq = spi->irq;
-	dev->open = enc28j60_net_open;
-	dev->stop = enc28j60_net_close;
-	dev->hard_start_xmit = enc28j60_send_packet;
-	dev->set_multicast_list = &enc28j60_set_multicast_list;
-	dev->set_mac_address = enc28j60_set_mac_address;
-	dev->tx_timeout = &enc28j60_tx_timeout;
+	dev->netdev_ops = &enc28j60_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 	SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops);
 
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index f9b37c80dda6..a539bc3163cf 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -308,7 +308,18 @@ static int epic_close(struct net_device *dev);
 static struct net_device_stats *epic_get_stats(struct net_device *dev);
 static void set_rx_mode(struct net_device *dev);
 
-
+static const struct net_device_ops epic_netdev_ops = {
+	.ndo_open		= epic_open,
+	.ndo_stop		= epic_close,
+	.ndo_start_xmit		= epic_start_xmit,
+	.ndo_tx_timeout		= epic_tx_timeout,
+	.ndo_get_stats		= epic_get_stats,
+	.ndo_set_multicast_list	= set_rx_mode,
+	.ndo_do_ioctl		= netdev_ioctl,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
 
 static int __devinit epic_init_one (struct pci_dev *pdev,
 				    const struct pci_device_id *ent)
@@ -483,15 +494,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 	dev->if_port = ep->default_port = option;
 
 	/* The Epic-specific entries in the device structure. */
-	dev->open = &epic_open;
-	dev->hard_start_xmit = &epic_start_xmit;
-	dev->stop = &epic_close;
-	dev->get_stats = &epic_get_stats;
-	dev->set_multicast_list = &set_rx_mode;
-	dev->do_ioctl = &netdev_ioctl;
+	dev->netdev_ops = &epic_netdev_ops;
 	dev->ethtool_ops = &netdev_ethtool_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
-	dev->tx_timeout = &epic_tx_timeout;
 	netif_napi_add(dev, &ep->napi, epic_poll, 64);
 
 	ret = register_netdev(dev);
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 31ab1ff623fc..daf7272c3352 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -467,6 +467,18 @@ static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
 	}
 }
 
+static const struct net_device_ops netdev_ops = {
+	.ndo_open		= netdev_open,
+	.ndo_stop		= netdev_close,
+	.ndo_start_xmit		= start_tx,
+	.ndo_get_stats		= get_stats,
+	.ndo_set_multicast_list	= set_rx_mode,
+	.ndo_do_ioctl		= mii_ioctl,
+	.ndo_tx_timeout		= fealnx_tx_timeout,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
 
 static int __devinit fealnx_init_one(struct pci_dev *pdev,
 				     const struct pci_device_id *ent)
@@ -649,15 +661,8 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
 		np->mii.force_media = 1;
 	}
 
-	/* The chip-specific entries in the device structure. */
-	dev->open = &netdev_open;
-	dev->hard_start_xmit = &start_tx;
-	dev->stop = &netdev_close;
-	dev->get_stats = &get_stats;
-	dev->set_multicast_list = &set_rx_mode;
-	dev->do_ioctl = &mii_ioctl;
+	dev->netdev_ops = &netdev_ops;
 	dev->ethtool_ops = &netdev_ethtool_ops;
-	dev->tx_timeout = &fealnx_tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	err = register_netdev(dev);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c672ecfc9595..1b8deca8b9f8 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -238,8 +238,8 @@ static int gfar_of_init(struct net_device *dev)
 			goto err_out;
 		}
 
-		snprintf(priv->phy_bus_id, BUS_ID_SIZE, PHY_ID_FMT, "0",
-				fixed_link[0]);
+		snprintf(priv->phy_bus_id, sizeof(priv->phy_bus_id),
+				PHY_ID_FMT, "0", fixed_link[0]);
 	} else {
 		phy = of_find_node_by_phandle(*ph);
 
@@ -256,7 +256,7 @@ static int gfar_of_init(struct net_device *dev)
 		of_node_put(mdio);
 
 		gfar_mdio_bus_name(bus_name, mdio);
-		snprintf(priv->phy_bus_id, BUS_ID_SIZE, "%s:%02x",
+		snprintf(priv->phy_bus_id, sizeof(priv->phy_bus_id), "%s:%02x",
 				bus_name, *id);
 	}
 
@@ -1973,6 +1973,8 @@ static void adjust_link(struct net_device *dev)
 			case 1000:
 				tempval =
 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+				ecntrl &= ~(ECNTRL_R100);
 				break;
 			case 100:
 			case 10:
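The two snprintf() fixes in gfar_of_init() size the write from the destination buffer itself instead of the BUS_ID_SIZE constant; sizeof(dst) stays correct even if the field's declaration changes, while a named constant can silently drift out of sync. The general shape, with a hypothetical field:

	char phy_bus_id[32];	/* illustrative declaration */

	/* bound the write by the destination, not by a separate constant */
	snprintf(phy_bus_id, sizeof(phy_bus_id), "%s:%02x", bus_name, id);

The third hunk is unrelated: when adjust_link() renegotiates up to 1000 Mbit it must also clear the leftover ECNTRL_R100 bit, since that register, not MACCFG2, carries the 100 Mbit flag.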
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index ebe7651fcb86..ad8be7e78290 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -425,6 +425,28 @@ struct net_device * __init hp100_probe(int unit)
 }
 #endif /* !MODULE && CONFIG_ISA */
 
+static const struct net_device_ops hp100_bm_netdev_ops = {
+	.ndo_open		= hp100_open,
+	.ndo_stop		= hp100_close,
+	.ndo_start_xmit		= hp100_start_xmit_bm,
+	.ndo_get_stats		= hp100_get_stats,
+	.ndo_set_multicast_list	= hp100_set_multicast_list,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static const struct net_device_ops hp100_netdev_ops = {
+	.ndo_open		= hp100_open,
+	.ndo_stop		= hp100_close,
+	.ndo_start_xmit		= hp100_start_xmit,
+	.ndo_get_stats		= hp100_get_stats,
+	.ndo_set_multicast_list	= hp100_set_multicast_list,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit hp100_probe1(struct net_device *dev, int ioaddr,
 				  u_char bus, struct pci_dev *pci_dev)
 {
@@ -657,16 +679,10 @@ static int __devinit hp100_probe1(struct net_device *dev, int ioaddr,
 	lp->virt_memory_size = virt_memory_size;
 	lp->rx_ratio = hp100_rx_ratio;	/* can be conf'd with insmod */
 
-	dev->open = hp100_open;
-	dev->stop = hp100_close;
-
 	if (lp->mode == 1)	/* busmaster */
-		dev->hard_start_xmit = hp100_start_xmit_bm;
+		dev->netdev_ops = &hp100_bm_netdev_ops;
 	else
-		dev->hard_start_xmit = hp100_start_xmit;
-
-	dev->get_stats = hp100_get_stats;
-	dev->set_multicast_list = &hp100_set_multicast_list;
+		dev->netdev_ops = &hp100_netdev_ops;
 
 	/* Ask the card for which IRQ line it is configured */
 	if (bus == HP100_BUS_PCI) {
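hp100 shows the one wrinkle in the ops conversion: the old probe code chose hard_start_xmit per device (busmaster versus programmed I/O), and a shared const table cannot be edited per instance, so the driver now carries two complete tables and selects one at probe. A sketch of the same idea, with hypothetical foo_* names:

	static const struct net_device_ops foo_pio_ops = {
		.ndo_open	= foo_open,
		.ndo_stop	= foo_close,
		.ndo_start_xmit	= foo_start_xmit_pio,
	};

	static const struct net_device_ops foo_bm_ops = {
		.ndo_open	= foo_open,
		.ndo_stop	= foo_close,
		.ndo_start_xmit	= foo_start_xmit_bm,
	};

	static void foo_pick_ops(struct net_device *dev, bool busmaster)
	{
		dev->netdev_ops = busmaster ? &foo_bm_ops : &foo_pio_ops;
	}

Duplicating the common entries is the accepted cost; the alternative, a writable per-device table, would defeat the point of sharing one const structure.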
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 9bc0f178f24b..ca3bb9f7321b 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -754,7 +754,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
 			    void (*done) (struct net_device *, u32))
 {
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
-	u64 set_attr, clr_attr, ret_attr;
+	unsigned long set_attr, clr_attr, ret_attr;
 	long ret;
 	int rc1 = 0, rc2 = 0;
 	int restart = 0;
@@ -1209,7 +1209,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 	long ret;
 	struct net_device *netdev;
 	struct ibmveth_adapter *adapter;
-	u64 set_attr, ret_attr;
+	unsigned long set_attr, ret_attr;
 
 	unsigned char *mac_addr_p;
 	unsigned int *mcastFilterSize_p;
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index d28186948752..ec76ace66c6b 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -39,11 +39,11 @@
 #define IbmVethMcastRemoveFilter	0x2UL
 #define IbmVethMcastClearFilterTable	0x3UL
 
-#define IBMVETH_ILLAN_PADDED_PKT_CSUM	0x0000000000002000ULL
-#define IBMVETH_ILLAN_TRUNK_PRI_MASK	0x0000000000000F00ULL
-#define IBMVETH_ILLAN_IPV6_TCP_CSUM	0x0000000000000004ULL
-#define IBMVETH_ILLAN_IPV4_TCP_CSUM	0x0000000000000002ULL
-#define IBMVETH_ILLAN_ACTIVE_TRUNK	0x0000000000000001ULL
+#define IBMVETH_ILLAN_PADDED_PKT_CSUM	0x0000000000002000UL
+#define IBMVETH_ILLAN_TRUNK_PRI_MASK	0x0000000000000F00UL
+#define IBMVETH_ILLAN_IPV6_TCP_CSUM	0x0000000000000004UL
+#define IBMVETH_ILLAN_IPV4_TCP_CSUM	0x0000000000000002UL
+#define IBMVETH_ILLAN_ACTIVE_TRUNK	0x0000000000000001UL
 
 /* hcall macros */
 #define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 7b6d435a8468..360aa5e35fda 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -2210,6 +2210,19 @@ static void __devexit ipg_remove(struct pci_dev *pdev)
 	pci_set_drvdata(pdev, NULL);
 }
 
+static const struct net_device_ops ipg_netdev_ops = {
+	.ndo_open		= ipg_nic_open,
+	.ndo_stop		= ipg_nic_stop,
+	.ndo_start_xmit		= ipg_nic_hard_start_xmit,
+	.ndo_get_stats		= ipg_nic_get_stats,
+	.ndo_set_multicast_list	= ipg_nic_set_multicast_list,
+	.ndo_do_ioctl		= ipg_ioctl,
+	.ndo_tx_timeout		= ipg_tx_timeout,
+	.ndo_change_mtu		= ipg_nic_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit ipg_probe(struct pci_dev *pdev,
 			       const struct pci_device_id *id)
 {
@@ -2258,15 +2271,7 @@ static int __devinit ipg_probe(struct pci_dev *pdev,
 
 	/* Declare IPG NIC functions for Ethernet device methods.
 	 */
-	dev->open = &ipg_nic_open;
-	dev->stop = &ipg_nic_stop;
-	dev->hard_start_xmit = &ipg_nic_hard_start_xmit;
-	dev->get_stats = &ipg_nic_get_stats;
-	dev->set_multicast_list = &ipg_nic_set_multicast_list;
-	dev->do_ioctl = ipg_ioctl;
-	dev->tx_timeout = ipg_tx_timeout;
-	dev->change_mtu = &ipg_nic_change_mtu;
-
+	dev->netdev_ops = &ipg_netdev_ops;
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
 
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 3c58e67ef1e4..17779f9bffc4 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -109,7 +109,6 @@ static int ali_ircc_net_open(struct net_device *dev);
 static int  ali_ircc_net_close(struct net_device *dev);
 static int  ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
-static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev);
 
 /* SIR function */
 static int  ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -366,7 +365,6 @@ static int ali_ircc_open(int i, chipio_t *info)
 	dev->open = ali_ircc_net_open;
 	dev->stop = ali_ircc_net_close;
 	dev->do_ioctl = ali_ircc_net_ioctl;
-	dev->get_stats = ali_ircc_net_get_stats;
 
 	err = register_netdev(dev);
 	if (err) {
@@ -876,7 +874,7 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
 	 * async_unwrap_char will deliver all found frames
 	 */
 	do {
-		async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
+		async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
 				  inb(iobase+UART_RX));
 
 		/* Make sure we don't stay here too long */
@@ -943,7 +941,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
 		netif_wake_queue(self->netdev);
 	}
 
-	self->stats.tx_packets++;
+	self->netdev->stats.tx_packets++;
 
 	/* Turn on receive interrupts */
 	outb(UART_IER_RDI, iobase+UART_IER);
@@ -1467,7 +1465,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
 	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
 	self->tx_fifo.tail += skb->len;
 
-	self->stats.tx_bytes += skb->len;
+	dev->stats.tx_bytes += skb->len;
 
 	skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
 		      skb->len);
@@ -1661,12 +1659,12 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
 
 	{
 		IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__);
-		self->stats.tx_errors++;
-		self->stats.tx_fifo_errors++;
+		self->netdev->stats.tx_errors++;
+		self->netdev->stats.tx_fifo_errors++;
 	}
 	else
 	{
-		self->stats.tx_packets++;
+		self->netdev->stats.tx_packets++;
 	}
 
 	/* Check if we need to change the speed */
@@ -1831,35 +1829,35 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
 			IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ );
 
 			/* Skip frame */
-			self->stats.rx_errors++;
+			self->netdev->stats.rx_errors++;
 
 			self->rx_buff.data += len;
 
 			if (status & LSR_FIFO_UR)
 			{
-				self->stats.rx_frame_errors++;
+				self->netdev->stats.rx_frame_errors++;
 				IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ );
 			}
 			if (status & LSR_FRAME_ERROR)
 			{
-				self->stats.rx_frame_errors++;
+				self->netdev->stats.rx_frame_errors++;
 				IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ );
 			}
 
 			if (status & LSR_CRC_ERROR)
 			{
-				self->stats.rx_crc_errors++;
+				self->netdev->stats.rx_crc_errors++;
 				IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ );
 			}
 
 			if(self->rcvFramesOverflow)
 			{
-				self->stats.rx_frame_errors++;
+				self->netdev->stats.rx_frame_errors++;
 				IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ );
 			}
 			if(len == 0)
 			{
-				self->stats.rx_frame_errors++;
+				self->netdev->stats.rx_frame_errors++;
 				IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ );
 			}
 		}
@@ -1910,7 +1908,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
 				IRDA_WARNING("%s(), memory squeeze, "
 					     "dropping frame.\n",
 					     __func__);
-				self->stats.rx_dropped++;
+				self->netdev->stats.rx_dropped++;
 
 				return FALSE;
 			}
@@ -1924,8 +1922,8 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
 
 			/* Move to next frame */
 			self->rx_buff.data += len;
-			self->stats.rx_bytes += len;
-			self->stats.rx_packets++;
+			self->netdev->stats.rx_bytes += len;
+			self->netdev->stats.rx_packets++;
 
 			skb->dev = self->netdev;
 			skb_reset_mac_header(skb);
@@ -1994,7 +1992,7 @@ static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
 	self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
 					   self->tx_buff.truesize);
 
-	self->stats.tx_bytes += self->tx_buff.len;
+	self->netdev->stats.tx_bytes += self->tx_buff.len;
 
 	/* Turn on transmit finished interrupt. Will fire immediately! */
 	outb(UART_IER_THRI, iobase+UART_IER);
@@ -2111,17 +2109,6 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
 	return status;
 }
 
-static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev)
-{
-	struct ali_ircc_cb *self = netdev_priv(dev);
-
-	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
-
-	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
-
-	return &self->stats;
-}
-
 static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
 {
 	struct ali_ircc_cb *self = platform_get_drvdata(dev);
diff --git a/drivers/net/irda/ali-ircc.h b/drivers/net/irda/ali-ircc.h
index ed35d99763d5..0c8edb41bd0a 100644
--- a/drivers/net/irda/ali-ircc.h
+++ b/drivers/net/irda/ali-ircc.h
@@ -191,7 +191,6 @@ struct ali_ircc_cb {
 	struct tx_fifo tx_fifo;	/* Info about frames to be transmitted */
 
 	struct net_device *netdev;	/* Yes! we are some kind of netdevice */
-	struct net_device_stats stats;
 
 	struct irlap_cb *irlap;	/* The link layer we are binded to */
 	struct qos_info qos;	/* QoS capabilities for this device */
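From ali-ircc down through ksdazzle the IrDA drivers all take the same two steps: delete the private struct net_device_stats copy (and the get_stats accessor that returned it) and bump the counters embedded in struct net_device instead, which the core reports by default when a driver registers no accessor. In outline, with a hypothetical foo driver:

	/* before: private counters plus a trivial accessor, both removed */
	struct foo_cb {
		struct net_device *netdev;
		struct net_device_stats stats;	/* gone after this series */
	};

	/* after: count straight into the embedded structure */
	static void foo_count_rx(struct net_device *dev, unsigned int len)
	{
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}

One subtlety visible in the kingsun and ks959 hunks: completion handlers that only hold the private structure reach the counters through self->netdev->stats, while paths that already have the net_device use dev->stats directly.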
diff --git a/drivers/net/irda/au1000_ircc.h b/drivers/net/irda/au1000_ircc.h
index b4763f24dded..c072c09a8d91 100644
--- a/drivers/net/irda/au1000_ircc.h
+++ b/drivers/net/irda/au1000_ircc.h
@@ -107,7 +107,6 @@ struct au1k_private {
 	iobuff_t rx_buff;
 
 	struct net_device *netdev;
-	struct net_device_stats stats;
 
 	struct timeval stamp;
 	struct timeval now;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 6c4b53ffbcac..75a1d0a86dee 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -53,7 +53,6 @@ static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *);
 static int au1k_irda_rx(struct net_device *);
 static void au1k_irda_interrupt(int, void *);
 static void au1k_tx_timeout(struct net_device *);
-static struct net_device_stats *au1k_irda_stats(struct net_device *);
 static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
 static int au1k_irda_set_speed(struct net_device *dev, int speed);
 
@@ -213,7 +212,6 @@ static int au1k_irda_net_init(struct net_device *dev)
 	dev->open = au1k_irda_start;
 	dev->hard_start_xmit = au1k_irda_hard_xmit;
 	dev->stop = au1k_irda_stop;
-	dev->get_stats = au1k_irda_stats;
 	dev->do_ioctl = au1k_irda_ioctl;
 	dev->tx_timeout = au1k_tx_timeout;
 
@@ -832,13 +830,6 @@ au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
 	return ret;
 }
 
-
-static struct net_device_stats *au1k_irda_stats(struct net_device *dev)
-{
-	struct au1k_private *aup = netdev_priv(dev);
-	return &aup->stats;
-}
-
 MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
 MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
 
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 1e67720f1066..0dbd1932b72f 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -308,7 +308,6 @@ struct OboeRing
 struct toshoboe_cb
 {
   struct net_device *netdev;    /* Yes! we are some kind of netdevice */
-  struct net_device_stats stats;
   struct tty_driver ttydev;
 
   struct irlap_cb *irlap;       /* The link layer we are binded to */
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 205e4e825a97..29118f58a141 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -122,7 +122,6 @@ static int irda_usb_net_open(struct net_device *dev);
 static int irda_usb_net_close(struct net_device *dev);
 static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void irda_usb_net_timeout(struct net_device *dev);
-static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev);
 
 /************************ TRANSMIT ROUTINES ************************/
 /*
@@ -525,13 +524,13 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
 	/* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
 	if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
 		IRDA_WARNING("%s(), failed Tx URB\n", __func__);
-		self->stats.tx_errors++;
+		netdev->stats.tx_errors++;
 		/* Let USB recover : We will catch that in the watchdog */
 		/*netif_start_queue(netdev);*/
 	} else {
 		/* Increment packet stats */
-		self->stats.tx_packets++;
-		self->stats.tx_bytes += skb->len;
+		netdev->stats.tx_packets++;
+		netdev->stats.tx_bytes += skb->len;
 
 		netdev->trans_start = jiffies;
 	}
@@ -677,7 +676,7 @@ static void irda_usb_net_timeout(struct net_device *netdev)
 	IRDA_DEBUG(0, "%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags);
 
 	/* Increase error count */
-	self->stats.tx_errors++;
+	netdev->stats.tx_errors++;
 
 #ifdef IU_BUG_KICK_TIMEOUT
 	/* Can't be a bad idea to reset the speed ;-) - Jean II */
@@ -826,7 +825,7 @@ static void irda_usb_receive(struct urb *urb)
 	if (urb->status != 0) {
 		switch (urb->status) {
 		case -EILSEQ:
-			self->stats.rx_crc_errors++;
+			self->netdev->stats.rx_crc_errors++;
 			/* Also precursor to a hot-unplug on UHCI. */
 			/* Fallthrough... */
 		case -ECONNRESET:
@@ -839,7 +838,7 @@ static void irda_usb_receive(struct urb *urb)
 		case -ETIME:
 			/* Usually precursor to a hot-unplug on OHCI. */
 		default:
-			self->stats.rx_errors++;
+			self->netdev->stats.rx_errors++;
 			IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags);
 			break;
 		}
@@ -890,7 +889,7 @@ static void irda_usb_receive(struct urb *urb)
 					    IRDA_SKB_MAX_MTU);
 
 		if (!newskb) {
-			self->stats.rx_dropped++;
+			self->netdev->stats.rx_dropped++;
 			/* We could deliver the current skb, but this would stall
 			 * the Rx path. Better drop the packet... Jean II */
 			goto done;
@@ -927,8 +926,8 @@ static void irda_usb_receive(struct urb *urb)
 	netif_rx(dataskb);
 
 	/* Keep stats up to date */
-	self->stats.rx_bytes += len;
-	self->stats.rx_packets++;
+	self->netdev->stats.rx_bytes += len;
+	self->netdev->stats.rx_packets++;
 
 done:
 	/* Note : at this point, the URB we've just received (urb)
@@ -1342,14 +1341,6 @@ static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 }
 
 /*------------------------------------------------------------------*/
-/*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev)
-{
-	struct irda_usb_cb *self = netdev_priv(dev);
-	return &self->stats;
-}
 
 /********************* IRDA CONFIG SUBROUTINES *********************/
 /*
@@ -1428,7 +1419,6 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
 	netdev->watchdog_timeo = 250*HZ/1000;	/* 250 ms > USB timeout */
 	netdev->open = irda_usb_net_open;
 	netdev->stop = irda_usb_net_close;
-	netdev->get_stats = irda_usb_net_get_stats;
 	netdev->do_ioctl = irda_usb_net_ioctl;
 
 	return register_netdev(netdev);
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index a0ca9c1fe196..ac0443d52e50 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -152,7 +152,6 @@ struct irda_usb_cb {
 	struct urb *speed_urb;		/* URB used to send speed commands */
 
 	struct net_device *netdev;	/* Yes! we are some kind of netdev. */
-	struct net_device_stats stats;
 	struct irlap_cb *irlap;		/* The link layer we are binded to */
 	struct qos_info qos;
 	char *speed_buff;		/* Buffer for speed changes */
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index c747c874d44d..b4a61717254a 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -105,7 +105,7 @@ struct kingsun_cb {
 	struct usb_device *usbdev;	/* init: probe_irda */
 	struct net_device *netdev;	/* network layer */
 	struct irlap_cb *irlap;		/* The link layer we are binded to */
-	struct net_device_stats stats;	/* network statistics */
+
 	struct qos_info qos;
 
 	__u8 *in_buf;			/* receive buffer */
@@ -186,12 +186,12 @@ static int kingsun_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
 		case -EPIPE:
 			break;
 		default:
-			kingsun->stats.tx_errors++;
+			netdev->stats.tx_errors++;
 			netif_start_queue(netdev);
 		}
 	} else {
-		kingsun->stats.tx_packets++;
-		kingsun->stats.tx_bytes += skb->len;
+		netdev->stats.tx_packets++;
+		netdev->stats.tx_bytes += skb->len;
 	}
 
 	dev_kfree_skb(skb);
@@ -232,7 +232,7 @@ static void kingsun_rcv_irq(struct urb *urb)
 	if (bytes[0] >= 1 && bytes[0] < kingsun->max_rx) {
 		for (i = 1; i <= bytes[0]; i++) {
 			async_unwrap_char(kingsun->netdev,
-					  &kingsun->stats,
+					  &kingsun->netdev->stats,
 					  &kingsun->rx_buff, bytes[i]);
 		}
 		do_gettimeofday(&kingsun->rx_time);
@@ -418,15 +418,6 @@ static int kingsun_net_ioctl(struct net_device *netdev, struct ifreq *rq,
 	return ret;
 }
 
-/*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *
-kingsun_net_get_stats(struct net_device *netdev)
-{
-	struct kingsun_cb *kingsun = netdev_priv(netdev);
-	return &kingsun->stats;
-}
 
 /*
  * This routine is called by the USB subsystem for each new device
@@ -532,7 +523,6 @@ static int kingsun_probe(struct usb_interface *intf,
 	net->hard_start_xmit = kingsun_hard_xmit;
 	net->open = kingsun_net_open;
 	net->stop = kingsun_net_close;
-	net->get_stats = kingsun_net_get_stats;
 	net->do_ioctl = kingsun_net_ioctl;
 
 	ret = register_netdev(net);
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 600d96f9cdb7..55322fb92cf1 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -174,7 +174,7 @@ struct ks959_cb {
 	struct usb_device *usbdev;	/* init: probe_irda */
 	struct net_device *netdev;	/* network layer */
 	struct irlap_cb *irlap;		/* The link layer we are binded to */
-	struct net_device_stats stats;	/* network statistics */
+
 	struct qos_info qos;
 
 	struct usb_ctrlrequest *tx_setuprequest;
@@ -366,7 +366,7 @@ static void ks959_send_irq(struct urb *urb)
366 case -EPIPE: 366 case -EPIPE:
367 break; 367 break;
368 default: 368 default:
369 kingsun->stats.tx_errors++; 369 netdev->stats.tx_errors++;
370 netif_start_queue(netdev); 370 netif_start_queue(netdev);
371 } 371 }
372 } 372 }
@@ -416,12 +416,12 @@ static int ks959_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
416 case -EPIPE: 416 case -EPIPE:
417 break; 417 break;
418 default: 418 default:
419 kingsun->stats.tx_errors++; 419 netdev->stats.tx_errors++;
420 netif_start_queue(netdev); 420 netif_start_queue(netdev);
421 } 421 }
422 } else { 422 } else {
423 kingsun->stats.tx_packets++; 423 netdev->stats.tx_packets++;
424 kingsun->stats.tx_bytes += skb->len; 424 netdev->stats.tx_bytes += skb->len;
425 425
426 } 426 }
427 427
@@ -469,7 +469,7 @@ static void ks959_rcv_irq(struct urb *urb)
469 */ 469 */
470 if (kingsun->rx_variable_xormask != 0) { 470 if (kingsun->rx_variable_xormask != 0) {
471 async_unwrap_char(kingsun->netdev, 471 async_unwrap_char(kingsun->netdev,
472 &kingsun->stats, 472 &kingsun->netdev->stats,
473 &kingsun->rx_unwrap_buff, 473 &kingsun->rx_unwrap_buff,
474 bytes[i]); 474 bytes[i]);
475 } 475 }
@@ -669,15 +669,6 @@ static int ks959_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
669} 669}
670 670
671/* 671/*
672 * Get device stats (for /proc/net/dev and ifconfig)
673 */
674static struct net_device_stats *ks959_net_get_stats(struct net_device *netdev)
675{
676 struct ks959_cb *kingsun = netdev_priv(netdev);
677 return &kingsun->stats;
678}
679
680/*
681 * This routine is called by the USB subsystem for each new device 672 * This routine is called by the USB subsystem for each new device
682 * in the system. We need to check if the device is ours, and in 673 * in the system. We need to check if the device is ours, and in
683 * this case start handling it. 674 * this case start handling it.
@@ -792,7 +783,6 @@ static int ks959_probe(struct usb_interface *intf,
792 net->hard_start_xmit = ks959_hard_xmit; 783 net->hard_start_xmit = ks959_hard_xmit;
793 net->open = ks959_net_open; 784 net->open = ks959_net_open;
794 net->stop = ks959_net_close; 785 net->stop = ks959_net_close;
795 net->get_stats = ks959_net_get_stats;
796 net->do_ioctl = ks959_net_ioctl; 786 net->do_ioctl = ks959_net_ioctl;
797 787
798 ret = register_netdev(net); 788 ret = register_netdev(net);
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 0e7f89337b25..5b327b09acd8 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -140,7 +140,7 @@ struct ksdazzle_cb {
         struct usb_device *usbdev;      /* init: probe_irda */
         struct net_device *netdev;      /* network layer */
         struct irlap_cb *irlap;         /* The link layer we are binded to */
-        struct net_device_stats stats;  /* network statistics */
+
         struct qos_info qos;
 
         struct urb *tx_urb;
@@ -278,7 +278,7 @@ static void ksdazzle_send_irq(struct urb *urb)
                 case -EPIPE:
                         break;
                 default:
-                        kingsun->stats.tx_errors++;
+                        netdev->stats.tx_errors++;
                         netif_start_queue(netdev);
                 }
         }
@@ -329,12 +329,12 @@ static int ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
                 case -EPIPE:
                         break;
                 default:
-                        kingsun->stats.tx_errors++;
+                        netdev->stats.tx_errors++;
                         netif_start_queue(netdev);
                 }
         } else {
-                kingsun->stats.tx_packets++;
-                kingsun->stats.tx_bytes += skb->len;
+                netdev->stats.tx_packets++;
+                netdev->stats.tx_bytes += skb->len;
 
         }
 
@@ -348,9 +348,10 @@ static int ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
 static void ksdazzle_rcv_irq(struct urb *urb)
 {
         struct ksdazzle_cb *kingsun = urb->context;
+        struct net_device *netdev = kingsun->netdev;
 
         /* in process of stopping, just drop data */
-        if (!netif_running(kingsun->netdev)) {
+        if (!netif_running(netdev)) {
                 kingsun->receiving = 0;
                 return;
         }
@@ -368,7 +369,7 @@ static void ksdazzle_rcv_irq(struct urb *urb)
                 unsigned int i;
 
                 for (i = 0; i < urb->actual_length; i++) {
-                        async_unwrap_char(kingsun->netdev, &kingsun->stats,
+                        async_unwrap_char(netdev, &netdev->stats,
                                           &kingsun->rx_unwrap_buff, bytes[i]);
                 }
                 kingsun->receiving =
@@ -562,16 +563,6 @@ static int ksdazzle_net_ioctl(struct net_device *netdev, struct ifreq *rq,
 }
 
 /*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *ksdazzle_net_get_stats(struct net_device
-                                                       *netdev)
-{
-        struct ksdazzle_cb *kingsun = netdev_priv(netdev);
-        return &kingsun->stats;
-}
-
-/*
  * This routine is called by the USB subsystem for each new device
  * in the system. We need to check if the device is ours, and in
  * this case start handling it.
@@ -696,7 +687,6 @@ static int ksdazzle_probe(struct usb_interface *intf,
         net->hard_start_xmit = ksdazzle_hard_xmit;
         net->open = ksdazzle_net_open;
         net->stop = ksdazzle_net_close;
-        net->get_stats = ksdazzle_net_get_stats;
         net->do_ioctl = ksdazzle_net_ioctl;
 
         ret = register_netdev(net);
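
[Editor's note] Besides the stats swap, the @@ -348,9 +348,10 hunk above caches kingsun->netdev in a local variable once per interrupt; with the counters now living behind netdev, every bump would otherwise re-chase that pointer. Condensed from the hunk (receive-path bookkeeping elided):

        static void ksdazzle_rcv_irq(struct urb *urb)
        {
                struct ksdazzle_cb *kingsun = urb->context;
                struct net_device *netdev = kingsun->netdev;    /* cache once */

                if (!netif_running(netdev)) {
                        kingsun->receiving = 0;
                        return;
                }
                /* bytes are then unwrapped against &netdev->stats */
        }
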
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 904c9610c0dd..7eafdca19f34 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -403,8 +403,8 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
         if(unlikely(new_len <= 0)) {
                 IRDA_ERROR("%s short frame length %d\n",
                            mcs->netdev->name, new_len);
-                ++mcs->stats.rx_errors;
-                ++mcs->stats.rx_length_errors;
+                ++mcs->netdev->stats.rx_errors;
+                ++mcs->netdev->stats.rx_length_errors;
                 return;
         }
         fcs = 0;
@@ -413,14 +413,14 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
         if(fcs != GOOD_FCS) {
                 IRDA_ERROR("crc error calc 0x%x len %d\n",
                            fcs, new_len);
-                mcs->stats.rx_errors++;
-                mcs->stats.rx_crc_errors++;
+                mcs->netdev->stats.rx_errors++;
+                mcs->netdev->stats.rx_crc_errors++;
                 return;
         }
 
         skb = dev_alloc_skb(new_len + 1);
         if(unlikely(!skb)) {
-                ++mcs->stats.rx_dropped;
+                ++mcs->netdev->stats.rx_dropped;
                 return;
         }
 
@@ -433,8 +433,8 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
 
         netif_rx(skb);
 
-        mcs->stats.rx_packets++;
-        mcs->stats.rx_bytes += new_len;
+        mcs->netdev->stats.rx_packets++;
+        mcs->netdev->stats.rx_bytes += new_len;
 
         return;
 }
@@ -458,22 +458,22 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
         if(unlikely(new_len <= 0)) {
                 IRDA_ERROR("%s short frame length %d\n",
                            mcs->netdev->name, new_len);
-                ++mcs->stats.rx_errors;
-                ++mcs->stats.rx_length_errors;
+                ++mcs->netdev->stats.rx_errors;
+                ++mcs->netdev->stats.rx_length_errors;
                 return;
         }
 
         fcs = ~(crc32_le(~0, buf, new_len));
         if(fcs != get_unaligned_le32(buf + new_len)) {
                 IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len);
-                mcs->stats.rx_errors++;
-                mcs->stats.rx_crc_errors++;
+                mcs->netdev->stats.rx_errors++;
+                mcs->netdev->stats.rx_crc_errors++;
                 return;
         }
 
         skb = dev_alloc_skb(new_len + 1);
         if(unlikely(!skb)) {
-                ++mcs->stats.rx_dropped;
+                ++mcs->netdev->stats.rx_dropped;
                 return;
         }
 
@@ -486,8 +486,8 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
 
         netif_rx(skb);
 
-        mcs->stats.rx_packets++;
-        mcs->stats.rx_bytes += new_len;
+        mcs->netdev->stats.rx_packets++;
+        mcs->netdev->stats.rx_bytes += new_len;
 
         return;
 }
@@ -756,14 +756,6 @@ static int mcs_net_open(struct net_device *netdev)
         return ret;
 }
 
-
-/* Get device stats for /proc/net/dev and ifconfig */
-static struct net_device_stats *mcs_net_get_stats(struct net_device *netdev)
-{
-        struct mcs_cb *mcs = netdev_priv(netdev);
-        return &mcs->stats;
-}
-
 /* Receive callback function. */
 static void mcs_receive_irq(struct urb *urb)
 {
@@ -786,14 +778,14 @@ static void mcs_receive_irq(struct urb *urb)
          */
         /* SIR speed */
         if(mcs->speed < 576000) {
-                async_unwrap_char(mcs->netdev, &mcs->stats,
+                async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
                                   &mcs->rx_buff, 0xc0);
 
                 for (i = 0; i < urb->actual_length; i++)
-                        async_unwrap_char(mcs->netdev, &mcs->stats,
+                        async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
                                           &mcs->rx_buff, bytes[i]);
 
-                async_unwrap_char(mcs->netdev, &mcs->stats,
+                async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
                                   &mcs->rx_buff, 0xc1);
         }
         /* MIR speed */
@@ -868,12 +860,12 @@ static int mcs_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
                 case -EPIPE:
                         break;
                 default:
-                        mcs->stats.tx_errors++;
+                        mcs->netdev->stats.tx_errors++;
                         netif_start_queue(ndev);
                 }
         } else {
-                mcs->stats.tx_packets++;
-                mcs->stats.tx_bytes += skb->len;
+                mcs->netdev->stats.tx_packets++;
+                mcs->netdev->stats.tx_bytes += skb->len;
         }
 
         dev_kfree_skb(skb);
@@ -931,7 +923,6 @@ static int mcs_probe(struct usb_interface *intf,
         ndev->hard_start_xmit = mcs_hard_xmit;
         ndev->open = mcs_net_open;
         ndev->stop = mcs_net_close;
-        ndev->get_stats = mcs_net_get_stats;
         ndev->do_ioctl = mcs_net_ioctl;
 
         if (!intf->cur_altsetting)
diff --git a/drivers/net/irda/mcs7780.h b/drivers/net/irda/mcs7780.h
index b18148cee638..6bdc621e67c6 100644
--- a/drivers/net/irda/mcs7780.h
+++ b/drivers/net/irda/mcs7780.h
@@ -104,7 +104,6 @@ struct mcs_cb {
         struct usb_device *usbdev;      /* init: probe_irda */
         struct net_device *netdev;      /* network layer */
         struct irlap_cb *irlap;         /* The link layer we are binded to */
-        struct net_device_stats stats;  /* network statistics */
         struct qos_info qos;
         unsigned int speed;             /* Current speed */
         unsigned int new_speed;         /* new speed */
@@ -154,7 +153,6 @@ static int mcs_speed_change(struct mcs_cb *mcs);
 static int mcs_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd);
 static int mcs_net_close(struct net_device *netdev);
 static int mcs_net_open(struct net_device *netdev);
-static struct net_device_stats *mcs_net_get_stats(struct net_device *netdev);
 
 static void mcs_receive_irq(struct urb *urb);
 static void mcs_send_irq(struct urb *urb);
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 2c6bf2d11bb1..61e509cb712a 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -185,7 +185,6 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id);
 static int nsc_ircc_net_open(struct net_device *dev);
 static int nsc_ircc_net_close(struct net_device *dev);
 static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev);
 
 /* Globals */
 static int pnp_registered;
@@ -446,7 +445,6 @@ static int __init nsc_ircc_open(chipio_t *info)
         dev->open = nsc_ircc_net_open;
         dev->stop = nsc_ircc_net_close;
         dev->do_ioctl = nsc_ircc_net_ioctl;
-        dev->get_stats = nsc_ircc_net_get_stats;
 
         err = register_netdev(dev);
         if (err) {
@@ -1401,7 +1399,7 @@ static int nsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
         self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
                                            self->tx_buff.truesize);
 
-        self->stats.tx_bytes += self->tx_buff.len;
+        dev->stats.tx_bytes += self->tx_buff.len;
 
         /* Add interrupt on tx low level (will fire immediately) */
         switch_bank(iobase, BANK0);
@@ -1473,7 +1471,7 @@ static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
         self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
         self->tx_fifo.tail += skb->len;
 
-        self->stats.tx_bytes += skb->len;
+        dev->stats.tx_bytes += skb->len;
 
         skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
                       skb->len);
@@ -1652,13 +1650,13 @@ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
 
         /* Check for underrrun! */
         if (inb(iobase+ASCR) & ASCR_TXUR) {
-                self->stats.tx_errors++;
-                self->stats.tx_fifo_errors++;
+                self->netdev->stats.tx_errors++;
+                self->netdev->stats.tx_fifo_errors++;
 
                 /* Clear bit, by writing 1 into it */
                 outb(ASCR_TXUR, iobase+ASCR);
         } else {
-                self->stats.tx_packets++;
+                self->netdev->stats.tx_packets++;
         }
 
         /* Finished with this frame, so prepare for next */
@@ -1793,28 +1791,28 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
                 if (status & FRM_ST_ERR_MSK) {
                         if (status & FRM_ST_LOST_FR) {
                                 /* Add number of lost frames to stats */
-                                self->stats.rx_errors += len;
+                                self->netdev->stats.rx_errors += len;
                         } else {
                                 /* Skip frame */
-                                self->stats.rx_errors++;
+                                self->netdev->stats.rx_errors++;
 
                                 self->rx_buff.data += len;
 
                                 if (status & FRM_ST_MAX_LEN)
-                                        self->stats.rx_length_errors++;
+                                        self->netdev->stats.rx_length_errors++;
 
                                 if (status & FRM_ST_PHY_ERR)
-                                        self->stats.rx_frame_errors++;
+                                        self->netdev->stats.rx_frame_errors++;
 
                                 if (status & FRM_ST_BAD_CRC)
-                                        self->stats.rx_crc_errors++;
+                                        self->netdev->stats.rx_crc_errors++;
                         }
                         /* The errors below can be reported in both cases */
                         if (status & FRM_ST_OVR1)
-                                self->stats.rx_fifo_errors++;
+                                self->netdev->stats.rx_fifo_errors++;
 
                         if (status & FRM_ST_OVR2)
-                                self->stats.rx_fifo_errors++;
+                                self->netdev->stats.rx_fifo_errors++;
                 } else {
                         /*
                          * First we must make sure that the frame we
@@ -1863,7 +1861,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
                                 IRDA_WARNING("%s(), memory squeeze, "
                                              "dropping frame.\n",
                                              __func__);
-                                self->stats.rx_dropped++;
+                                self->netdev->stats.rx_dropped++;
 
                                 /* Restore bank register */
                                 outb(bank, iobase+BSR);
@@ -1889,8 +1887,8 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
 
                         /* Move to next frame */
                         self->rx_buff.data += len;
-                        self->stats.rx_bytes += len;
-                        self->stats.rx_packets++;
+                        self->netdev->stats.rx_bytes += len;
+                        self->netdev->stats.rx_packets++;
 
                         skb->dev = self->netdev;
                         skb_reset_mac_header(skb);
@@ -1920,8 +1918,8 @@ static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self)
         /* Receive all characters in Rx FIFO */
         do {
                 byte = inb(iobase+RXD);
-                async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
-                                  byte);
+                async_unwrap_char(self->netdev, &self->netdev->stats,
+                                  &self->rx_buff, byte);
         } while (inb(iobase+LSR) & LSR_RXDA); /* Data available */
 }
 
@@ -1952,7 +1950,7 @@ static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir)
                         self->ier = IER_TXLDL_IE;
                 else {
 
-                        self->stats.tx_packets++;
+                        self->netdev->stats.tx_packets++;
                         netif_wake_queue(self->netdev);
                         self->ier = IER_TXEMP_IE;
                 }
@@ -2307,13 +2305,6 @@ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
         return ret;
 }
 
-static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev)
-{
-        struct nsc_ircc_cb *self = netdev_priv(dev);
-
-        return &self->stats;
-}
-
 static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
 {
         struct nsc_ircc_cb *self = platform_get_drvdata(dev);
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
index 71cd3c5a0762..7ba7738759b9 100644
--- a/drivers/net/irda/nsc-ircc.h
+++ b/drivers/net/irda/nsc-ircc.h
@@ -251,7 +251,6 @@ struct nsc_ircc_cb {
         struct tx_fifo tx_fifo;    /* Info about frames to be transmitted */
 
         struct net_device *netdev; /* Yes! we are some kind of netdevice */
-        struct net_device_stats stats;
 
         struct irlap_cb *irlap;    /* The link layer we are binded to */
         struct qos_info qos;       /* QoS capabilities for this device */
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 004a9aab3a50..31794c2363ec 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -108,7 +108,6 @@ struct pxa_irda {
         int txdma;
         int rxdma;
 
-        struct net_device_stats stats;
         struct irlap_cb *irlap;
         struct qos_info qos;
 
@@ -258,14 +257,15 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
                         data = STRBR;
                         if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
                                 printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
-                                si->stats.rx_errors++;
+                                dev->stats.rx_errors++;
                                 if (lsr & LSR_FE)
-                                        si->stats.rx_frame_errors++;
+                                        dev->stats.rx_frame_errors++;
                                 if (lsr & LSR_OE)
-                                        si->stats.rx_fifo_errors++;
+                                        dev->stats.rx_fifo_errors++;
                         } else {
-                                si->stats.rx_bytes++;
-                                async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
+                                dev->stats.rx_bytes++;
+                                async_unwrap_char(dev, &dev->stats,
+                                                  &si->rx_buff, data);
                         }
                         lsr = STLSR;
                 }
@@ -277,8 +277,8 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
 
         case 0x0C: /* Character Timeout Indication */
                 do {
-                        si->stats.rx_bytes++;
-                        async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR);
+                        dev->stats.rx_bytes++;
+                        async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR);
                 } while (STLSR & LSR_DR);
                 si->last_oscr = OSCR;
                 break;
@@ -290,9 +290,8 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
                 }
 
                 if (si->tx_buff.len == 0) {
-                        si->stats.tx_packets++;
-                        si->stats.tx_bytes += si->tx_buff.data -
-                                              si->tx_buff.head;
+                        dev->stats.tx_packets++;
+                        dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;
 
                         /* We need to ensure that the transmitter has finished. */
                         while ((STLSR & LSR_TEMT) == 0)
@@ -343,10 +342,10 @@ static void pxa_irda_fir_dma_tx_irq(int channel, void *data)
         DCSR(channel) = dcsr & ~DCSR_RUN;
 
         if (dcsr & DCSR_ENDINTR) {
-                si->stats.tx_packets++;
-                si->stats.tx_bytes += si->dma_tx_buff_len;
+                dev->stats.tx_packets++;
+                dev->stats.tx_bytes += si->dma_tx_buff_len;
         } else {
-                si->stats.tx_errors++;
+                dev->stats.tx_errors++;
         }
 
         while (ICSR1 & ICSR1_TBY)
@@ -392,14 +391,14 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in
                 data = ICDR;
 
                 if (stat & (ICSR1_CRE | ICSR1_ROR)) {
-                        si->stats.rx_errors++;
+                        dev->stats.rx_errors++;
                         if (stat & ICSR1_CRE) {
                                 printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
-                                si->stats.rx_crc_errors++;
+                                dev->stats.rx_crc_errors++;
                         }
                         if (stat & ICSR1_ROR) {
                                 printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
-                                si->stats.rx_over_errors++;
+                                dev->stats.rx_over_errors++;
                         }
                 } else {
                         si->dma_rx_buff[len++] = data;
@@ -415,14 +414,14 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in
 
         if (icsr0 & ICSR0_FRE) {
                 printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
-                si->stats.rx_dropped++;
+                dev->stats.rx_dropped++;
                 return;
         }
 
         skb = alloc_skb(len+1,GFP_ATOMIC);
         if (!skb) {
                 printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
-                si->stats.rx_dropped++;
+                dev->stats.rx_dropped++;
                 return;
         }
 
@@ -437,8 +436,8 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in
         skb->protocol = htons(ETH_P_IRDA);
         netif_rx(skb);
 
-        si->stats.rx_packets++;
-        si->stats.rx_bytes += len;
+        dev->stats.rx_packets++;
+        dev->stats.rx_bytes += len;
         }
 }
 
@@ -457,10 +456,10 @@ static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
         if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
                 if (icsr0 & ICSR0_FRE) {
                         printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
-                        si->stats.rx_frame_errors++;
+                        dev->stats.rx_frame_errors++;
                 } else {
                         printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
-                        si->stats.rx_errors++;
+                        dev->stats.rx_errors++;
                 }
                 ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
         }
@@ -589,12 +588,6 @@ static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
         return ret;
 }
 
-static struct net_device_stats *pxa_irda_stats(struct net_device *dev)
-{
-        struct pxa_irda *si = netdev_priv(dev);
-        return &si->stats;
-}
-
 static void pxa_irda_startup(struct pxa_irda *si)
 {
         /* Disable STUART interrupts */
@@ -857,7 +850,6 @@ static int pxa_irda_probe(struct platform_device *pdev)
         dev->open = pxa_irda_start;
         dev->stop = pxa_irda_stop;
         dev->do_ioctl = pxa_irda_ioctl;
-        dev->get_stats = pxa_irda_stats;
 
         irda_init_max_qos_capabilies(&si->qos);
 
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index d302bcf4c148..7a2b003954ca 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -60,7 +60,6 @@ struct sa1100_irda {
         dma_regs_t *txdma;
         dma_regs_t *rxdma;
 
-        struct net_device_stats stats;
         struct device *dev;
         struct irda_platform_data *pdata;
         struct irlap_cb *irlap;
@@ -375,13 +374,13 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
                 data = Ser2UTDR;
 
                 if (stat & (UTSR1_FRE | UTSR1_ROR)) {
-                        si->stats.rx_errors++;
+                        dev->stats.rx_errors++;
                         if (stat & UTSR1_FRE)
-                                si->stats.rx_frame_errors++;
+                                dev->stats.rx_frame_errors++;
                         if (stat & UTSR1_ROR)
-                                si->stats.rx_fifo_errors++;
+                                dev->stats.rx_fifo_errors++;
                 } else
-                        async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
+                        async_unwrap_char(dev, &dev->stats, &si->rx_buff, data);
 
                 status = Ser2UTSR0;
         }
@@ -396,9 +395,9 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
                  * There are at least 4 bytes in the FIFO. Read 3 bytes
                  * and leave the rest to the block below.
                  */
-                async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
-                async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
-                async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
+                async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
+                async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
+                async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
         }
 
         if (status & (UTSR0_RFS | UTSR0_RID)) {
@@ -406,7 +405,7 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
                  * Fifo contains more than 1 character.
                  */
                 do {
-                        async_unwrap_char(dev, &si->stats, &si->rx_buff,
+                        async_unwrap_char(dev, &dev->stats, &si->rx_buff,
                                           Ser2UTDR);
                 } while (Ser2UTSR1 & UTSR1_RNE);
 
@@ -422,8 +421,8 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
         } while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len);
 
         if (si->tx_buff.len == 0) {
-                si->stats.tx_packets++;
-                si->stats.tx_bytes += si->tx_buff.data -
+                dev->stats.tx_packets++;
+                dev->stats.tx_bytes += si->tx_buff.data -
                                       si->tx_buff.head;
 
                 /*
@@ -482,11 +481,11 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
                 data = Ser2HSDR;
 
                 if (stat & (HSSR1_CRE | HSSR1_ROR)) {
-                        si->stats.rx_errors++;
+                        dev->stats.rx_errors++;
                         if (stat & HSSR1_CRE)
-                                si->stats.rx_crc_errors++;
+                                dev->stats.rx_crc_errors++;
                         if (stat & HSSR1_ROR)
-                                si->stats.rx_frame_errors++;
+                                dev->stats.rx_frame_errors++;
                 } else
                         skb->data[len++] = data;
 
@@ -505,8 +504,8 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
         skb->dev = dev;
         skb_reset_mac_header(skb);
         skb->protocol = htons(ETH_P_IRDA);
-        si->stats.rx_packets++;
-        si->stats.rx_bytes += len;
+        dev->stats.rx_packets++;
+        dev->stats.rx_bytes += len;
 
         /*
          * Before we pass the buffer up, allocate a new one.
@@ -545,10 +544,10 @@ static void sa1100_irda_fir_irq(struct net_device *dev)
          * from the fifo.
          */
         if (Ser2HSSR0 & (HSSR0_FRE | HSSR0_RAB)) {
-                si->stats.rx_errors++;
+                dev->stats.rx_errors++;
 
                 if (Ser2HSSR0 & HSSR0_FRE)
-                        si->stats.rx_frame_errors++;
+                        dev->stats.rx_frame_errors++;
 
                 /*
                  * Clear out the DMA...
@@ -633,8 +632,8 @@ static void sa1100_irda_txdma_irq(void *id)
          */
         if (skb) {
                 dma_unmap_single(si->dev, si->txbuf_dma, skb->len, DMA_TO_DEVICE);
-                si->stats.tx_packets ++;
-                si->stats.tx_bytes += skb->len;
+                dev->stats.tx_packets ++;
+                dev->stats.tx_bytes += skb->len;
                 dev_kfree_skb_irq(skb);
         }
 
@@ -762,12 +761,6 @@ sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
         return ret;
 }
 
-static struct net_device_stats *sa1100_irda_stats(struct net_device *dev)
-{
-        struct sa1100_irda *si = netdev_priv(dev);
-        return &si->stats;
-}
-
 static int sa1100_irda_start(struct net_device *dev)
 {
         struct sa1100_irda *si = netdev_priv(dev);
@@ -924,7 +917,6 @@ static int sa1100_irda_probe(struct platform_device *pdev)
         dev->open = sa1100_irda_start;
         dev->stop = sa1100_irda_stop;
         dev->do_ioctl = sa1100_irda_ioctl;
-        dev->get_stats = sa1100_irda_stats;
         dev->irq = IRQ_Ser2ICP;
 
         irda_init_max_qos_capabilies(&si->qos);
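
[Editor's note] In the pxaficp_ir and sa1100_ir hunks the interrupt handlers already hold the net_device, either as the dev_id cookie registered with request_irq or as an explicit argument, so the conversion is a pure pointer swap: si->stats.X++ becomes dev->stats.X++ with no extra lookup. Neither form takes a lock; these counters were updated without synchronization from IRQ context before the patch, and that does not change. The handler shape, schematically (hypothetical foo driver, status decoding elided):

        static irqreturn_t foo_sir_irq(int irq, void *dev_id)
        {
                struct net_device *dev = dev_id;        /* the netdev cookie */

                dev->stats.rx_errors++;                 /* was si->stats... */
                return IRQ_HANDLED;
        }
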
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index 2a57bc67ce35..6d5b1e2b1289 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -160,7 +160,6 @@ static inline int sirdev_schedule_mode(struct sir_dev *dev, int mode)
 
 struct sir_dev {
         struct net_device *netdev;
-        struct net_device_stats stats;
 
         struct irlap_cb *irlap;
 
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index ceef040aa76d..5b5862499def 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -455,8 +455,8 @@ void sirdev_write_complete(struct sir_dev *dev)
                 if ((skb=dev->tx_skb) != NULL) {
                         dev->tx_skb = NULL;
                         dev_kfree_skb_any(skb);
-                        dev->stats.tx_errors++;
-                        dev->stats.tx_dropped++;
+                        dev->netdev->stats.tx_errors++;
+                        dev->netdev->stats.tx_dropped++;
                 }
                 dev->tx_buff.len = 0;
         }
@@ -493,8 +493,8 @@ void sirdev_write_complete(struct sir_dev *dev)
 
         if ((skb=dev->tx_skb) != NULL) {
                 dev->tx_skb = NULL;
-                dev->stats.tx_packets++;
-                dev->stats.tx_bytes += skb->len;
+                dev->netdev->stats.tx_packets++;
+                dev->netdev->stats.tx_bytes += skb->len;
                 dev_kfree_skb_any(skb);
         }
 
@@ -548,7 +548,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
                  * just update stats and set media busy
                  */
                 irda_device_set_media_busy(dev->netdev, TRUE);
-                dev->stats.rx_dropped++;
+                dev->netdev->stats.rx_dropped++;
                 IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
                 return 0;
         }
@@ -557,7 +557,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
         if (likely(atomic_read(&dev->enable_rx))) {
                 while (count--)
                         /* Unwrap and destuff one byte */
-                        async_unwrap_char(dev->netdev, &dev->stats,
+                        async_unwrap_char(dev->netdev, &dev->netdev->stats,
                                           &dev->rx_buff, *cp++);
         } else {
                 while (count--) {
@@ -582,13 +582,6 @@ EXPORT_SYMBOL(sirdev_receive);
 
 /* callbacks from network layer */
 
-static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
-{
-        struct sir_dev *dev = netdev_priv(ndev);
-
-        return (dev) ? &dev->stats : NULL;
-}
-
 static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
         struct sir_dev *dev = netdev_priv(ndev);
@@ -654,7 +647,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
          */
         atomic_set(&dev->enable_rx, 0);
         if (unlikely(sirdev_is_receiving(dev)))
-                dev->stats.collisions++;
+                dev->netdev->stats.collisions++;
 
         actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
 
@@ -669,8 +662,8 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
                 IRDA_ERROR("%s: drv->do_write failed (%d)\n",
                            __func__, actual);
                 dev_kfree_skb_any(skb);
-                dev->stats.tx_errors++;
-                dev->stats.tx_dropped++;
+                dev->netdev->stats.tx_errors++;
+                dev->netdev->stats.tx_dropped++;
                 netif_wake_queue(ndev);
         }
         spin_unlock_irqrestore(&dev->tx_lock, flags);
@@ -918,7 +911,6 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
         ndev->hard_start_xmit = sirdev_hard_xmit;
         ndev->open = sirdev_open;
         ndev->stop = sirdev_close;
-        ndev->get_stats = sirdev_get_stats;
         ndev->do_ioctl = sirdev_ioctl;
 
         if (register_netdev(ndev)) {
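
[Editor's note] sir_dev.c is the shared SIR core, so converting it covers every dongle driver stacked on top; sirdev_get_stats disappears outright and the stack falls back to the default getter. Note the async_unwrap_char() call shape used throughout the series: the stats block is an explicit parameter of the unwrapper, which is why each caller now hands in the embedded counters, as in this line from the hunk above:

        async_unwrap_char(dev->netdev, &dev->netdev->stats,
                          &dev->rx_buff, *cp++);
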
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 5d09e157e15b..dd73cce10991 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -150,7 +150,6 @@ struct smsc_chip_address {
 /* Private data for each instance */
 struct smsc_ircc_cb {
         struct net_device *netdev; /* Yes! we are some kind of netdevice */
-        struct net_device_stats stats;
         struct irlap_cb *irlap; /* The link layer we are binded to */
 
         chipio_t io; /* IrDA controller information */
@@ -215,7 +214,6 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cm
 #if SMSC_IRCC2_C_NET_TIMEOUT
 static void smsc_ircc_timeout(struct net_device *dev);
 #endif
-static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev);
 static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self);
 static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self);
 static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed);
@@ -529,7 +527,6 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
         dev->open = smsc_ircc_net_open;
         dev->stop = smsc_ircc_net_close;
         dev->do_ioctl = smsc_ircc_net_ioctl;
-        dev->get_stats = smsc_ircc_net_get_stats;
 
         self = netdev_priv(dev);
         self->netdev = dev;
@@ -834,13 +831,6 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
         return ret;
 }
 
-static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev)
-{
-        struct smsc_ircc_cb *self = netdev_priv(dev);
-
-        return &self->stats;
-}
-
 #if SMSC_IRCC2_C_NET_TIMEOUT
 /*
  * Function smsc_ircc_timeout (struct net_device *dev)
@@ -920,7 +910,7 @@ static int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
         self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
                                            self->tx_buff.truesize);
 
-        self->stats.tx_bytes += self->tx_buff.len;
+        dev->stats.tx_bytes += self->tx_buff.len;
 
         /* Turn on transmit finished interrupt. Will fire immediately! */
         outb(UART_IER_THRI, self->io.sir_base + UART_IER);
@@ -1320,16 +1310,16 @@ static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
         /* Check for underrun! */
         register_bank(iobase, 0);
         if (inb(iobase + IRCC_LSR) & IRCC_LSR_UNDERRUN) {
-                self->stats.tx_errors++;
-                self->stats.tx_fifo_errors++;
+                self->netdev->stats.tx_errors++;
+                self->netdev->stats.tx_fifo_errors++;
 
                 /* Reset error condition */
                 register_bank(iobase, 0);
                 outb(IRCC_MASTER_ERROR_RESET, iobase + IRCC_MASTER);
                 outb(0x00, iobase + IRCC_MASTER);
         } else {
-                self->stats.tx_packets++;
-                self->stats.tx_bytes += self->tx_buff.len;
+                self->netdev->stats.tx_packets++;
+                self->netdev->stats.tx_bytes += self->tx_buff.len;
         }
 
         /* Check if it's time to change the speed */
@@ -1429,15 +1419,15 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
 
         /* Look for errors */
         if (lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
-                self->stats.rx_errors++;
+                self->netdev->stats.rx_errors++;
                 if (lsr & IRCC_LSR_FRAME_ERROR)
-                        self->stats.rx_frame_errors++;
+                        self->netdev->stats.rx_frame_errors++;
                 if (lsr & IRCC_LSR_CRC_ERROR)
-                        self->stats.rx_crc_errors++;
+                        self->netdev->stats.rx_crc_errors++;
                 if (lsr & IRCC_LSR_SIZE_ERROR)
-                        self->stats.rx_length_errors++;
+                        self->netdev->stats.rx_length_errors++;
                 if (lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN))
-                        self->stats.rx_length_errors++;
+                        self->netdev->stats.rx_length_errors++;
                 return;
         }
 
@@ -1460,8 +1450,8 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
         skb_reserve(skb, 1);
 
         memcpy(skb_put(skb, len), self->rx_buff.data, len);
-        self->stats.rx_packets++;
-        self->stats.rx_bytes += len;
+        self->netdev->stats.rx_packets++;
+        self->netdev->stats.rx_bytes += len;
 
         skb->dev = self->netdev;
         skb_reset_mac_header(skb);
@@ -1489,7 +1479,7 @@ static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
          * async_unwrap_char will deliver all found frames
          */
         do {
-                async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
+                async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
                                   inb(iobase + UART_RX));
 
                 /* Make sure we don't stay here to long */
@@ -1992,7 +1982,7 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
                         /* Tell network layer that we want more frames */
                         netif_wake_queue(self->netdev);
                 }
-                self->stats.tx_packets++;
+                self->netdev->stats.tx_packets++;
 
                 if (self->io.speed <= 115200) {
                         /*
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index ca4cd9266e55..8b1658c6c925 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -164,7 +164,7 @@ struct stir_cb {
         struct usb_device *usbdev;      /* init: probe_irda */
         struct net_device *netdev;      /* network layer */
         struct irlap_cb *irlap;         /* The link layer we are binded to */
-        struct net_device_stats stats;  /* network statistics */
+
         struct qos_info qos;
         unsigned speed;                 /* Current speed */
 
@@ -323,16 +323,16 @@ static void fir_eof(struct stir_cb *stir)
                 pr_debug("%s: short frame len %d\n",
                          stir->netdev->name, len);
 
-                ++stir->stats.rx_errors;
-                ++stir->stats.rx_length_errors;
+                ++stir->netdev->stats.rx_errors;
+                ++stir->netdev->stats.rx_length_errors;
                 return;
         }
 
         fcs = ~(crc32_le(~0, rx_buff->data, len));
         if (fcs != get_unaligned_le32(rx_buff->data + len)) {
                 pr_debug("crc error calc 0x%x len %d\n", fcs, len);
-                stir->stats.rx_errors++;
-                stir->stats.rx_crc_errors++;
+                stir->netdev->stats.rx_errors++;
+                stir->netdev->stats.rx_crc_errors++;
                 return;
         }
 
@@ -340,7 +340,7 @@ static void fir_eof(struct stir_cb *stir)
         if (len < IRDA_RX_COPY_THRESHOLD) {
                 nskb = dev_alloc_skb(len + 1);
                 if (unlikely(!nskb)) {
-                        ++stir->stats.rx_dropped;
+                        ++stir->netdev->stats.rx_dropped;
                         return;
                 }
                 skb_reserve(nskb, 1);
@@ -349,7 +349,7 @@ static void fir_eof(struct stir_cb *stir)
         } else {
                 nskb = dev_alloc_skb(rx_buff->truesize);
                 if (unlikely(!nskb)) {
-                        ++stir->stats.rx_dropped;
+                        ++stir->netdev->stats.rx_dropped;
                         return;
                 }
                 skb_reserve(nskb, 1);
@@ -366,8 +366,8 @@ static void fir_eof(struct stir_cb *stir)
 
         netif_rx(skb);
 
-        stir->stats.rx_packets++;
-        stir->stats.rx_bytes += len;
+        stir->netdev->stats.rx_packets++;
+        stir->netdev->stats.rx_bytes += len;
 
         rx_buff->data = rx_buff->head;
         rx_buff->len = 0;
@@ -437,7 +437,7 @@ static void stir_fir_chars(struct stir_cb *stir,
                 if (unlikely(rx_buff->len >= rx_buff->truesize)) {
                         pr_debug("%s: fir frame exceeds %d\n",
                                  stir->netdev->name, rx_buff->truesize);
-                        ++stir->stats.rx_over_errors;
+                        ++stir->netdev->stats.rx_over_errors;
                         goto error_recovery;
                 }
 
@@ -445,10 +445,10 @@ static void stir_fir_chars(struct stir_cb *stir,
                 continue;
 
         frame_error:
-                ++stir->stats.rx_frame_errors;
+                ++stir->netdev->stats.rx_frame_errors;
 
         error_recovery:
-                ++stir->stats.rx_errors;
+                ++stir->netdev->stats.rx_errors;
                 rx_buff->state = OUTSIDE_FRAME;
                 rx_buff->in_frame = FALSE;
         }
@@ -461,7 +461,7 @@ static void stir_sir_chars(struct stir_cb *stir,
         int i;
 
         for (i = 0; i < len; i++)
-                async_unwrap_char(stir->netdev, &stir->stats,
+                async_unwrap_char(stir->netdev, &stir->netdev->stats,
                                   &stir->rx_buff, bytes[i]);
 }
 
@@ -692,7 +692,7 @@ static void receive_stop(struct stir_cb *stir)
         usb_kill_urb(stir->rx_urb);
 
         if (stir->rx_buff.in_frame)
-                stir->stats.collisions++;
+                stir->netdev->stats.collisions++;
 }
 /*
  * Wrap data in socket buffer and send it.
@@ -718,15 +718,15 @@ static void stir_send(struct stir_cb *stir, struct sk_buff *skb)
         if (!first_frame)
                 fifo_txwait(stir, wraplen);
 
-        stir->stats.tx_packets++;
-        stir->stats.tx_bytes += skb->len;
+        stir->netdev->stats.tx_packets++;
+        stir->netdev->stats.tx_bytes += skb->len;
         stir->netdev->trans_start = jiffies;
         pr_debug("send %d (%d)\n", skb->len, wraplen);
 
         if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
                          stir->io_buf, wraplen,
                          NULL, TRANSMIT_TIMEOUT))
-                stir->stats.tx_errors++;
+                stir->netdev->stats.tx_errors++;
 }
 
 /*
@@ -1008,15 +1008,6 @@ static int stir_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
 }
 
 /*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *stir_net_get_stats(struct net_device *netdev)
-{
-        struct stir_cb *stir = netdev_priv(netdev);
-        return &stir->stats;
-}
-
-/*
  * This routine is called by the USB subsystem for each new device
  * in the system. We need to check if the device is ours, and in
  * this case start handling it.
@@ -1066,7 +1057,6 @@ static int stir_probe(struct usb_interface *intf,
         net->hard_start_xmit = stir_hard_xmit;
         net->open = stir_net_open;
         net->stop = stir_net_close;
-        net->get_stats = stir_net_get_stats;
         net->do_ioctl = stir_net_ioctl;
 
         ret = register_netdev(net);
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 74c78cf7a333..8b3e545924cc 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -101,8 +101,6 @@ static int via_ircc_net_open(struct net_device *dev);
 static int via_ircc_net_close(struct net_device *dev);
 static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
 			      int cmd);
-static struct net_device_stats *via_ircc_net_get_stats(struct net_device
-							*dev);
 static void via_ircc_change_dongle_speed(int iobase, int speed,
 					 int dongle_id);
 static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
@@ -434,7 +432,6 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
 	dev->open = via_ircc_net_open;
 	dev->stop = via_ircc_net_close;
 	dev->do_ioctl = via_ircc_net_ioctl;
-	dev->get_stats = via_ircc_net_get_stats;

 	err = register_netdev(dev);
 	if (err)
@@ -855,7 +852,7 @@ static int via_ircc_hard_xmit_sir(struct sk_buff *skb,
 	async_wrap_skb(skb, self->tx_buff.data,
 		       self->tx_buff.truesize);

-	self->stats.tx_bytes += self->tx_buff.len;
+	dev->stats.tx_bytes += self->tx_buff.len;
 	/* Send this frame with old speed */
 	SetBaudRate(iobase, self->io.speed);
 	SetPulseWidth(iobase, 12);
@@ -921,7 +918,7 @@ static int via_ircc_hard_xmit_fir(struct sk_buff *skb,
 	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;

 	self->tx_fifo.tail += skb->len;
-	self->stats.tx_bytes += skb->len;
+	dev->stats.tx_bytes += skb->len;
 	skb_copy_from_linear_data(skb,
 		self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
 	self->tx_fifo.len++;
@@ -990,12 +987,12 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
 	/* Clear bit, by writing 1 into it */
 	Tx_status = GetTXStatus(iobase);
 	if (Tx_status & 0x08) {
-		self->stats.tx_errors++;
-		self->stats.tx_fifo_errors++;
+		self->netdev->stats.tx_errors++;
+		self->netdev->stats.tx_fifo_errors++;
 		hwreset(self);
 // how to clear underrrun ?
 	} else {
-		self->stats.tx_packets++;
+		self->netdev->stats.tx_packets++;
 		ResetChip(iobase, 3);
 		ResetChip(iobase, 4);
 	}
@@ -1119,8 +1116,8 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
 	}
 	// Move to next frame
 	self->rx_buff.data += len;
-	self->stats.rx_bytes += len;
-	self->stats.rx_packets++;
+	self->netdev->stats.rx_bytes += len;
+	self->netdev->stats.rx_packets++;
 	skb->dev = self->netdev;
 	skb_reset_mac_header(skb);
 	skb->protocol = htons(ETH_P_IRDA);
@@ -1180,7 +1177,7 @@ F01_E */
 	 */
 	if ((skb == NULL) || (skb->data == NULL)
 	    || (self->rx_buff.data == NULL) || (len < 6)) {
-		self->stats.rx_dropped++;
+		self->netdev->stats.rx_dropped++;
 		return TRUE;
 	}
 	skb_reserve(skb, 1);
@@ -1192,8 +1189,8 @@ F01_E */

 	// Move to next frame
 	self->rx_buff.data += len;
-	self->stats.rx_bytes += len;
-	self->stats.rx_packets++;
+	self->netdev->stats.rx_bytes += len;
+	self->netdev->stats.rx_packets++;
 	skb->dev = self->netdev;
 	skb_reset_mac_header(skb);
 	skb->protocol = htons(ETH_P_IRDA);
@@ -1220,13 +1217,13 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
 	IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);

 	if ((len - 4) < 2) {
-		self->stats.rx_dropped++;
+		self->netdev->stats.rx_dropped++;
 		return FALSE;
 	}

 	skb = dev_alloc_skb(len + 1);
 	if (skb == NULL) {
-		self->stats.rx_dropped++;
+		self->netdev->stats.rx_dropped++;
 		return FALSE;
 	}
 	skb_reserve(skb, 1);
@@ -1238,8 +1235,8 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
 	st_fifo->tail = 0;
 	// Move to next frame
 	self->rx_buff.data += len;
-	self->stats.rx_bytes += len;
-	self->stats.rx_packets++;
+	self->netdev->stats.rx_bytes += len;
+	self->netdev->stats.rx_packets++;
 	skb->dev = self->netdev;
 	skb_reset_mac_header(skb);
 	skb->protocol = htons(ETH_P_IRDA);
@@ -1295,7 +1292,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
 			 */
 			if ((skb == NULL) || (skb->data == NULL)
 			    || (self->rx_buff.data == NULL) || (len < 6)) {
-				self->stats.rx_dropped++;
+				self->netdev->stats.rx_dropped++;
 				continue;
 			}
 			skb_reserve(skb, 1);
@@ -1307,8 +1304,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)

 			// Move to next frame
 			self->rx_buff.data += len;
-			self->stats.rx_bytes += len;
-			self->stats.rx_packets++;
+			self->netdev->stats.rx_bytes += len;
+			self->netdev->stats.rx_packets++;
 			skb->dev = self->netdev;
 			skb_reset_mac_header(skb);
 			skb->protocol = htons(ETH_P_IRDA);
@@ -1523,7 +1520,7 @@ static int via_ircc_net_open(struct net_device *dev)

 	IRDA_ASSERT(dev != NULL, return -1;);
 	self = netdev_priv(dev);
-	self->stats.rx_packets = 0;
+	dev->stats.rx_packets = 0;
 	IRDA_ASSERT(self != NULL, return 0;);
 	iobase = self->io.fir_base;
 	if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
@@ -1660,14 +1657,6 @@ static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
 	return ret;
 }

-static struct net_device_stats *via_ircc_net_get_stats(struct net_device
-							*dev)
-{
-	struct via_ircc_cb *self = netdev_priv(dev);
-
-	return &self->stats;
-}
-
 MODULE_AUTHOR("VIA Technologies,inc");
 MODULE_DESCRIPTION("VIA IrDA Device Driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index 403c3f77634c..d9d1db03fa2d 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -95,7 +95,6 @@ struct via_ircc_cb {
 	struct tx_fifo tx_fifo;	/* Info about frames to be transmitted */

 	struct net_device *netdev;	/* Yes! we are some kind of netdevice */
-	struct net_device_stats stats;

 	struct irlap_cb *irlap;	/* The link layer we are binded to */
 	struct qos_info qos;	/* QoS capabilities for this device */
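All of the IrDA conversions in this series follow one recipe: delete the driver's private struct net_device_stats copy and its get_stats accessor, and count against the stats block that struct net_device already embeds (when a driver registers no get_stats hook, the core hands back &dev->stats on its own). A minimal sketch of the pattern, with hypothetical example_* names:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	struct example_priv {
		struct net_device *netdev;	/* no private stats copy any more */
	};

	static void example_rx_frame(struct example_priv *self, struct sk_buff *skb)
	{
		/* account directly on the net_device; no accessor needed */
		self->netdev->stats.rx_packets++;
		self->netdev->stats.rx_bytes += skb->len;
		netif_rx(skb);
	}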
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 0d30f8d659a1..723c4588c803 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -291,14 +291,14 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
 		   now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);

 	seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
-		   idev->stats.rx_packets, idev->stats.rx_bytes, idev->stats.rx_errors,
-		   idev->stats.rx_dropped);
+		   ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
+		   ndev->stats.rx_dropped);
 	seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
-		   idev->stats.rx_over_errors, idev->stats.rx_length_errors,
-		   idev->stats.rx_frame_errors, idev->stats.rx_crc_errors);
+		   ndev->stats.rx_over_errors, ndev->stats.rx_length_errors,
+		   ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors);
 	seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
-		   idev->stats.tx_packets, idev->stats.tx_bytes, idev->stats.tx_errors,
-		   idev->stats.tx_dropped, idev->stats.tx_fifo_errors);
+		   ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors,
+		   ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors);

 }

@@ -651,21 +651,21 @@ static void vlsi_rx_interrupt(struct net_device *ndev)

 		if (ret < 0) {
 			ret = -ret;
-			idev->stats.rx_errors++;
+			ndev->stats.rx_errors++;
 			if (ret & VLSI_RX_DROP)
-				idev->stats.rx_dropped++;
+				ndev->stats.rx_dropped++;
 			if (ret & VLSI_RX_OVER)
-				idev->stats.rx_over_errors++;
+				ndev->stats.rx_over_errors++;
 			if (ret & VLSI_RX_LENGTH)
-				idev->stats.rx_length_errors++;
+				ndev->stats.rx_length_errors++;
 			if (ret & VLSI_RX_FRAME)
-				idev->stats.rx_frame_errors++;
+				ndev->stats.rx_frame_errors++;
 			if (ret & VLSI_RX_CRC)
-				idev->stats.rx_crc_errors++;
+				ndev->stats.rx_crc_errors++;
 		}
 		else if (ret > 0) {
-			idev->stats.rx_packets++;
-			idev->stats.rx_bytes += ret;
+			ndev->stats.rx_packets++;
+			ndev->stats.rx_bytes += ret;
 		}
 	}

@@ -686,6 +686,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)

 static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
 {
+	struct net_device *ndev = pci_get_drvdata(idev->pdev);
 	struct vlsi_ring *r = idev->rx_ring;
 	struct ring_descr *rd;
 	int ret;
@@ -711,21 +712,21 @@ static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)

 		if (ret < 0) {
 			ret = -ret;
-			idev->stats.rx_errors++;
+			ndev->stats.rx_errors++;
 			if (ret & VLSI_RX_DROP)
-				idev->stats.rx_dropped++;
+				ndev->stats.rx_dropped++;
 			if (ret & VLSI_RX_OVER)
-				idev->stats.rx_over_errors++;
+				ndev->stats.rx_over_errors++;
 			if (ret & VLSI_RX_LENGTH)
-				idev->stats.rx_length_errors++;
+				ndev->stats.rx_length_errors++;
 			if (ret & VLSI_RX_FRAME)
-				idev->stats.rx_frame_errors++;
+				ndev->stats.rx_frame_errors++;
 			if (ret & VLSI_RX_CRC)
-				idev->stats.rx_crc_errors++;
+				ndev->stats.rx_crc_errors++;
 		}
 		else if (ret > 0) {
-			idev->stats.rx_packets++;
-			idev->stats.rx_bytes += ret;
+			ndev->stats.rx_packets++;
+			ndev->stats.rx_bytes += ret;
 		}
 	}
 }
@@ -1050,8 +1051,8 @@ drop_unlock:
 drop:
 	IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
 	dev_kfree_skb_any(skb);
-	idev->stats.tx_errors++;
-	idev->stats.tx_dropped++;
+	ndev->stats.tx_errors++;
+	ndev->stats.tx_dropped++;
 	/* Don't even think about returning NET_XMIT_DROP (=1) here!
 	 * In fact any retval!=0 causes the packet scheduler to requeue the
 	 * packet for later retry of transmission - which isn't exactly
@@ -1078,15 +1079,15 @@ static void vlsi_tx_interrupt(struct net_device *ndev)

 		if (ret < 0) {
 			ret = -ret;
-			idev->stats.tx_errors++;
+			ndev->stats.tx_errors++;
 			if (ret & VLSI_TX_DROP)
-				idev->stats.tx_dropped++;
+				ndev->stats.tx_dropped++;
 			if (ret & VLSI_TX_FIFO)
-				idev->stats.tx_fifo_errors++;
+				ndev->stats.tx_fifo_errors++;
 		}
 		else if (ret > 0){
-			idev->stats.tx_packets++;
-			idev->stats.tx_bytes += ret;
+			ndev->stats.tx_packets++;
+			ndev->stats.tx_bytes += ret;
 		}
 	}

@@ -1122,6 +1123,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)

 static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
 {
+	struct net_device *ndev = pci_get_drvdata(idev->pdev);
 	struct vlsi_ring *r = idev->tx_ring;
 	struct ring_descr *rd;
 	int ret;
@@ -1145,15 +1147,15 @@ static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)

 		if (ret < 0) {
 			ret = -ret;
-			idev->stats.tx_errors++;
+			ndev->stats.tx_errors++;
 			if (ret & VLSI_TX_DROP)
-				idev->stats.tx_dropped++;
+				ndev->stats.tx_dropped++;
 			if (ret & VLSI_TX_FIFO)
-				idev->stats.tx_fifo_errors++;
+				ndev->stats.tx_fifo_errors++;
 		}
 		else if (ret > 0){
-			idev->stats.tx_packets++;
-			idev->stats.tx_bytes += ret;
+			ndev->stats.tx_packets++;
+			ndev->stats.tx_bytes += ret;
 		}
 	}

@@ -1373,13 +1375,6 @@ static int vlsi_stop_hw(vlsi_irda_dev_t *idev)

 /**************************************************************/

-static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
-{
-	vlsi_irda_dev_t *idev = netdev_priv(ndev);
-
-	return &idev->stats;
-}
-
 static void vlsi_tx_timeout(struct net_device *ndev)
 {
 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
@@ -1615,7 +1610,6 @@ static int vlsi_irda_init(struct net_device *ndev)

 	ndev->open	      = vlsi_open;
 	ndev->stop	      = vlsi_close;
-	ndev->get_stats	      = vlsi_get_stats;
 	ndev->hard_start_xmit = vlsi_hard_start_xmit;
 	ndev->do_ioctl	      = vlsi_ioctl;
 	ndev->tx_timeout      = vlsi_tx_timeout;
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 9b1884329fba..3050d1a0cccf 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -712,7 +712,6 @@ static inline struct ring_descr *ring_get(struct vlsi_ring *r)

 typedef struct vlsi_irda_dev {
 	struct pci_dev		*pdev;
-	struct net_device_stats	stats;

 	struct irlap_cb		*irlap;

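vlsi_unarm_rx() and vlsi_unarm_tx() receive only the private vlsi_irda_dev_t, so before they can touch ndev->stats the patch recovers the net_device from the PCI device. This relies on the probe path having stored it there with pci_set_drvdata() — an assumption about this driver's probe that the added pci_get_drvdata() calls imply. Sketch:

	static void example_unarm(vlsi_irda_dev_t *idev)
	{
		/* probe is assumed to have done pci_set_drvdata(pdev, ndev) */
		struct net_device *ndev = pci_get_drvdata(idev->pdev);

		ndev->stats.rx_errors++;	/* counters now live on ndev */
	}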
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 30ec9131c5ce..dc0a2e4d830f 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -102,7 +102,6 @@ static int w83977af_is_receiving(struct w83977af_ir *self);
 static int  w83977af_net_open(struct net_device *dev);
 static int  w83977af_net_close(struct net_device *dev);
 static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev);

 /*
  * Function w83977af_init ()
@@ -237,7 +236,6 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
 	dev->open            = w83977af_net_open;
 	dev->stop            = w83977af_net_close;
 	dev->do_ioctl        = w83977af_net_ioctl;
-	dev->get_stats       = w83977af_net_get_stats;

 	err = register_netdev(dev);
 	if (err) {
@@ -702,13 +700,13 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
 	if (inb(iobase+AUDR) & AUDR_UNDR) {
 		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );

-		self->stats.tx_errors++;
-		self->stats.tx_fifo_errors++;
+		self->netdev->stats.tx_errors++;
+		self->netdev->stats.tx_fifo_errors++;

 		/* Clear bit, by writing 1 to it */
 		outb(AUDR_UNDR, iobase+AUDR);
 	} else
-		self->stats.tx_packets++;
+		self->netdev->stats.tx_packets++;


 	if (self->new_speed) {
@@ -846,28 +844,28 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
 		if (status & FS_FO_ERR_MSK) {
 			if (status & FS_FO_LST_FR) {
 				/* Add number of lost frames to stats */
-				self->stats.rx_errors += len;
+				self->netdev->stats.rx_errors += len;
 			} else {
 				/* Skip frame */
-				self->stats.rx_errors++;
+				self->netdev->stats.rx_errors++;

 				self->rx_buff.data += len;

 				if (status & FS_FO_MX_LEX)
-					self->stats.rx_length_errors++;
+					self->netdev->stats.rx_length_errors++;

 				if (status & FS_FO_PHY_ERR)
-					self->stats.rx_frame_errors++;
+					self->netdev->stats.rx_frame_errors++;

 				if (status & FS_FO_CRC_ERR)
-					self->stats.rx_crc_errors++;
+					self->netdev->stats.rx_crc_errors++;
 			}
 			/* The errors below can be reported in both cases */
 			if (status & FS_FO_RX_OV)
-				self->stats.rx_fifo_errors++;
+				self->netdev->stats.rx_fifo_errors++;

 			if (status & FS_FO_FSF_OV)
-				self->stats.rx_fifo_errors++;
+				self->netdev->stats.rx_fifo_errors++;

 		} else {
 			/* Check if we have transferred all data to memory */
@@ -917,7 +915,7 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)

 			/* Move to next frame */
 			self->rx_buff.data += len;
-			self->stats.rx_packets++;
+			self->netdev->stats.rx_packets++;

 			skb->dev = self->netdev;
 			skb_reset_mac_header(skb);
@@ -951,7 +949,7 @@ static void w83977af_pio_receive(struct w83977af_ir *self)
 	/* Receive all characters in Rx FIFO */
 	do {
 		byte = inb(iobase+RBR);
-		async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
+		async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
 				  byte);
 	} while (inb(iobase+USR) & USR_RDR); /* Data available */
 }
@@ -994,7 +992,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
 			outb(AUDR_SFEND, iobase+AUDR);
 			outb(set, iobase+SSR);

-			self->stats.tx_packets++;
+			self->netdev->stats.tx_packets++;

 			/* Feed me more packets */
 			netif_wake_queue(self->netdev);
@@ -1336,13 +1334,6 @@ out:
 	return ret;
 }

-static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev)
-{
-	struct w83977af_ir *self = netdev_priv(dev);
-
-	return &self->stats;
-}
-
 MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
 MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/w83977af_ir.h b/drivers/net/irda/w83977af_ir.h
index 87c3975baf62..fefe9b11e200 100644
--- a/drivers/net/irda/w83977af_ir.h
+++ b/drivers/net/irda/w83977af_ir.h
@@ -172,7 +172,6 @@ struct w83977af_ir {
 	int tx_len;          /* Number of frames in tx_buff */

 	struct net_device *netdev; /* Yes! we are some kind of netdevice */
-	struct net_device_stats stats;

 	struct irlap_cb    *irlap;   /* The link layer we are binded to */
 	struct qos_info     qos;     /* QoS capabilities for this device */
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
index cfeef0f1bacc..c1bd040b9e05 100644
--- a/drivers/net/mlx4/en_params.c
+++ b/drivers/net/mlx4/en_params.c
@@ -399,8 +399,10 @@ static int mlx4_en_set_ringparam(struct net_device *dev,

 	rx_size = roundup_pow_of_two(param->rx_pending);
 	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
+	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
 	tx_size = roundup_pow_of_two(param->tx_pending);
 	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
+	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

 	if (rx_size == priv->prof->rx_ring_size &&
 	    tx_size == priv->prof->tx_ring_size)
@@ -440,8 +442,8 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
 	struct mlx4_en_dev *mdev = priv->mdev;

 	memset(param, 0, sizeof(*param));
-	param->rx_max_pending = mdev->dev->caps.max_rq_sg;
-	param->tx_max_pending = mdev->dev->caps.max_sq_sg;
+	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
+	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
 	param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
 	param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
 }
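mlx4_en_set_ringparam() now bounds the user's request from both sides after rounding it up to a power of two, and get_ringparam advertises the same fixed ceiling rather than the HCA's scatter/gather limits, which were never a meaningful ring-entry bound. For example, a request of 5000 entries rounds up to 8192 and passes; a request of 100000 rounds to 131072 and is clamped back to 8192. The shape of the computation (names as in the patch):

	/* user request -> power of two, then clamp into [MIN, MAX] */
	u32 rx_size = roundup_pow_of_two(param->rx_pending);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);	/* new upper clamp */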
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index ff4d75205c25..4afd5993e31c 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -203,19 +203,21 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,

 	/* Optimize the common case when there are no wraparounds */
 	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
-		if (tx_info->linear) {
-			pci_unmap_single(mdev->pdev,
-				(dma_addr_t) be64_to_cpu(data->addr),
-				be32_to_cpu(data->byte_count),
-				PCI_DMA_TODEVICE);
-			++data;
-		}
+		if (!tx_info->inl) {
+			if (tx_info->linear) {
+				pci_unmap_single(mdev->pdev,
+					(dma_addr_t) be64_to_cpu(data->addr),
+					be32_to_cpu(data->byte_count),
+					PCI_DMA_TODEVICE);
+				++data;
+			}

-		for (i = 0; i < frags; i++) {
-			frag = &skb_shinfo(skb)->frags[i];
-			pci_unmap_page(mdev->pdev,
-				(dma_addr_t) be64_to_cpu(data[i].addr),
-				frag->size, PCI_DMA_TODEVICE);
+			for (i = 0; i < frags; i++) {
+				frag = &skb_shinfo(skb)->frags[i];
+				pci_unmap_page(mdev->pdev,
+					(dma_addr_t) be64_to_cpu(data[i].addr),
+					frag->size, PCI_DMA_TODEVICE);
+			}
 		}
 		/* Stamp the freed descriptor */
 		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
@@ -224,27 +226,29 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
 		}

 	} else {
-		if ((void *) data >= end) {
-			data = (struct mlx4_wqe_data_seg *)
-				(ring->buf + ((void *) data - end));
-		}
+		if (!tx_info->inl) {
+			if ((void *) data >= end) {
+				data = (struct mlx4_wqe_data_seg *)
+					(ring->buf + ((void *) data - end));
+			}

-		if (tx_info->linear) {
-			pci_unmap_single(mdev->pdev,
-				(dma_addr_t) be64_to_cpu(data->addr),
-				be32_to_cpu(data->byte_count),
-				PCI_DMA_TODEVICE);
-			++data;
-		}
+			if (tx_info->linear) {
+				pci_unmap_single(mdev->pdev,
+					(dma_addr_t) be64_to_cpu(data->addr),
+					be32_to_cpu(data->byte_count),
+					PCI_DMA_TODEVICE);
+				++data;
+			}

-		for (i = 0; i < frags; i++) {
-			/* Check for wraparound before unmapping */
-			if ((void *) data >= end)
-				data = (struct mlx4_wqe_data_seg *) ring->buf;
-			frag = &skb_shinfo(skb)->frags[i];
-			pci_unmap_page(mdev->pdev,
-				(dma_addr_t) be64_to_cpu(data->addr),
-				frag->size, PCI_DMA_TODEVICE);
+			for (i = 0; i < frags; i++) {
+				/* Check for wraparound before unmapping */
+				if ((void *) data >= end)
+					data = (struct mlx4_wqe_data_seg *) ring->buf;
+				frag = &skb_shinfo(skb)->frags[i];
+				pci_unmap_page(mdev->pdev,
+					(dma_addr_t) be64_to_cpu(data->addr),
+					frag->size, PCI_DMA_TODEVICE);
+			}
 		}
 		/* Stamp the freed descriptor */
 		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
@@ -790,8 +794,11 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 			wmb();
 			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
 		}
-	} else
+		tx_info->inl = 0;
+	} else {
 		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
+		tx_info->inl = 1;
+	}

 	ring->prod += nr_txbb;

diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 2e96c7b2180a..e9af32d41ca4 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -115,6 +115,10 @@ enum {
 };
 #define MLX4_EN_MAX_RX_FRAGS	4

+/* Maximum ring sizes */
+#define MLX4_EN_MAX_TX_SIZE	8192
+#define MLX4_EN_MAX_RX_SIZE	8192
+
 /* Minimum ring size for our page-allocation sceme to work */
 #define MLX4_EN_MIN_RX_SIZE	(MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
 #define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
@@ -202,6 +206,7 @@ struct mlx4_en_tx_info {
 	u32 nr_txbb;
 	u8 linear;
 	u8 data_offset;
+	u8 inl;
 };


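The new inl flag records, per TX descriptor, whether mlx4_en_xmit() copied the packet inline into the WQE instead of DMA-mapping it. Completion processing must honour that distinction: an inline descriptor never had a mapping, so calling pci_unmap_*() on it would feed stale descriptor contents to the DMA API. A reduced sketch of the rule the en_tx.c hunks enforce (copied_inline, dma_addr and len are illustrative locals, not fields from the driver):

	/* xmit side: remember how the payload was attached */
	tx_info->inl = copied_inline ? 1 : 0;

	/* completion side: only mapped descriptors get unmapped */
	if (!tx_info->inl)
		pci_unmap_single(mdev->pdev, dma_addr, len, PCI_DMA_TODEVICE);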
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 478edb92bca3..c5dec54251bf 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -779,6 +779,22 @@ static void __devinit natsemi_init_media (struct net_device *dev)

 }

+static const struct net_device_ops natsemi_netdev_ops = {
+	.ndo_open		= netdev_open,
+	.ndo_stop		= netdev_close,
+	.ndo_start_xmit		= start_tx,
+	.ndo_get_stats		= get_stats,
+	.ndo_set_multicast_list = set_rx_mode,
+	.ndo_change_mtu		= natsemi_change_mtu,
+	.ndo_do_ioctl		= netdev_ioctl,
+	.ndo_tx_timeout 	= ns_tx_timeout,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= natsemi_poll_controller,
+#endif
+};
+
 static int __devinit natsemi_probe1 (struct pci_dev *pdev,
 	const struct pci_device_id *ent)
 {
@@ -911,20 +927,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
 	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt])
 		np->full_duplex = 1;

-	/* The chip-specific entries in the device structure. */
-	dev->open = &netdev_open;
-	dev->hard_start_xmit = &start_tx;
-	dev->stop = &netdev_close;
-	dev->get_stats = &get_stats;
-	dev->set_multicast_list = &set_rx_mode;
-	dev->change_mtu = &natsemi_change_mtu;
-	dev->do_ioctl = &netdev_ioctl;
-	dev->tx_timeout = &ns_tx_timeout;
+	dev->netdev_ops = &natsemi_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;

-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = &natsemi_poll_controller;
-#endif
 	SET_ETHTOOL_OPS(dev, &ethtool_ops);

 	if (mtu)
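natsemi is the first of several drivers below converted to net_device_ops: the function pointers formerly poked into struct net_device one by one move into a single const ops table, and hooks the driver has no special needs for fall back to the generic eth_change_mtu/eth_mac_addr/eth_validate_addr helpers. The common shape, sketched with a hypothetical foo_ driver:

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open		= foo_open,
		.ndo_stop		= foo_close,
		.ndo_start_xmit		= foo_start_xmit,
		.ndo_set_mac_address	= eth_mac_addr,		/* generic helpers */
		.ndo_validate_addr	= eth_validate_addr,
	};

	static void foo_setup(struct net_device *dev)
	{
		dev->netdev_ops = &foo_netdev_ops;	/* replaces dev->open = ..., etc. */
	}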
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 46b0772489e4..42021aca1ddd 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1957,6 +1957,9 @@ static const struct net_device_ops netdev_ops = {
 	.ndo_set_multicast_list = ns83820_set_multicast,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= ns83820_tx_timeout,
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+	.ndo_vlan_rx_register	= ns83820_vlan_rx_register,
+#endif
 };

 static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
@@ -2216,7 +2219,6 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
 #ifdef NS83820_VLAN_ACCEL_SUPPORT
 	/* We also support hardware vlan acceleration */
 	ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-	ndev->vlan_rx_register = ns83820_vlan_rx_register;
 #endif

 	if (using_dac) {
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 044b7b07f5f4..665a4286da39 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1568,6 +1568,22 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return err;
 }

+static const struct net_device_ops pcnet32_netdev_ops = {
+	.ndo_open		= pcnet32_open,
+	.ndo_stop 		= pcnet32_close,
+	.ndo_start_xmit		= pcnet32_start_xmit,
+	.ndo_tx_timeout		= pcnet32_tx_timeout,
+	.ndo_get_stats		= pcnet32_get_stats,
+	.ndo_set_multicast_list = pcnet32_set_multicast_list,
+	.ndo_do_ioctl		= pcnet32_ioctl,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= pcnet32_poll_controller,
+#endif
+};
+
 /* pcnet32_probe1
  *  Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
  *  pdev will be NULL when called from pcnet32_probe_vlbus.
@@ -1934,20 +1950,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	lp->watchdog_timer.function = (void *)&pcnet32_watchdog;

 	/* The PCNET32-specific entries in the device structure. */
-	dev->open = &pcnet32_open;
-	dev->hard_start_xmit = &pcnet32_start_xmit;
-	dev->stop = &pcnet32_close;
-	dev->get_stats = &pcnet32_get_stats;
-	dev->set_multicast_list = &pcnet32_set_multicast_list;
-	dev->do_ioctl = &pcnet32_ioctl;
+	dev->netdev_ops = &pcnet32_netdev_ops;
 	dev->ethtool_ops = &pcnet32_ethtool_ops;
-	dev->tx_timeout = pcnet32_tx_timeout;
 	dev->watchdog_timeo = (5 * HZ);

-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = pcnet32_poll_controller;
-#endif
-
 	/* Fill in the generic fields of the device structure. */
 	if (register_netdev(dev))
 		goto err_free_ring;
@@ -2276,7 +2282,7 @@ static int pcnet32_open(struct net_device *dev)
 	if (lp->chip_version >= PCNET32_79C970A) {
 		/* Print the link status and start the watchdog */
 		pcnet32_check_media(dev, 1);
-		mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+		mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
 	}

 	i = 0;
@@ -2911,7 +2917,7 @@ static void pcnet32_watchdog(struct net_device *dev)
 	pcnet32_check_media(dev, 0);
 	spin_unlock_irqrestore(&lp->lock, flags);

-	mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+	mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
 }

 static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
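Apart from the ops conversion, pcnet32_watchdog() now passes its rearm value through round_jiffies(), which pushes an absolute expiry out to a whole-second boundary so that periodic timers from many devices fire on the same tick and idle CPUs wake less often; the media-check watchdog easily tolerates that slack. Sketch of the idiom, assuming PCNET32_WATCHDOG_TIMEOUT expands to a jiffies-relative value such as jiffies + 2*HZ (the exact definition is not shown in this hunk):

	/* accuracy-insensitive periodic timer: batch it on a second boundary */
	mod_timer(&lp->watchdog_timer, round_jiffies(jiffies + 2 * HZ));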
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 0c46d603b8fe..0be0f0b164f3 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -265,6 +265,13 @@ static const struct header_ops plip_header_ops = {
 	.cache  = plip_hard_header_cache,
 };

+static const struct net_device_ops plip_netdev_ops = {
+	.ndo_open		 = plip_open,
+	.ndo_stop		 = plip_close,
+	.ndo_start_xmit		 = plip_tx_packet,
+	.ndo_do_ioctl		 = plip_ioctl,
+};
+
 /* Entry point of PLIP driver.
    Probe the hardware, and register/initialize the driver.

@@ -280,15 +287,11 @@ plip_init_netdev(struct net_device *dev)
 	struct net_local *nl = netdev_priv(dev);

 	/* Then, override parts of it */
-	dev->hard_start_xmit	 = plip_tx_packet;
-	dev->open		 = plip_open;
-	dev->stop		 = plip_close;
-	dev->do_ioctl		 = plip_ioctl;
-
 	dev->tx_queue_len 	 = 10;
 	dev->flags	         = IFF_POINTOPOINT|IFF_NOARP;
 	memset(dev->dev_addr, 0xfc, ETH_ALEN);

+	dev->netdev_ops		 = &plip_netdev_ops;
 	dev->header_ops          = &plip_header_ops;


diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 53bbddfc8c95..cf3a082bc89d 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
 #include <asm/processor.h>

 #define DRV_NAME	"r6040"
-#define DRV_VERSION	"0.19"
-#define DRV_RELDATE	"18Dec2008"
+#define DRV_VERSION	"0.20"
+#define DRV_RELDATE	"07Jan2009"

 /* PHY CHIP Address */
 #define PHY1_ADDR	1	/* For MAC1 */
@@ -200,7 +200,7 @@ struct r6040_private {

 static char version[] __devinitdata = KERN_INFO DRV_NAME
 	": RDC R6040 NAPI net driver,"
-	"version "DRV_VERSION " (" DRV_RELDATE ")\n";
+	"version "DRV_VERSION " (" DRV_RELDATE ")";

 static int phy_table[] = { PHY1_ADDR, PHY2_ADDR };

@@ -330,7 +330,7 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
 	do {
 		skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
 		if (!skb) {
-			printk(KERN_ERR "%s: failed to alloc skb for rx\n", dev->name);
+			printk(KERN_ERR DRV_NAME "%s: failed to alloc skb for rx\n", dev->name);
 			rc = -ENOMEM;
 			goto err_exit;
 		}
@@ -1077,20 +1077,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
 	/* this should always be supported */
 	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 	if (err) {
-		printk(KERN_ERR DRV_NAME "32-bit PCI DMA addresses"
+		printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses"
 				"not supported by the card\n");
 		goto err_out;
 	}
 	err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 	if (err) {
-		printk(KERN_ERR DRV_NAME "32-bit PCI DMA addresses"
+		printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses"
 				"not supported by the card\n");
 		goto err_out;
 	}

 	/* IO Size check */
 	if (pci_resource_len(pdev, 0) < io_size) {
-		printk(KERN_ERR DRV_NAME "Insufficient PCI resources, aborting\n");
+		printk(KERN_ERR DRV_NAME ": Insufficient PCI resources, aborting\n");
 		err = -EIO;
 		goto err_out;
 	}
@@ -1100,7 +1100,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,

 	dev = alloc_etherdev(sizeof(struct r6040_private));
 	if (!dev) {
-		printk(KERN_ERR DRV_NAME "Failed to allocate etherdev\n");
+		printk(KERN_ERR DRV_NAME ": Failed to allocate etherdev\n");
 		err = -ENOMEM;
 		goto err_out;
 	}
@@ -1116,11 +1116,15 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,

 	ioaddr = pci_iomap(pdev, bar, io_size);
 	if (!ioaddr) {
-		printk(KERN_ERR "ioremap failed for device %s\n",
+		printk(KERN_ERR DRV_NAME ": ioremap failed for device %s\n",
 			pci_name(pdev));
 		err = -EIO;
 		goto err_out_free_res;
 	}
+	/* If PHY status change register is still set to zero it means the
+	 * bootloader didn't initialize it */
+	if (ioread16(ioaddr + PHY_CC) == 0)
+		iowrite16(0x9f07, ioaddr + PHY_CC);

 	/* Init system & device */
 	lp->base = ioaddr;
@@ -1137,6 +1141,11 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
 	adrp[1] = ioread16(ioaddr + MID_0M);
 	adrp[2] = ioread16(ioaddr + MID_0H);

+	/* Some bootloader/BIOSes do not initialize
+	 * MAC address, warn about that */
+	if (!(adrp[0] || adrp[1] || adrp[2]))
+		printk(KERN_WARNING DRV_NAME ": MAC address not initialized\n");
+
 	/* Link new device into r6040_root_dev */
 	lp->pdev = pdev;
 	lp->dev = dev;
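The r6040 printk changes all hinge on C string-literal concatenation: KERN_ERR, DRV_NAME and the message are adjacent literals that the compiler fuses into one string, so without the added ": " the driver name ran straight into the message text (note the wrapped "addresses"/"not supported" pair still lacks a separating space, so that particular message remains fused even after this patch). Illustration only, not driver code:

	#define DRV_NAME "r6040"

	printk(KERN_ERR DRV_NAME "Insufficient PCI resources\n");
		/* logs: r6040Insufficient PCI resources */
	printk(KERN_ERR DRV_NAME ": Insufficient PCI resources\n");
		/* logs: r6040: Insufficient PCI resources */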
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index be3025310e90..fc0e38bddeeb 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -134,6 +134,16 @@ static const struct pnp_device_id sb1000_pnp_ids[] = {
 };
 MODULE_DEVICE_TABLE(pnp, sb1000_pnp_ids);

+static const struct net_device_ops sb1000_netdev_ops = {
+	.ndo_open		= sb1000_open,
+	.ndo_start_xmit		= sb1000_start_xmit,
+	.ndo_do_ioctl		= sb1000_dev_ioctl,
+	.ndo_stop		= sb1000_close,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int
 sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id)
 {
@@ -192,11 +202,7 @@ sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id)
 	if (sb1000_debug > 0)
 		printk(KERN_NOTICE "%s", version);

-	/* The SB1000-specific entries in the device structure. */
-	dev->open		= sb1000_open;
-	dev->do_ioctl		= sb1000_dev_ioctl;
-	dev->hard_start_xmit	= sb1000_start_xmit;
-	dev->stop		= sb1000_close;
+	dev->netdev_ops	= &sb1000_netdev_ops;

 	/* hardware address is 0:0:serial_number */
 	dev->dev_addr[2]	= serial_number >> 24 & 0xff;
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 83cc3c5f7946..a9732686134b 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1782,6 +1782,21 @@ static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
 }

+static const struct net_device_ops sis190_netdev_ops = {
+	.ndo_open		= sis190_open,
+	.ndo_stop		= sis190_close,
+	.ndo_do_ioctl		= sis190_ioctl,
+	.ndo_start_xmit		= sis190_start_xmit,
+	.ndo_tx_timeout		= sis190_tx_timeout,
+	.ndo_set_multicast_list = sis190_set_rx_mode,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= sis190_netpoll,
+#endif
+};
+
 static int __devinit sis190_init_one(struct pci_dev *pdev,
 				     const struct pci_device_id *ent)
 {
@@ -1815,19 +1830,12 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,

 	INIT_WORK(&tp->phy_task, sis190_phy_task);

-	dev->open = sis190_open;
-	dev->stop = sis190_close;
-	dev->do_ioctl = sis190_ioctl;
-	dev->tx_timeout = sis190_tx_timeout;
-	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
-	dev->hard_start_xmit = sis190_start_xmit;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = sis190_netpoll;
-#endif
-	dev->set_multicast_list = sis190_set_rx_mode;
+	dev->netdev_ops = &sis190_netdev_ops;
+
 	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
 	dev->irq = pdev->irq;
 	dev->base_addr = (unsigned long) 0xdead;
+	dev->watchdog_timeo = SIS190_TX_TIMEOUT;

 	spin_lock_init(&tp->lock);

diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 8e1c0baf6958..5c61d5fad908 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -603,7 +603,6 @@ static int sl_init(struct net_device *dev)
 	dev->mtu		= sl->mtu;
 	dev->type		= ARPHRD_SLIP + sl->mode;
 #ifdef SL_CHECK_TRANSMIT
-	dev->tx_timeout		= sl_tx_timeout;
 	dev->watchdog_timeo	= 20*HZ;
 #endif
 	return 0;
@@ -617,19 +616,26 @@ static void sl_uninit(struct net_device *dev)
 	sl_free_bufs(sl);
 }

+static const struct net_device_ops sl_netdev_ops = {
+	.ndo_init		= sl_init,
+	.ndo_uninit		= sl_uninit,
+	.ndo_open		= sl_open,
+	.ndo_stop		= sl_close,
+	.ndo_start_xmit		= sl_xmit,
+	.ndo_get_stats		= sl_get_stats,
+	.ndo_change_mtu		= sl_change_mtu,
+	.ndo_tx_timeout		= sl_tx_timeout,
+#ifdef CONFIG_SLIP_SMART
+	.ndo_do_ioctl		= sl_ioctl,
+#endif
+};
+
+
 static void sl_setup(struct net_device *dev)
 {
-	dev->init		= sl_init;
-	dev->uninit		= sl_uninit;
-	dev->open		= sl_open;
+	dev->netdev_ops		= &sl_netdev_ops;
 	dev->destructor		= free_netdev;
-	dev->stop		= sl_close;
-	dev->get_stats	        = sl_get_stats;
-	dev->change_mtu		= sl_change_mtu;
-	dev->hard_start_xmit	= sl_xmit;
-#ifdef CONFIG_SLIP_SMART
-	dev->do_ioctl		= sl_ioctl;
-#endif
+
 	dev->hard_header_len	= 0;
 	dev->addr_len		= 0;
 	dev->tx_queue_len	= 10;
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 57fb1f71c47b..da3a76b18eff 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -648,6 +648,24 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 #endif /* VLAN_SUPPORT */


+static const struct net_device_ops netdev_ops = {
+	.ndo_open		= netdev_open,
+	.ndo_stop		= netdev_close,
+	.ndo_start_xmit		= start_tx,
+	.ndo_tx_timeout 	= tx_timeout,
+	.ndo_get_stats 		= get_stats,
+	.ndo_set_multicast_list = &set_rx_mode,
+	.ndo_do_ioctl 		= netdev_ioctl,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef VLAN_SUPPORT
+	.ndo_vlan_rx_register	= netdev_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= netdev_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= netdev_vlan_rx_kill_vid,
+#endif
+};
+
 static int __devinit starfire_init_one(struct pci_dev *pdev,
 				       const struct pci_device_id *ent)
 {
@@ -710,11 +728,9 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
 	if (enable_hw_cksum)
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
 #endif /* ZEROCOPY */
+
 #ifdef VLAN_SUPPORT
 	dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
-	dev->vlan_rx_register = netdev_vlan_rx_register;
-	dev->vlan_rx_add_vid = netdev_vlan_rx_add_vid;
-	dev->vlan_rx_kill_vid = netdev_vlan_rx_kill_vid;
 #endif /* VLAN_RX_KILL_VID */
 #ifdef ADDR_64BITS
 	dev->features |= NETIF_F_HIGHDMA;
@@ -810,18 +826,12 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
 		}
 	}

-	/* The chip-specific entries in the device structure. */
-	dev->open = &netdev_open;
-	dev->hard_start_xmit = &start_tx;
-	dev->tx_timeout = tx_timeout;
+	dev->netdev_ops = &netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
-	netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
-	dev->stop = &netdev_close;
-	dev->get_stats = &get_stats;
-	dev->set_multicast_list = &set_rx_mode;
-	dev->do_ioctl = &netdev_ioctl;
 	SET_ETHTOOL_OPS(dev, &ethtool_ops);

+	netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
+
 	if (mtu)
 		dev->mtu = mtu;

diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 698893b92003..feaf0e0577d7 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -449,6 +449,19 @@ static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
 	}
 }

+static const struct net_device_ops netdev_ops = {
+	.ndo_open		= netdev_open,
+	.ndo_stop		= netdev_close,
+	.ndo_start_xmit		= start_tx,
+	.ndo_get_stats 		= get_stats,
+	.ndo_set_multicast_list = set_rx_mode,
+	.ndo_do_ioctl 		= netdev_ioctl,
+	.ndo_tx_timeout		= tx_timeout,
+	.ndo_change_mtu		= change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit sundance_probe1 (struct pci_dev *pdev,
 				      const struct pci_device_id *ent)
 {
@@ -530,16 +543,10 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
 	np->mii_if.reg_num_mask = 0x1f;

 	/* The chip-specific entries in the device structure. */
-	dev->open = &netdev_open;
-	dev->hard_start_xmit = &start_tx;
-	dev->stop = &netdev_close;
-	dev->get_stats = &get_stats;
-	dev->set_multicast_list = &set_rx_mode;
-	dev->do_ioctl = &netdev_ioctl;
+	dev->netdev_ops = &netdev_ops;
 	SET_ETHTOOL_OPS(dev, &ethtool_ops);
-	dev->tx_timeout = &tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
-	dev->change_mtu = &change_mtu;
+
 	pci_set_drvdata(pdev, dev);

 	i = register_netdev(dev);
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 8a7460412482..86c765d83de1 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2989,6 +2989,19 @@ static void gem_remove_one(struct pci_dev *pdev)
 	}
 }

+static const struct net_device_ops gem_netdev_ops = {
+	.ndo_open		= gem_open,
+	.ndo_stop		= gem_close,
+	.ndo_start_xmit		= gem_start_xmit,
+	.ndo_get_stats		= gem_get_stats,
+	.ndo_set_multicast_list = gem_set_multicast,
+	.ndo_do_ioctl		= gem_ioctl,
+	.ndo_tx_timeout		= gem_tx_timeout,
+	.ndo_change_mtu		= gem_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit gem_init_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -3142,17 +3155,10 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 	if (gem_get_device_address(gp))
 		goto err_out_free_consistent;

-	dev->open = gem_open;
-	dev->stop = gem_close;
-	dev->hard_start_xmit = gem_start_xmit;
-	dev->get_stats = gem_get_stats;
-	dev->set_multicast_list = gem_set_multicast;
-	dev->do_ioctl = gem_ioctl;
+	dev->netdev_ops = &gem_netdev_ops;
 	netif_napi_add(dev, &gp->napi, gem_poll, 64);
 	dev->ethtool_ops = &gem_ethtool_ops;
-	dev->tx_timeout = gem_tx_timeout;
 	dev->watchdog_timeo = 5 * HZ;
-	dev->change_mtu = gem_change_mtu;
 	dev->irq = pdev->irq;
 	dev->dma = 0;
 	dev->set_mac_address = gem_set_mac_address;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index b22d3355fb45..7a72a3112f0a 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2607,6 +2607,18 @@ static struct quattro * __devinit quattro_pci_find(struct pci_dev *pdev)
 }
 #endif /* CONFIG_PCI */

+static const struct net_device_ops hme_netdev_ops = {
+	.ndo_open		= happy_meal_open,
+	.ndo_stop		= happy_meal_close,
+	.ndo_start_xmit		= happy_meal_start_xmit,
+	.ndo_tx_timeout		= happy_meal_tx_timeout,
+	.ndo_get_stats		= happy_meal_get_stats,
+	.ndo_set_multicast_list	= happy_meal_set_multicast,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 #ifdef CONFIG_SBUS
 static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
 {
@@ -2750,12 +2762,7 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
 	init_timer(&hp->happy_timer);

 	hp->dev = dev;
-	dev->open = &happy_meal_open;
-	dev->stop = &happy_meal_close;
-	dev->hard_start_xmit = &happy_meal_start_xmit;
-	dev->get_stats = &happy_meal_get_stats;
-	dev->set_multicast_list = &happy_meal_set_multicast;
-	dev->tx_timeout = &happy_meal_tx_timeout;
+	dev->netdev_ops = &hme_netdev_ops;
 	dev->watchdog_timeo = 5*HZ;
 	dev->ethtool_ops = &hme_ethtool_ops;

@@ -3076,12 +3083,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
 	init_timer(&hp->happy_timer);

 	hp->dev = dev;
-	dev->open = &happy_meal_open;
-	dev->stop = &happy_meal_close;
-	dev->hard_start_xmit = &happy_meal_start_xmit;
-	dev->get_stats = &happy_meal_get_stats;
-	dev->set_multicast_list = &happy_meal_set_multicast;
-	dev->tx_timeout = &happy_meal_tx_timeout;
+	dev->netdev_ops = &hme_netdev_ops;
 	dev->watchdog_timeo = 5*HZ;
 	dev->ethtool_ops = &hme_ethtool_ops;
 	dev->irq = pdev->irq;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 85ef8b744557..68b967b585aa 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -831,6 +831,21 @@ static void TLan_Poll(struct net_device *dev)
 }
 #endif

+static const struct net_device_ops TLan_netdev_ops = {
+	.ndo_open 		= TLan_Open,
+	.ndo_stop		= TLan_Close,
+	.ndo_start_xmit		= TLan_StartTx,
+	.ndo_tx_timeout		= TLan_tx_timeout,
+	.ndo_get_stats		= TLan_GetStats,
+	.ndo_set_multicast_list = TLan_SetMulticastList,
+	.ndo_do_ioctl		= TLan_ioctl,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	 = TLan_Poll,
+#endif
+};



@@ -892,16 +907,7 @@ static int TLan_Init( struct net_device *dev )
 	netif_carrier_off(dev);

 	/* Device methods */
-	dev->open = &TLan_Open;
-	dev->hard_start_xmit = &TLan_StartTx;
-	dev->stop = &TLan_Close;
-	dev->get_stats = &TLan_GetStats;
-	dev->set_multicast_list = &TLan_SetMulticastList;
-	dev->do_ioctl = &TLan_ioctl;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = &TLan_Poll;
-#endif
-	dev->tx_timeout = &TLan_tx_timeout;
+	dev->netdev_ops = &TLan_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;

 	return 0;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 5166be930a52..d5d53b633cf8 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1922,6 +1922,18 @@ bad_srom:
 	goto fill_defaults;
 }

+static const struct net_device_ops de_netdev_ops = {
+	.ndo_open		= de_open,
+	.ndo_stop		= de_close,
+	.ndo_set_multicast_list = de_set_rx_mode,
+	.ndo_start_xmit		= de_start_xmit,
+	.ndo_get_stats		= de_get_stats,
+	.ndo_tx_timeout 	= de_tx_timeout,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit de_init_one (struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -1944,14 +1956,9 @@ static int __devinit de_init_one (struct pci_dev *pdev,
 	if (!dev)
 		return -ENOMEM;

+	dev->netdev_ops = &de_netdev_ops;
 	SET_NETDEV_DEV(dev, &pdev->dev);
-	dev->open = de_open;
-	dev->stop = de_close;
-	dev->set_multicast_list = de_set_rx_mode;
-	dev->hard_start_xmit = de_start_xmit;
-	dev->get_stats = de_get_stats;
 	dev->ethtool_ops = &de_ethtool_ops;
-	dev->tx_timeout = de_tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;

 	de = netdev_priv(dev);
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 67bfd6f43366..6418f74415d7 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1077,6 +1077,18 @@ static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
1077 mdelay(2); /* Wait for 2ms */\ 1077 mdelay(2); /* Wait for 2ms */\
1078} 1078}
1079 1079
1080static const struct net_device_ops de4x5_netdev_ops = {
1081 .ndo_open = de4x5_open,
1082 .ndo_stop = de4x5_close,
1083 .ndo_start_xmit = de4x5_queue_pkt,
1084 .ndo_get_stats = de4x5_get_stats,
1085 .ndo_set_multicast_list = set_multicast_list,
1086 .ndo_do_ioctl = de4x5_ioctl,
1087 .ndo_change_mtu = eth_change_mtu,
1088 .ndo_set_mac_address= eth_mac_addr,
1089 .ndo_validate_addr = eth_validate_addr,
1090};
1091
1080 1092
1081static int __devinit 1093static int __devinit
1082de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) 1094de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
@@ -1258,13 +1270,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1258 1270
1259 /* The DE4X5-specific entries in the device structure. */ 1271 /* The DE4X5-specific entries in the device structure. */
1260 SET_NETDEV_DEV(dev, gendev); 1272 SET_NETDEV_DEV(dev, gendev);
1261 dev->open = &de4x5_open; 1273 dev->netdev_ops = &de4x5_netdev_ops;
1262 dev->hard_start_xmit = &de4x5_queue_pkt;
1263 dev->stop = &de4x5_close;
1264 dev->get_stats = &de4x5_get_stats;
1265 dev->set_multicast_list = &set_multicast_list;
1266 dev->do_ioctl = &de4x5_ioctl;
1267
1268 dev->mem_start = 0; 1274 dev->mem_start = 0;
1269 1275
1270 /* Fill in the generic fields of the device structure. */ 1276 /* Fill in the generic fields of the device structure. */
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 28a5c51b43a0..2e5c99941f35 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -257,9 +257,6 @@ struct dmfe_board_info {
257 u8 wol_mode; /* user WOL settings */ 257 u8 wol_mode; /* user WOL settings */
258 struct timer_list timer; 258 struct timer_list timer;
259 259
260 /* System defined statistic counter */
261 struct net_device_stats stats;
262
263 /* Driver defined statistic counter */ 260 /* Driver defined statistic counter */
264 unsigned long tx_fifo_underrun; 261 unsigned long tx_fifo_underrun;
265 unsigned long tx_loss_carrier; 262 unsigned long tx_loss_carrier;
@@ -316,7 +313,6 @@ static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
316static int dmfe_open(struct DEVICE *); 313static int dmfe_open(struct DEVICE *);
317static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *); 314static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
318static int dmfe_stop(struct DEVICE *); 315static int dmfe_stop(struct DEVICE *);
319static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
320static void dmfe_set_filter_mode(struct DEVICE *); 316static void dmfe_set_filter_mode(struct DEVICE *);
321static const struct ethtool_ops netdev_ethtool_ops; 317static const struct ethtool_ops netdev_ethtool_ops;
322static u16 read_srom_word(long ,int); 318static u16 read_srom_word(long ,int);
@@ -351,6 +347,19 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *);
351 347
352/* DM910X network board routine ---------------------------- */ 348/* DM910X network board routine ---------------------------- */
353 349
350static const struct net_device_ops netdev_ops = {
351 .ndo_open = dmfe_open,
352 .ndo_stop = dmfe_stop,
353 .ndo_start_xmit = dmfe_start_xmit,
354 .ndo_set_multicast_list = dmfe_set_filter_mode,
355 .ndo_change_mtu = eth_change_mtu,
356 .ndo_set_mac_address = eth_mac_addr,
357 .ndo_validate_addr = eth_validate_addr,
358#ifdef CONFIG_NET_POLL_CONTROLLER
359 .ndo_poll_controller = poll_dmfe,
360#endif
361};
362
354/* 363/*
355 * Search DM910X board ,allocate space and register it 364 * Search DM910X board ,allocate space and register it
356 */ 365 */
@@ -442,14 +451,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
442 dev->base_addr = db->ioaddr; 451 dev->base_addr = db->ioaddr;
443 dev->irq = pdev->irq; 452 dev->irq = pdev->irq;
444 pci_set_drvdata(pdev, dev); 453 pci_set_drvdata(pdev, dev);
445 dev->open = &dmfe_open; 454 dev->netdev_ops = &netdev_ops;
446 dev->hard_start_xmit = &dmfe_start_xmit;
447 dev->stop = &dmfe_stop;
448 dev->get_stats = &dmfe_get_stats;
449 dev->set_multicast_list = &dmfe_set_filter_mode;
450#ifdef CONFIG_NET_POLL_CONTROLLER
451 dev->poll_controller = &poll_dmfe;
452#endif
453 dev->ethtool_ops = &netdev_ethtool_ops; 455 dev->ethtool_ops = &netdev_ethtool_ops;
454 netif_carrier_off(dev); 456 netif_carrier_off(dev);
455 spin_lock_init(&db->lock); 457 spin_lock_init(&db->lock);
@@ -867,15 +869,15 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
867 869
868 /* A packet sent completed */ 870 /* A packet sent completed */
869 db->tx_packet_cnt--; 871 db->tx_packet_cnt--;
870 db->stats.tx_packets++; 872 dev->stats.tx_packets++;
871 873
872 /* Transmit statistic counter */ 874 /* Transmit statistic counter */
873 if ( tdes0 != 0x7fffffff ) { 875 if ( tdes0 != 0x7fffffff ) {
874 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */ 876 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
875 db->stats.collisions += (tdes0 >> 3) & 0xf; 877 dev->stats.collisions += (tdes0 >> 3) & 0xf;
876 db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff; 878 dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
877 if (tdes0 & TDES0_ERR_MASK) { 879 if (tdes0 & TDES0_ERR_MASK) {
878 db->stats.tx_errors++; 880 dev->stats.tx_errors++;
879 881
880 if (tdes0 & 0x0002) { /* UnderRun */ 882 if (tdes0 & 0x0002) { /* UnderRun */
881 db->tx_fifo_underrun++; 883 db->tx_fifo_underrun++;
@@ -969,13 +971,13 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
969 if (rdes0 & 0x8000) { 971 if (rdes0 & 0x8000) {
970 /* This is a error packet */ 972 /* This is a error packet */
971 //printk(DRV_NAME ": rdes0: %lx\n", rdes0); 973 //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
972 db->stats.rx_errors++; 974 dev->stats.rx_errors++;
973 if (rdes0 & 1) 975 if (rdes0 & 1)
974 db->stats.rx_fifo_errors++; 976 dev->stats.rx_fifo_errors++;
975 if (rdes0 & 2) 977 if (rdes0 & 2)
976 db->stats.rx_crc_errors++; 978 dev->stats.rx_crc_errors++;
977 if (rdes0 & 0x80) 979 if (rdes0 & 0x80)
978 db->stats.rx_length_errors++; 980 dev->stats.rx_length_errors++;
979 } 981 }
980 982
981 if ( !(rdes0 & 0x8000) || 983 if ( !(rdes0 & 0x8000) ||
@@ -1008,8 +1010,8 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
1008 1010
1009 skb->protocol = eth_type_trans(skb, dev); 1011 skb->protocol = eth_type_trans(skb, dev);
1010 netif_rx(skb); 1012 netif_rx(skb);
1011 db->stats.rx_packets++; 1013 dev->stats.rx_packets++;
1012 db->stats.rx_bytes += rxlen; 1014 dev->stats.rx_bytes += rxlen;
1013 } 1015 }
1014 } else { 1016 } else {
1015 /* Reuse SKB buffer when the packet is error */ 1017 /* Reuse SKB buffer when the packet is error */
@@ -1024,20 +1026,6 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
1024 db->rx_ready_ptr = rxptr; 1026 db->rx_ready_ptr = rxptr;
1025} 1027}
1026 1028
1027
1028/*
1029 * Get statistics from driver.
1030 */
1031
1032static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
1033{
1034 struct dmfe_board_info *db = netdev_priv(dev);
1035
1036 DMFE_DBUG(0, "dmfe_get_stats", 0);
1037 return &db->stats;
1038}
1039
1040
1041/* 1029/*
1042 * Set DM910X multicast address 1030 * Set DM910X multicast address
1043 */ 1031 */
@@ -1161,7 +1149,7 @@ static void dmfe_timer(unsigned long data)
1161 1149
1162 /* Operating Mode Check */ 1150 /* Operating Mode Check */
1163 if ( (db->dm910x_chk_mode & 0x1) && 1151 if ( (db->dm910x_chk_mode & 0x1) &&
1164 (db->stats.rx_packets > MAX_CHECK_PACKET) ) 1152 (dev->stats.rx_packets > MAX_CHECK_PACKET) )
1165 db->dm910x_chk_mode = 0x4; 1153 db->dm910x_chk_mode = 0x4;
1166 1154
1167 /* Dynamic reset DM910X : system error or transmit time-out */ 1155 /* Dynamic reset DM910X : system error or transmit time-out */
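Besides the ops table, the dmfe hunks retire the driver-private struct net_device_stats and dmfe_get_stats() entirely: the counters move to the dev->stats instance embedded in struct net_device, which the core returns by default when no ndo_get_stats method is provided. A short sketch of the resulting accounting, assuming a hypothetical foo_count_rx() helper:

/*
 * Sketch, not part of the patch: counters kept in dev->stats need
 * neither a private stats struct nor a get_stats method.
 */
#include <linux/netdevice.h>

static void foo_count_rx(struct net_device *dev, unsigned int len, int bad)
{
	if (bad) {
		dev->stats.rx_errors++;
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}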
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index ff84babb3ff3..bee75fa87a9c 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1225,6 +1225,22 @@ static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1225 return 0; 1225 return 0;
1226} 1226}
1227 1227
1228static const struct net_device_ops tulip_netdev_ops = {
1229 .ndo_open = tulip_open,
1230 .ndo_start_xmit = tulip_start_xmit,
1231 .ndo_tx_timeout = tulip_tx_timeout,
1232 .ndo_stop = tulip_close,
1233 .ndo_get_stats = tulip_get_stats,
1234 .ndo_do_ioctl = private_ioctl,
1235 .ndo_set_multicast_list = set_rx_mode,
1236 .ndo_change_mtu = eth_change_mtu,
1237 .ndo_set_mac_address = eth_mac_addr,
1238 .ndo_validate_addr = eth_validate_addr,
1239#ifdef CONFIG_NET_POLL_CONTROLLER
1240 .ndo_poll_controller = poll_tulip,
1241#endif
1242};
1243
1228static int __devinit tulip_init_one (struct pci_dev *pdev, 1244static int __devinit tulip_init_one (struct pci_dev *pdev,
1229 const struct pci_device_id *ent) 1245 const struct pci_device_id *ent)
1230{ 1246{
@@ -1601,20 +1617,11 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1601 } 1617 }
1602 1618
1603 /* The Tulip-specific entries in the device structure. */ 1619 /* The Tulip-specific entries in the device structure. */
1604 dev->open = tulip_open; 1620 dev->netdev_ops = &tulip_netdev_ops;
1605 dev->hard_start_xmit = tulip_start_xmit;
1606 dev->tx_timeout = tulip_tx_timeout;
1607 dev->watchdog_timeo = TX_TIMEOUT; 1621 dev->watchdog_timeo = TX_TIMEOUT;
1608#ifdef CONFIG_TULIP_NAPI 1622#ifdef CONFIG_TULIP_NAPI
1609 netif_napi_add(dev, &tp->napi, tulip_poll, 16); 1623 netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1610#endif 1624#endif
1611 dev->stop = tulip_close;
1612 dev->get_stats = tulip_get_stats;
1613 dev->do_ioctl = private_ioctl;
1614 dev->set_multicast_list = set_rx_mode;
1615#ifdef CONFIG_NET_POLL_CONTROLLER
1616 dev->poll_controller = &poll_tulip;
1617#endif
1618 SET_ETHTOOL_OPS(dev, &ops); 1625 SET_ETHTOOL_OPS(dev, &ops);
1619 1626
1620 if (register_netdev(dev)) 1627 if (register_netdev(dev))
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 00cbc5251dcc..030e02e63023 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -168,9 +168,6 @@ struct uli526x_board_info {
168 u8 wait_reset; /* Hardware failed, need to reset */ 168 u8 wait_reset; /* Hardware failed, need to reset */
169 struct timer_list timer; 169 struct timer_list timer;
170 170
171 /* System defined statistic counter */
172 struct net_device_stats stats;
173
174 /* Driver defined statistic counter */ 171 /* Driver defined statistic counter */
175 unsigned long tx_fifo_underrun; 172 unsigned long tx_fifo_underrun;
176 unsigned long tx_loss_carrier; 173 unsigned long tx_loss_carrier;
@@ -220,7 +217,6 @@ static int mode = 8;
220static int uli526x_open(struct net_device *); 217static int uli526x_open(struct net_device *);
221static int uli526x_start_xmit(struct sk_buff *, struct net_device *); 218static int uli526x_start_xmit(struct sk_buff *, struct net_device *);
222static int uli526x_stop(struct net_device *); 219static int uli526x_stop(struct net_device *);
223static struct net_device_stats * uli526x_get_stats(struct net_device *);
224static void uli526x_set_filter_mode(struct net_device *); 220static void uli526x_set_filter_mode(struct net_device *);
225static const struct ethtool_ops netdev_ethtool_ops; 221static const struct ethtool_ops netdev_ethtool_ops;
226static u16 read_srom_word(long, int); 222static u16 read_srom_word(long, int);
@@ -251,6 +247,19 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *);
251 247
252/* ULI526X network board routine ---------------------------- */ 248/* ULI526X network board routine ---------------------------- */
253 249
250static const struct net_device_ops netdev_ops = {
251 .ndo_open = uli526x_open,
252 .ndo_stop = uli526x_stop,
253 .ndo_start_xmit = uli526x_start_xmit,
254 .ndo_set_multicast_list = uli526x_set_filter_mode,
255 .ndo_change_mtu = eth_change_mtu,
256 .ndo_set_mac_address = eth_mac_addr,
257 .ndo_validate_addr = eth_validate_addr,
258#ifdef CONFIG_NET_POLL_CONTROLLER
259 .ndo_poll_controller = uli526x_poll,
260#endif
261};
262
254/* 263/*
255 * Search ULI526X board, allocate space and register it 264 * Search ULI526X board, allocate space and register it
256 */ 265 */
@@ -335,15 +344,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
335 pci_set_drvdata(pdev, dev); 344 pci_set_drvdata(pdev, dev);
336 345
337 /* Register some necessary functions */ 346 /* Register some necessary functions */
338 dev->open = &uli526x_open; 347 dev->netdev_ops = &netdev_ops;
339 dev->hard_start_xmit = &uli526x_start_xmit;
340 dev->stop = &uli526x_stop;
341 dev->get_stats = &uli526x_get_stats;
342 dev->set_multicast_list = &uli526x_set_filter_mode;
343 dev->ethtool_ops = &netdev_ethtool_ops; 348 dev->ethtool_ops = &netdev_ethtool_ops;
344#ifdef CONFIG_NET_POLL_CONTROLLER 349
345 dev->poll_controller = &uli526x_poll;
346#endif
347 spin_lock_init(&db->lock); 350 spin_lock_init(&db->lock);
348 351
349 352
@@ -733,7 +736,8 @@ static void uli526x_poll(struct net_device *dev)
733 * Free TX resource after TX complete 736 * Free TX resource after TX complete
734 */ 737 */
735 738
736static void uli526x_free_tx_pkt(struct net_device *dev, struct uli526x_board_info * db) 739static void uli526x_free_tx_pkt(struct net_device *dev,
740 struct uli526x_board_info * db)
737{ 741{
738 struct tx_desc *txptr; 742 struct tx_desc *txptr;
739 u32 tdes0; 743 u32 tdes0;
@@ -747,15 +751,15 @@ static void uli526x_free_tx_pkt(struct net_device *dev, struct uli526x_board_inf
747 751
748 /* A packet sent completed */ 752 /* A packet sent completed */
749 db->tx_packet_cnt--; 753 db->tx_packet_cnt--;
750 db->stats.tx_packets++; 754 dev->stats.tx_packets++;
751 755
752 /* Transmit statistic counter */ 756 /* Transmit statistic counter */
753 if ( tdes0 != 0x7fffffff ) { 757 if ( tdes0 != 0x7fffffff ) {
754 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */ 758 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
755 db->stats.collisions += (tdes0 >> 3) & 0xf; 759 dev->stats.collisions += (tdes0 >> 3) & 0xf;
756 db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff; 760 dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
757 if (tdes0 & TDES0_ERR_MASK) { 761 if (tdes0 & TDES0_ERR_MASK) {
758 db->stats.tx_errors++; 762 dev->stats.tx_errors++;
759 if (tdes0 & 0x0002) { /* UnderRun */ 763 if (tdes0 & 0x0002) { /* UnderRun */
760 db->tx_fifo_underrun++; 764 db->tx_fifo_underrun++;
761 if ( !(db->cr6_data & CR6_SFT) ) { 765 if ( !(db->cr6_data & CR6_SFT) ) {
@@ -825,13 +829,13 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
825 if (rdes0 & 0x8000) { 829 if (rdes0 & 0x8000) {
826 /* This is a error packet */ 830 /* This is a error packet */
827 //printk(DRV_NAME ": rdes0: %lx\n", rdes0); 831 //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
828 db->stats.rx_errors++; 832 dev->stats.rx_errors++;
829 if (rdes0 & 1) 833 if (rdes0 & 1)
830 db->stats.rx_fifo_errors++; 834 dev->stats.rx_fifo_errors++;
831 if (rdes0 & 2) 835 if (rdes0 & 2)
832 db->stats.rx_crc_errors++; 836 dev->stats.rx_crc_errors++;
833 if (rdes0 & 0x80) 837 if (rdes0 & 0x80)
834 db->stats.rx_length_errors++; 838 dev->stats.rx_length_errors++;
835 } 839 }
836 840
837 if ( !(rdes0 & 0x8000) || 841 if ( !(rdes0 & 0x8000) ||
@@ -854,8 +858,8 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
854 858
855 skb->protocol = eth_type_trans(skb, dev); 859 skb->protocol = eth_type_trans(skb, dev);
856 netif_rx(skb); 860 netif_rx(skb);
857 db->stats.rx_packets++; 861 dev->stats.rx_packets++;
858 db->stats.rx_bytes += rxlen; 862 dev->stats.rx_bytes += rxlen;
859 863
860 } else { 864 } else {
861 /* Reuse SKB buffer when the packet is error */ 865 /* Reuse SKB buffer when the packet is error */
@@ -872,19 +876,6 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
872 876
873 877
874/* 878/*
875 * Get statistics from driver.
876 */
877
878static struct net_device_stats * uli526x_get_stats(struct net_device *dev)
879{
880 struct uli526x_board_info *db = netdev_priv(dev);
881
882 ULI526X_DBUG(0, "uli526x_get_stats", 0);
883 return &db->stats;
884}
885
886
887/*
888 * Set ULI526X multicast address 879 * Set ULI526X multicast address
889 */ 880 */
890 881
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 022d99af8646..f467bf87817d 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -343,7 +343,18 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
343static const struct ethtool_ops netdev_ethtool_ops; 343static const struct ethtool_ops netdev_ethtool_ops;
344static int netdev_close(struct net_device *dev); 344static int netdev_close(struct net_device *dev);
345 345
346 346static const struct net_device_ops netdev_ops = {
347 .ndo_open = netdev_open,
348 .ndo_stop = netdev_close,
349 .ndo_start_xmit = start_tx,
350 .ndo_get_stats = get_stats,
351 .ndo_set_multicast_list = set_rx_mode,
352 .ndo_do_ioctl = netdev_ioctl,
353 .ndo_tx_timeout = tx_timeout,
354 .ndo_change_mtu = eth_change_mtu,
355 .ndo_set_mac_address = eth_mac_addr,
356 .ndo_validate_addr = eth_validate_addr,
357};
347 358
348static int __devinit w840_probe1 (struct pci_dev *pdev, 359static int __devinit w840_probe1 (struct pci_dev *pdev,
349 const struct pci_device_id *ent) 360 const struct pci_device_id *ent)
@@ -420,14 +431,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
420 np->mii_if.force_media = 1; 431 np->mii_if.force_media = 1;
421 432
422 /* The chip-specific entries in the device structure. */ 433 /* The chip-specific entries in the device structure. */
423 dev->open = &netdev_open; 434 dev->netdev_ops = &netdev_ops;
424 dev->hard_start_xmit = &start_tx;
425 dev->stop = &netdev_close;
426 dev->get_stats = &get_stats;
427 dev->set_multicast_list = &set_rx_mode;
428 dev->do_ioctl = &netdev_ioctl;
429 dev->ethtool_ops = &netdev_ethtool_ops; 435 dev->ethtool_ops = &netdev_ethtool_ops;
430 dev->tx_timeout = &tx_timeout;
431 dev->watchdog_timeo = TX_TIMEOUT; 436 dev->watchdog_timeo = TX_TIMEOUT;
432 437
433 i = register_netdev(dev); 438 i = register_netdev(dev);
@@ -1555,7 +1560,7 @@ static void __devexit w840_remove1 (struct pci_dev *pdev)
1555 * rtnl_lock, & netif_device_detach after the rtnl_unlock. 1560 * rtnl_lock, & netif_device_detach after the rtnl_unlock.
1556 * - get_stats: 1561 * - get_stats:
1557 * spin_lock_irq(np->lock), doesn't touch hw if not present 1562 * spin_lock_irq(np->lock), doesn't touch hw if not present
1558 * - hard_start_xmit: 1563 * - start_xmit:
1559 * synchronize_irq + netif_tx_disable; 1564 * synchronize_irq + netif_tx_disable;
1560 * - tx_timeout: 1565 * - tx_timeout:
1561 * netif_device_detach + netif_tx_disable; 1566 * netif_device_detach + netif_tx_disable;
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 13c8703ecb9f..c2ca9f40e40e 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -104,10 +104,8 @@ struct xircom_private {
104 */ 104 */
105 spinlock_t lock; 105 spinlock_t lock;
106 106
107
108 struct pci_dev *pdev; 107 struct pci_dev *pdev;
109 struct net_device *dev; 108 struct net_device *dev;
110 struct net_device_stats stats;
111}; 109};
112 110
113 111
@@ -119,7 +117,6 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
119static int xircom_open(struct net_device *dev); 117static int xircom_open(struct net_device *dev);
120static int xircom_close(struct net_device *dev); 118static int xircom_close(struct net_device *dev);
121static void xircom_up(struct xircom_private *card); 119static void xircom_up(struct xircom_private *card);
122static struct net_device_stats *xircom_get_stats(struct net_device *dev);
123#ifdef CONFIG_NET_POLL_CONTROLLER 120#ifdef CONFIG_NET_POLL_CONTROLLER
124static void xircom_poll_controller(struct net_device *dev); 121static void xircom_poll_controller(struct net_device *dev);
125#endif 122#endif
@@ -194,6 +191,18 @@ static const struct ethtool_ops netdev_ethtool_ops = {
194 .get_drvinfo = netdev_get_drvinfo, 191 .get_drvinfo = netdev_get_drvinfo,
195}; 192};
196 193
194static const struct net_device_ops netdev_ops = {
195 .ndo_open = xircom_open,
196 .ndo_stop = xircom_close,
197 .ndo_start_xmit = xircom_start_xmit,
198 .ndo_change_mtu = eth_change_mtu,
199 .ndo_set_mac_address = eth_mac_addr,
200 .ndo_validate_addr = eth_validate_addr,
201#ifdef CONFIG_NET_POLL_CONTROLLER
202 .ndo_poll_controller = xircom_poll_controller,
203#endif
204};
205
197/* xircom_probe is the code that gets called on device insertion. 206/* xircom_probe is the code that gets called on device insertion.
198 it sets up the hardware and registers the device to the networklayer. 207 it sets up the hardware and registers the device to the networklayer.
199 208
@@ -266,13 +275,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
266 read_mac_address(private); 275 read_mac_address(private);
267 setup_descriptors(private); 276 setup_descriptors(private);
268 277
269 dev->open = &xircom_open; 278 dev->netdev_ops = &netdev_ops;
270 dev->hard_start_xmit = &xircom_start_xmit;
271 dev->stop = &xircom_close;
272 dev->get_stats = &xircom_get_stats;
273#ifdef CONFIG_NET_POLL_CONTROLLER
274 dev->poll_controller = &xircom_poll_controller;
275#endif
276 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 279 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
277 pci_set_drvdata(pdev, dev); 280 pci_set_drvdata(pdev, dev);
278 281
@@ -497,14 +500,6 @@ static int xircom_close(struct net_device *dev)
497} 500}
498 501
499 502
500
501static struct net_device_stats *xircom_get_stats(struct net_device *dev)
502{
503 struct xircom_private *card = netdev_priv(dev);
504 return &card->stats;
505}
506
507
508#ifdef CONFIG_NET_POLL_CONTROLLER 503#ifdef CONFIG_NET_POLL_CONTROLLER
509static void xircom_poll_controller(struct net_device *dev) 504static void xircom_poll_controller(struct net_device *dev)
510{ 505{
@@ -1193,7 +1188,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
1193 1188
1194 skb = dev_alloc_skb(pkt_len + 2); 1189 skb = dev_alloc_skb(pkt_len + 2);
1195 if (skb == NULL) { 1190 if (skb == NULL) {
1196 card->stats.rx_dropped++; 1191 dev->stats.rx_dropped++;
1197 goto out; 1192 goto out;
1198 } 1193 }
1199 skb_reserve(skb, 2); 1194 skb_reserve(skb, 2);
@@ -1201,8 +1196,8 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
1201 skb_put(skb, pkt_len); 1196 skb_put(skb, pkt_len);
1202 skb->protocol = eth_type_trans(skb, dev); 1197 skb->protocol = eth_type_trans(skb, dev);
1203 netif_rx(skb); 1198 netif_rx(skb);
1204 card->stats.rx_packets++; 1199 dev->stats.rx_packets++;
1205 card->stats.rx_bytes += pkt_len; 1200 dev->stats.rx_bytes += pkt_len;
1206 1201
1207 out: 1202 out:
1208 /* give the buffer back to the card */ 1203 /* give the buffer back to the card */
@@ -1232,16 +1227,16 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
1232#endif 1227#endif
1233 if (status > 0) { /* bit 31 is 0 when done */ 1228 if (status > 0) { /* bit 31 is 0 when done */
1234 if (card->tx_skb[descnr]!=NULL) { 1229 if (card->tx_skb[descnr]!=NULL) {
1235 card->stats.tx_bytes += card->tx_skb[descnr]->len; 1230 dev->stats.tx_bytes += card->tx_skb[descnr]->len;
1236 dev_kfree_skb_irq(card->tx_skb[descnr]); 1231 dev_kfree_skb_irq(card->tx_skb[descnr]);
1237 } 1232 }
1238 card->tx_skb[descnr] = NULL; 1233 card->tx_skb[descnr] = NULL;
1239 /* Bit 8 in the status field is 1 if there was a collision */ 1234 /* Bit 8 in the status field is 1 if there was a collision */
1240 if (status&(1<<8)) 1235 if (status&(1<<8))
1241 card->stats.collisions++; 1236 dev->stats.collisions++;
1242 card->tx_buffer[4*descnr] = 0; /* descriptor is free again */ 1237 card->tx_buffer[4*descnr] = 0; /* descriptor is free again */
1243 netif_wake_queue (dev); 1238 netif_wake_queue (dev);
1244 card->stats.tx_packets++; 1239 dev->stats.tx_packets++;
1245 } 1240 }
1246 1241
1247 leave("investigate_write_descriptor"); 1242 leave("investigate_write_descriptor");
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 0009f4e34433..3af9a9516ccb 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -2296,6 +2296,19 @@ out:
2296 return mode; 2296 return mode;
2297} 2297}
2298 2298
2299static const struct net_device_ops typhoon_netdev_ops = {
2300 .ndo_open = typhoon_open,
2301 .ndo_stop = typhoon_close,
2302 .ndo_start_xmit = typhoon_start_tx,
2303 .ndo_set_multicast_list = typhoon_set_rx_mode,
2304 .ndo_tx_timeout = typhoon_tx_timeout,
2305 .ndo_get_stats = typhoon_get_stats,
2306 .ndo_validate_addr = eth_validate_addr,
2307 .ndo_set_mac_address = typhoon_set_mac_address,
2308 .ndo_change_mtu = eth_change_mtu,
2309 .ndo_vlan_rx_register = typhoon_vlan_rx_register,
2310};
2311
2299static int __devinit 2312static int __devinit
2300typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2313typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2301{ 2314{
@@ -2495,16 +2508,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2495 } 2508 }
2496 2509
2497 /* The chip-specific entries in the device structure. */ 2510 /* The chip-specific entries in the device structure. */
2498 dev->open = typhoon_open; 2511 dev->netdev_ops = &typhoon_netdev_ops;
2499 dev->hard_start_xmit = typhoon_start_tx;
2500 dev->stop = typhoon_close;
2501 dev->set_multicast_list = typhoon_set_rx_mode;
2502 dev->tx_timeout = typhoon_tx_timeout;
2503 netif_napi_add(dev, &tp->napi, typhoon_poll, 16); 2512 netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2504 dev->watchdog_timeo = TX_TIMEOUT; 2513 dev->watchdog_timeo = TX_TIMEOUT;
2505 dev->get_stats = typhoon_get_stats;
2506 dev->set_mac_address = typhoon_set_mac_address;
2507 dev->vlan_rx_register = typhoon_vlan_rx_register;
2508 2514
2509 SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops); 2515 SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2510 2516
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index edd244f3acb5..5b67bbf1987e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -23,7 +23,7 @@
23#include <linux/usb/usbnet.h> 23#include <linux/usb/usbnet.h>
24 24
25/* datasheet: 25/* datasheet:
26 http://www.davicom.com.tw/big5/download/Data%20Sheet/DM9601-DS-P01-930914.pdf 26 http://ptm2.cc.utu.fi/ftp/network/cards/DM9601/From_NET/DM9601-DS-P01-930914.pdf
27*/ 27*/
28 28
29/* control requests */ 29/* control requests */
@@ -397,16 +397,24 @@ static void dm9601_set_multicast(struct net_device *net)
397 dm_write_reg_async(dev, DM_RX_CTRL, rx_ctl); 397 dm_write_reg_async(dev, DM_RX_CTRL, rx_ctl);
398} 398}
399 399
400static void __dm9601_set_mac_address(struct usbnet *dev)
401{
402 dm_write_async(dev, DM_PHY_ADDR, ETH_ALEN, dev->net->dev_addr);
403}
404
400static int dm9601_set_mac_address(struct net_device *net, void *p) 405static int dm9601_set_mac_address(struct net_device *net, void *p)
401{ 406{
402 struct sockaddr *addr = p; 407 struct sockaddr *addr = p;
403 struct usbnet *dev = netdev_priv(net); 408 struct usbnet *dev = netdev_priv(net);
404 409
405 if (!is_valid_ether_addr(addr->sa_data)) 410 if (!is_valid_ether_addr(addr->sa_data)) {
411 dev_err(&net->dev, "not setting invalid mac address %pM\n",
412 addr->sa_data);
406 return -EINVAL; 413 return -EINVAL;
414 }
407 415
408 memcpy(net->dev_addr, addr->sa_data, net->addr_len); 416 memcpy(net->dev_addr, addr->sa_data, net->addr_len);
409 dm_write_async(dev, DM_PHY_ADDR, net->addr_len, net->dev_addr); 417 __dm9601_set_mac_address(dev);
410 418
411 return 0; 419 return 0;
412} 420}
@@ -414,6 +422,7 @@ static int dm9601_set_mac_address(struct net_device *net, void *p)
414static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) 422static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
415{ 423{
416 int ret; 424 int ret;
425 u8 mac[ETH_ALEN];
417 426
418 ret = usbnet_get_endpoints(dev, intf); 427 ret = usbnet_get_endpoints(dev, intf);
419 if (ret) 428 if (ret)
@@ -438,12 +447,24 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
438 udelay(20); 447 udelay(20);
439 448
440 /* read MAC */ 449 /* read MAC */
441 if (dm_read(dev, DM_PHY_ADDR, ETH_ALEN, dev->net->dev_addr) < 0) { 450 if (dm_read(dev, DM_PHY_ADDR, ETH_ALEN, mac) < 0) {
442 printk(KERN_ERR "Error reading MAC address\n"); 451 printk(KERN_ERR "Error reading MAC address\n");
443 ret = -ENODEV; 452 ret = -ENODEV;
444 goto out; 453 goto out;
445 } 454 }
446 455
456 /*
457 * Overwrite the auto-generated address only with good ones.
458 */
459 if (is_valid_ether_addr(mac))
460 memcpy(dev->net->dev_addr, mac, ETH_ALEN);
461 else {
462 printk(KERN_WARNING
463 "dm9601: No valid MAC address in EEPROM, using %pM\n",
464 dev->net->dev_addr);
465 __dm9601_set_mac_address(dev);
466 }
467
447 /* power up phy */ 468 /* power up phy */
448 dm_write_reg(dev, DM_GPR_CTRL, 1); 469 dm_write_reg(dev, DM_GPR_CTRL, 1);
449 dm_write_reg(dev, DM_GPR_DATA, 0); 470 dm_write_reg(dev, DM_GPR_DATA, 0);
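The dm9601 hunk also reworks bind-time MAC handling: the EEPROM address is read into a stack buffer first, and the random address that usbnet generated is kept, and programmed into the chip, whenever the EEPROM contents fail is_valid_ether_addr(). A sketch of that decision, where foo_pick_mac() and the write_mac callback are hypothetical stand-ins:

/*
 * Sketch only; mirrors the bind-time logic above, with error paths
 * and device I/O abstracted behind write_mac().
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void foo_pick_mac(struct net_device *net, const u8 *eeprom_mac,
			 void (*write_mac)(struct net_device *))
{
	if (is_valid_ether_addr(eeprom_mac)) {
		/* EEPROM address is sane: adopt it */
		memcpy(net->dev_addr, eeprom_mac, ETH_ALEN);
	} else {
		/* keep the auto-generated address and push it to the chip */
		printk(KERN_WARNING "no valid MAC in EEPROM, using %pM\n",
		       net->dev_addr);
		write_mac(net);
	}
}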
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 3073ca25a0b0..7cb10a0a5316 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -251,7 +251,6 @@ struct kaweth_device
251 struct net_device_stats stats; 251 struct net_device_stats stats;
252}; 252};
253 253
254
255/**************************************************************** 254/****************************************************************
256 * kaweth_control 255 * kaweth_control
257 ****************************************************************/ 256 ****************************************************************/
@@ -975,6 +974,17 @@ static int kaweth_resume(struct usb_interface *intf)
975/**************************************************************** 974/****************************************************************
976 * kaweth_probe 975 * kaweth_probe
977 ****************************************************************/ 976 ****************************************************************/
977
978
979static const struct net_device_ops kaweth_netdev_ops = {
980 .ndo_open = kaweth_open,
981 .ndo_stop = kaweth_close,
982 .ndo_start_xmit = kaweth_start_xmit,
983 .ndo_tx_timeout = kaweth_tx_timeout,
984 .ndo_set_multicast_list = kaweth_set_rx_mode,
985 .ndo_get_stats = kaweth_netdev_stats,
986};
987
978static int kaweth_probe( 988static int kaweth_probe(
979 struct usb_interface *intf, 989 struct usb_interface *intf,
980 const struct usb_device_id *id /* from id_table */ 990 const struct usb_device_id *id /* from id_table */
@@ -1147,22 +1157,13 @@ err_fw:
1147 memcpy(netdev->dev_addr, &kaweth->configuration.hw_addr, 1157 memcpy(netdev->dev_addr, &kaweth->configuration.hw_addr,
1148 sizeof(kaweth->configuration.hw_addr)); 1158 sizeof(kaweth->configuration.hw_addr));
1149 1159
1150 netdev->open = kaweth_open; 1160 netdev->netdev_ops = &kaweth_netdev_ops;
1151 netdev->stop = kaweth_close;
1152
1153 netdev->watchdog_timeo = KAWETH_TX_TIMEOUT; 1161 netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
1154 netdev->tx_timeout = kaweth_tx_timeout;
1155
1156 netdev->hard_start_xmit = kaweth_start_xmit;
1157 netdev->set_multicast_list = kaweth_set_rx_mode;
1158 netdev->get_stats = kaweth_netdev_stats;
1159 netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size); 1162 netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size);
1160 SET_ETHTOOL_OPS(netdev, &ops); 1163 SET_ETHTOOL_OPS(netdev, &ops);
1161 1164
1162 /* kaweth is zeroed as part of alloc_netdev */ 1165 /* kaweth is zeroed as part of alloc_netdev */
1163
1164 INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl); 1166 INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
1165
1166 usb_set_intfdata(intf, kaweth); 1167 usb_set_intfdata(intf, kaweth);
1167 1168
1168#if 0 1169#if 0
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index d9241f1c0803..a8228d87c8cf 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -93,6 +93,7 @@ module_param (msg_level, int, 0);
93MODULE_PARM_DESC (msg_level, "Override default message level"); 93MODULE_PARM_DESC (msg_level, "Override default message level");
94 94
95MODULE_DEVICE_TABLE(usb, pegasus_ids); 95MODULE_DEVICE_TABLE(usb, pegasus_ids);
96static const struct net_device_ops pegasus_netdev_ops;
96 97
97static int update_eth_regs_async(pegasus_t *); 98static int update_eth_regs_async(pegasus_t *);
98/* Aargh!!! I _really_ hate such tweaks */ 99/* Aargh!!! I _really_ hate such tweaks */
@@ -1360,14 +1361,10 @@ static int pegasus_probe(struct usb_interface *intf,
1360 pegasus->intf = intf; 1361 pegasus->intf = intf;
1361 pegasus->usb = dev; 1362 pegasus->usb = dev;
1362 pegasus->net = net; 1363 pegasus->net = net;
1363 net->open = pegasus_open; 1364
1364 net->stop = pegasus_close; 1365
1365 net->watchdog_timeo = PEGASUS_TX_TIMEOUT; 1366 net->watchdog_timeo = PEGASUS_TX_TIMEOUT;
1366 net->tx_timeout = pegasus_tx_timeout; 1367 net->netdev_ops = &pegasus_netdev_ops;
1367 net->do_ioctl = pegasus_ioctl;
1368 net->hard_start_xmit = pegasus_start_xmit;
1369 net->set_multicast_list = pegasus_set_multicast;
1370 net->get_stats = pegasus_netdev_stats;
1371 SET_ETHTOOL_OPS(net, &ops); 1368 SET_ETHTOOL_OPS(net, &ops);
1372 pegasus->mii.dev = net; 1369 pegasus->mii.dev = net;
1373 pegasus->mii.mdio_read = mdio_read; 1370 pegasus->mii.mdio_read = mdio_read;
@@ -1482,6 +1479,16 @@ static int pegasus_resume (struct usb_interface *intf)
1482 return 0; 1479 return 0;
1483} 1480}
1484 1481
1482static const struct net_device_ops pegasus_netdev_ops = {
1483 .ndo_open = pegasus_open,
1484 .ndo_stop = pegasus_close,
1485 .ndo_do_ioctl = pegasus_ioctl,
1486 .ndo_start_xmit = pegasus_start_xmit,
1487 .ndo_set_multicast_list = pegasus_set_multicast,
1488 .ndo_get_stats = pegasus_netdev_stats,
1489 .ndo_tx_timeout = pegasus_tx_timeout,
1490};
1491
1485static struct usb_driver pegasus_driver = { 1492static struct usb_driver pegasus_driver = {
1486 .name = driver_name, 1493 .name = driver_name,
1487 .probe = pegasus_probe, 1494 .probe = pegasus_probe,
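pegasus.c needs one extra step: pegasus_probe() appears above the callbacks the ops table references, so the patch adds a tentative declaration of pegasus_netdev_ops near the top of the file and keeps the initialized definition at the bottom. The same trick in miniature, with hypothetical foo_* names (gcc accepts a tentative definition of a static const object that is completed later in the translation unit):

#include <linux/netdevice.h>

/* tentative declaration: only the address is taken before this point */
static const struct net_device_ops foo_ops;

static int foo_probe(struct net_device *net)
{
	net->netdev_ops = &foo_ops;
	return 0;
}

static int foo_open(struct net_device *net)
{
	return 0;
}

/* initialized definition, after the callbacks it references */
static const struct net_device_ops foo_ops = {
	.ndo_open = foo_open,
};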
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b7004ff36451..43f6523c40be 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -624,6 +624,18 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
624 return 0; 624 return 0;
625} 625}
626 626
627static const struct net_device_ops virtnet_netdev = {
628 .ndo_open = virtnet_open,
629 .ndo_stop = virtnet_close,
630 .ndo_start_xmit = start_xmit,
631 .ndo_validate_addr = eth_validate_addr,
632 .ndo_set_mac_address = eth_mac_addr,
633 .ndo_change_mtu = virtnet_change_mtu,
634#ifdef CONFIG_NET_POLL_CONTROLLER
635 .ndo_poll_controller = virtnet_netpoll,
636#endif
637};
638
627static int virtnet_probe(struct virtio_device *vdev) 639static int virtnet_probe(struct virtio_device *vdev)
628{ 640{
629 int err; 641 int err;
@@ -636,14 +648,8 @@ static int virtnet_probe(struct virtio_device *vdev)
636 return -ENOMEM; 648 return -ENOMEM;
637 649
638 /* Set up network device as normal. */ 650 /* Set up network device as normal. */
639 dev->open = virtnet_open; 651 dev->netdev_ops = &virtnet_netdev;
640 dev->stop = virtnet_close;
641 dev->hard_start_xmit = start_xmit;
642 dev->change_mtu = virtnet_change_mtu;
643 dev->features = NETIF_F_HIGHDMA; 652 dev->features = NETIF_F_HIGHDMA;
644#ifdef CONFIG_NET_POLL_CONTROLLER
645 dev->poll_controller = virtnet_netpoll;
646#endif
647 SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); 653 SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
648 SET_NETDEV_DEV(dev, &vdev->dev); 654 SET_NETDEV_DEV(dev, &vdev->dev);
649 655
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 6d4b65fd9c17..c6d93465c7e2 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -397,11 +397,13 @@ int i2400mu_probe(struct usb_interface *iface,
397 i2400m->bus_fw_name = I2400MU_FW_FILE_NAME; 397 i2400m->bus_fw_name = I2400MU_FW_FILE_NAME;
398 i2400m->bus_bm_mac_addr_impaired = 0; 398 i2400m->bus_bm_mac_addr_impaired = 0;
399 399
400#ifdef CONFIG_PM
400 iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */ 401 iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */
401 device_init_wakeup(dev, 1); 402 device_init_wakeup(dev, 1);
402 usb_autopm_enable(i2400mu->usb_iface); 403 usb_autopm_enable(i2400mu->usb_iface);
403 usb_dev->autosuspend_delay = 15 * HZ; 404 usb_dev->autosuspend_delay = 15 * HZ;
404 usb_dev->autosuspend_disabled = 0; 405 usb_dev->autosuspend_disabled = 0;
406#endif
405 407
406 result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT); 408 result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
407 if (result < 0) { 409 if (result < 0) {
@@ -493,7 +495,9 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
493 int result = 0; 495 int result = 0;
494 struct device *dev = &iface->dev; 496 struct device *dev = &iface->dev;
495 struct i2400mu *i2400mu = usb_get_intfdata(iface); 497 struct i2400mu *i2400mu = usb_get_intfdata(iface);
498#ifdef CONFIG_PM
496 struct usb_device *usb_dev = i2400mu->usb_dev; 499 struct usb_device *usb_dev = i2400mu->usb_dev;
500#endif
497 struct i2400m *i2400m = &i2400mu->i2400m; 501 struct i2400m *i2400m = &i2400mu->i2400m;
498 502
499 d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event); 503 d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event);
@@ -503,11 +507,13 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
503 atomic_dec(&i2400mu->do_autopm); 507 atomic_dec(&i2400mu->do_autopm);
504 result = i2400m_cmd_enter_powersave(i2400m); 508 result = i2400m_cmd_enter_powersave(i2400m);
505 atomic_inc(&i2400mu->do_autopm); 509 atomic_inc(&i2400mu->do_autopm);
510#ifdef CONFIG_PM
506 if (result < 0 && usb_dev->auto_pm == 0) { 511 if (result < 0 && usb_dev->auto_pm == 0) {
507 /* System suspend, can't fail */ 512 /* System suspend, can't fail */
508 dev_err(dev, "failed to suspend, will reset on resume\n"); 513 dev_err(dev, "failed to suspend, will reset on resume\n");
509 result = 0; 514 result = 0;
510 } 515 }
516#endif
511 if (result < 0) 517 if (result < 0)
512 goto error_enter_powersave; 518 goto error_enter_powersave;
513 i2400mu_notification_release(i2400mu); 519 i2400mu_notification_release(i2400mu);
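The i2400m hunks are a build fix rather than an ops conversion: the usb_device autosuspend fields touched here (autosuspend_delay, autosuspend_disabled, auto_pm) are only compiled into struct usb_device when CONFIG_PM is set, so every reference gains a matching #ifdef guard. The pattern in isolation:

/*
 * Sketch of the guard above; compiles to a no-op when CONFIG_PM is
 * unset, since the fields themselves disappear from struct usb_device.
 */
#include <linux/usb.h>

static void foo_enable_autosuspend(struct usb_device *usb_dev)
{
#ifdef CONFIG_PM
	usb_dev->autosuspend_delay = 15 * HZ;	/* 15 s idle delay */
	usb_dev->autosuspend_disabled = 0;
#endif
}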
diff --git a/drivers/net/wireless/ath5k/dma.c b/drivers/net/wireless/ath5k/dma.c
index 7e2b1a67e5da..b65b4feb2d28 100644
--- a/drivers/net/wireless/ath5k/dma.c
+++ b/drivers/net/wireless/ath5k/dma.c
@@ -594,7 +594,7 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
594 * XXX: BMISS interrupts may occur after association. 594 * XXX: BMISS interrupts may occur after association.
595 * I found this on 5210 code but it needs testing. If this is 595 * I found this on 5210 code but it needs testing. If this is
596 * true we should disable them before assoc and re-enable them 596 * true we should disable them before assoc and re-enable them
597 * after a successfull assoc + some jiffies. 597 * after a successful assoc + some jiffies.
598 interrupt_mask &= ~AR5K_INT_BMISS; 598 interrupt_mask &= ~AR5K_INT_BMISS;
599 */ 599 */
600 } 600 }
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 9caa96a13586..a611ad857983 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -287,7 +287,7 @@ static void zd_op_stop(struct ieee80211_hw *hw)
287 * @skb - a sk-buffer 287 * @skb - a sk-buffer
288 * @flags: extra flags to set in the TX status info 288 * @flags: extra flags to set in the TX status info
289 * @ackssi: ACK signal strength 289 * @ackssi: ACK signal strength
290 * @success - True for successfull transmission of the frame 290 * @success - True for successful transmission of the frame
291 * 291 *
292 * This information calls ieee80211_tx_status_irqsafe() if required by the 292 * This information calls ieee80211_tx_status_irqsafe() if required by the
293 * control information. It copies the control information into the status 293 * control information. It copies the control information into the status
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 761635be9104..cd6184ee08ee 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1105,6 +1105,16 @@ static void xennet_uninit(struct net_device *dev)
1105 gnttab_free_grant_references(np->gref_rx_head); 1105 gnttab_free_grant_references(np->gref_rx_head);
1106} 1106}
1107 1107
1108static const struct net_device_ops xennet_netdev_ops = {
1109 .ndo_open = xennet_open,
1110 .ndo_uninit = xennet_uninit,
1111 .ndo_stop = xennet_close,
1112 .ndo_start_xmit = xennet_start_xmit,
1113 .ndo_change_mtu = xennet_change_mtu,
1114 .ndo_set_mac_address = eth_mac_addr,
1115 .ndo_validate_addr = eth_validate_addr,
1116};
1117
1108static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev) 1118static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
1109{ 1119{
1110 int i, err; 1120 int i, err;
@@ -1161,12 +1171,9 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
1161 goto exit_free_tx; 1171 goto exit_free_tx;
1162 } 1172 }
1163 1173
1164 netdev->open = xennet_open; 1174 netdev->netdev_ops = &xennet_netdev_ops;
1165 netdev->hard_start_xmit = xennet_start_xmit; 1175
1166 netdev->stop = xennet_close;
1167 netif_napi_add(netdev, &np->napi, xennet_poll, 64); 1176 netif_napi_add(netdev, &np->napi, xennet_poll, 64);
1168 netdev->uninit = xennet_uninit;
1169 netdev->change_mtu = xennet_change_mtu;
1170 netdev->features = NETIF_F_IP_CSUM; 1177 netdev->features = NETIF_F_IP_CSUM;
1171 1178
1172 SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); 1179 SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 65e8294a9e29..9da5a4b81133 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -1,11 +1,12 @@
1/** 1/**
2 * @file buffer_sync.c 2 * @file buffer_sync.c
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002-2009 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 * @author Barry Kasindorf 8 * @author Barry Kasindorf
9 * @author Robert Richter <robert.richter@amd.com>
9 * 10 *
10 * This is the core of the buffer management. Each 11 * This is the core of the buffer management. Each
11 * CPU buffer is processed and entered into the 12 * CPU buffer is processed and entered into the
@@ -315,88 +316,73 @@ static void add_trace_begin(void)
315 add_event_entry(TRACE_BEGIN_CODE); 316 add_event_entry(TRACE_BEGIN_CODE);
316} 317}
317 318
318#ifdef CONFIG_OPROFILE_IBS 319static void add_data(struct op_entry *entry, struct mm_struct *mm)
319
320#define IBS_FETCH_CODE_SIZE 2
321#define IBS_OP_CODE_SIZE 5
322
323/*
324 * Add IBS fetch and op entries to event buffer
325 */
326static void add_ibs_begin(int cpu, int code, struct mm_struct *mm)
327{ 320{
328 unsigned long rip; 321 unsigned long code, pc, val;
329 int i, count; 322 unsigned long cookie;
330 unsigned long ibs_cookie = 0;
331 off_t offset; 323 off_t offset;
332 struct op_sample *sample;
333
334 sample = cpu_buffer_read_entry(cpu);
335 if (!sample)
336 goto Error;
337 rip = sample->eip;
338 324
339#ifdef __LP64__ 325 if (!op_cpu_buffer_get_data(entry, &code))
340 rip += sample->event << 32; 326 return;
341#endif 327 if (!op_cpu_buffer_get_data(entry, &pc))
328 return;
329 if (!op_cpu_buffer_get_size(entry))
330 return;
342 331
343 if (mm) { 332 if (mm) {
344 ibs_cookie = lookup_dcookie(mm, rip, &offset); 333 cookie = lookup_dcookie(mm, pc, &offset);
345 334
346 if (ibs_cookie == NO_COOKIE) 335 if (cookie == NO_COOKIE)
347 offset = rip; 336 offset = pc;
348 if (ibs_cookie == INVALID_COOKIE) { 337 if (cookie == INVALID_COOKIE) {
349 atomic_inc(&oprofile_stats.sample_lost_no_mapping); 338 atomic_inc(&oprofile_stats.sample_lost_no_mapping);
350 offset = rip; 339 offset = pc;
351 } 340 }
352 if (ibs_cookie != last_cookie) { 341 if (cookie != last_cookie) {
353 add_cookie_switch(ibs_cookie); 342 add_cookie_switch(cookie);
354 last_cookie = ibs_cookie; 343 last_cookie = cookie;
355 } 344 }
356 } else 345 } else
357 offset = rip; 346 offset = pc;
358 347
359 add_event_entry(ESCAPE_CODE); 348 add_event_entry(ESCAPE_CODE);
360 add_event_entry(code); 349 add_event_entry(code);
361 add_event_entry(offset); /* Offset from Dcookie */ 350 add_event_entry(offset); /* Offset from Dcookie */
362 351
363 /* we send the Dcookie offset, but send the raw Linear Add also*/ 352 while (op_cpu_buffer_get_data(entry, &val))
364 add_event_entry(sample->eip); 353 add_event_entry(val);
365 add_event_entry(sample->event);
366
367 if (code == IBS_FETCH_CODE)
368 count = IBS_FETCH_CODE_SIZE; /*IBS FETCH is 2 int64s*/
369 else
370 count = IBS_OP_CODE_SIZE; /*IBS OP is 5 int64s*/
371
372 for (i = 0; i < count; i++) {
373 sample = cpu_buffer_read_entry(cpu);
374 if (!sample)
375 goto Error;
376 add_event_entry(sample->eip);
377 add_event_entry(sample->event);
378 }
379
380 return;
381
382Error:
383 return;
384} 354}
385 355
386#endif 356static inline void add_sample_entry(unsigned long offset, unsigned long event)
387
388static void add_sample_entry(unsigned long offset, unsigned long event)
389{ 357{
390 add_event_entry(offset); 358 add_event_entry(offset);
391 add_event_entry(event); 359 add_event_entry(event);
392} 360}
393 361
394 362
395static int add_us_sample(struct mm_struct *mm, struct op_sample *s) 363/*
364 * Add a sample to the global event buffer. If possible the
365 * sample is converted into a persistent dentry/offset pair
366 * for later lookup from userspace. Return 0 on failure.
367 */
368static int
369add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
396{ 370{
397 unsigned long cookie; 371 unsigned long cookie;
398 off_t offset; 372 off_t offset;
399 373
374 if (in_kernel) {
375 add_sample_entry(s->eip, s->event);
376 return 1;
377 }
378
379 /* add userspace sample */
380
381 if (!mm) {
382 atomic_inc(&oprofile_stats.sample_lost_no_mm);
383 return 0;
384 }
385
400 cookie = lookup_dcookie(mm, s->eip, &offset); 386 cookie = lookup_dcookie(mm, s->eip, &offset);
401 387
402 if (cookie == INVALID_COOKIE) { 388 if (cookie == INVALID_COOKIE) {
@@ -415,25 +401,6 @@ static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
415} 401}
416 402
417 403
418/* Add a sample to the global event buffer. If possible the
419 * sample is converted into a persistent dentry/offset pair
420 * for later lookup from userspace.
421 */
422static int
423add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
424{
425 if (in_kernel) {
426 add_sample_entry(s->eip, s->event);
427 return 1;
428 } else if (mm) {
429 return add_us_sample(mm, s);
430 } else {
431 atomic_inc(&oprofile_stats.sample_lost_no_mm);
432 }
433 return 0;
434}
435
436
437static void release_mm(struct mm_struct *mm) 404static void release_mm(struct mm_struct *mm)
438{ 405{
439 if (!mm) 406 if (!mm)
@@ -526,66 +493,69 @@ void sync_buffer(int cpu)
526{ 493{
527 struct mm_struct *mm = NULL; 494 struct mm_struct *mm = NULL;
528 struct mm_struct *oldmm; 495 struct mm_struct *oldmm;
496 unsigned long val;
529 struct task_struct *new; 497 struct task_struct *new;
530 unsigned long cookie = 0; 498 unsigned long cookie = 0;
531 int in_kernel = 1; 499 int in_kernel = 1;
532 sync_buffer_state state = sb_buffer_start; 500 sync_buffer_state state = sb_buffer_start;
533 unsigned int i; 501 unsigned int i;
534 unsigned long available; 502 unsigned long available;
503 unsigned long flags;
504 struct op_entry entry;
505 struct op_sample *sample;
535 506
536 mutex_lock(&buffer_mutex); 507 mutex_lock(&buffer_mutex);
537 508
538 add_cpu_switch(cpu); 509 add_cpu_switch(cpu);
539 510
540 cpu_buffer_reset(cpu); 511 op_cpu_buffer_reset(cpu);
541 available = cpu_buffer_entries(cpu); 512 available = op_cpu_buffer_entries(cpu);
542 513
543 for (i = 0; i < available; ++i) { 514 for (i = 0; i < available; ++i) {
544 struct op_sample *s = cpu_buffer_read_entry(cpu); 515 sample = op_cpu_buffer_read_entry(&entry, cpu);
545 if (!s) 516 if (!sample)
546 break; 517 break;
547 518
548 if (is_code(s->eip)) { 519 if (is_code(sample->eip)) {
549 switch (s->event) { 520 flags = sample->event;
550 case 0: 521 if (flags & TRACE_BEGIN) {
551 case CPU_IS_KERNEL: 522 state = sb_bt_start;
523 add_trace_begin();
524 }
525 if (flags & KERNEL_CTX_SWITCH) {
552 /* kernel/userspace switch */ 526 /* kernel/userspace switch */
553 in_kernel = s->event; 527 in_kernel = flags & IS_KERNEL;
554 if (state == sb_buffer_start) 528 if (state == sb_buffer_start)
555 state = sb_sample_start; 529 state = sb_sample_start;
556 add_kernel_ctx_switch(s->event); 530 add_kernel_ctx_switch(flags & IS_KERNEL);
557 break; 531 }
558 case CPU_TRACE_BEGIN: 532 if (flags & USER_CTX_SWITCH
559 state = sb_bt_start; 533 && op_cpu_buffer_get_data(&entry, &val)) {
560 add_trace_begin();
561 break;
562#ifdef CONFIG_OPROFILE_IBS
563 case IBS_FETCH_BEGIN:
564 state = sb_bt_start;
565 add_ibs_begin(cpu, IBS_FETCH_CODE, mm);
566 break;
567 case IBS_OP_BEGIN:
568 state = sb_bt_start;
569 add_ibs_begin(cpu, IBS_OP_CODE, mm);
570 break;
571#endif
572 default:
573 /* userspace context switch */ 534 /* userspace context switch */
535 new = (struct task_struct *)val;
574 oldmm = mm; 536 oldmm = mm;
575 new = (struct task_struct *)s->event;
576 release_mm(oldmm); 537 release_mm(oldmm);
577 mm = take_tasks_mm(new); 538 mm = take_tasks_mm(new);
578 if (mm != oldmm) 539 if (mm != oldmm)
579 cookie = get_exec_dcookie(mm); 540 cookie = get_exec_dcookie(mm);
580 add_user_ctx_switch(new, cookie); 541 add_user_ctx_switch(new, cookie);
581 break;
582 }
583 } else if (state >= sb_bt_start &&
584 !add_sample(mm, s, in_kernel)) {
585 if (state == sb_bt_start) {
586 state = sb_bt_ignore;
587 atomic_inc(&oprofile_stats.bt_lost_no_mapping);
588 } 542 }
543 if (op_cpu_buffer_get_size(&entry))
544 add_data(&entry, mm);
545 continue;
546 }
547
548 if (state < sb_bt_start)
549 /* ignore sample */
550 continue;
551
552 if (add_sample(mm, sample, in_kernel))
553 continue;
554
555 /* ignore backtraces if failed to add a sample */
556 if (state == sb_bt_start) {
557 state = sb_bt_ignore;
558 atomic_inc(&oprofile_stats.bt_lost_no_mapping);
589 } 559 }
590 } 560 }
591 release_mm(mm); 561 release_mm(mm);
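The rewritten sync_buffer() replaces the fixed-size IBS record parsing with a generic variable-length format: each record carries a header (eip/event) plus zero or more payload words, pulled off one at a time with op_cpu_buffer_get_data(). A hedged reader-side sketch using only calls whose signatures appear in this patch; buffer_mutex handling and the flag/state machinery of sync_buffer() are deliberately omitted:

/* assumes the op_cpu_buffer_* declarations from oprofile's cpu_buffer.h */
static void foo_drain(int cpu)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long val;
	unsigned long i, available;

	op_cpu_buffer_reset(cpu);
	available = op_cpu_buffer_entries(cpu);

	for (i = 0; i < available; i++) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;
		/* payload words, if any, follow the header in the record */
		while (op_cpu_buffer_get_data(&entry, &val))
			add_event_entry(val);
	}
}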
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 61090969158f..2e03b6d796d3 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -1,11 +1,12 @@
1/** 1/**
2 * @file cpu_buffer.c 2 * @file cpu_buffer.c
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002-2009 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 * @author Barry Kasindorf <barry.kasindorf@amd.com> 8 * @author Barry Kasindorf <barry.kasindorf@amd.com>
9 * @author Robert Richter <robert.richter@amd.com>
9 * 10 *
10 * Each CPU has a local buffer that stores PC value/event 11 * Each CPU has a local buffer that stores PC value/event
11 * pairs. We also log context switches when we notice them. 12 * pairs. We also log context switches when we notice them.
@@ -45,8 +46,8 @@
45 * can be changed to a single buffer solution when the ring buffer 46 * can be changed to a single buffer solution when the ring buffer
46 * access is implemented as non-locking atomic code. 47 * access is implemented as non-locking atomic code.
47 */ 48 */
48struct ring_buffer *op_ring_buffer_read; 49static struct ring_buffer *op_ring_buffer_read;
49struct ring_buffer *op_ring_buffer_write; 50static struct ring_buffer *op_ring_buffer_write;
50DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); 51DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
51 52
52static void wq_sync_buffer(struct work_struct *work); 53static void wq_sync_buffer(struct work_struct *work);
@@ -54,19 +55,9 @@ static void wq_sync_buffer(struct work_struct *work);
54#define DEFAULT_TIMER_EXPIRE (HZ / 10) 55#define DEFAULT_TIMER_EXPIRE (HZ / 10)
55static int work_enabled; 56static int work_enabled;
56 57
57void free_cpu_buffers(void)
58{
59 if (op_ring_buffer_read)
60 ring_buffer_free(op_ring_buffer_read);
61 op_ring_buffer_read = NULL;
62 if (op_ring_buffer_write)
63 ring_buffer_free(op_ring_buffer_write);
64 op_ring_buffer_write = NULL;
65}
66
67unsigned long oprofile_get_cpu_buffer_size(void) 58unsigned long oprofile_get_cpu_buffer_size(void)
68{ 59{
69 return fs_cpu_buffer_size; 60 return oprofile_cpu_buffer_size;
70} 61}
71 62
72void oprofile_cpu_buffer_inc_smpl_lost(void) 63void oprofile_cpu_buffer_inc_smpl_lost(void)
@@ -77,11 +68,21 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)
77 cpu_buf->sample_lost_overflow++; 68 cpu_buf->sample_lost_overflow++;
78} 69}
79 70
71void free_cpu_buffers(void)
72{
73 if (op_ring_buffer_read)
74 ring_buffer_free(op_ring_buffer_read);
75 op_ring_buffer_read = NULL;
76 if (op_ring_buffer_write)
77 ring_buffer_free(op_ring_buffer_write);
78 op_ring_buffer_write = NULL;
79}
80
80int alloc_cpu_buffers(void) 81int alloc_cpu_buffers(void)
81{ 82{
82 int i; 83 int i;
83 84
84 unsigned long buffer_size = fs_cpu_buffer_size; 85 unsigned long buffer_size = oprofile_cpu_buffer_size;
85 86
86 op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS); 87 op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
87 if (!op_ring_buffer_read) 88 if (!op_ring_buffer_read)
@@ -97,8 +98,6 @@ int alloc_cpu_buffers(void)
97 b->last_is_kernel = -1; 98 b->last_is_kernel = -1;
98 b->tracing = 0; 99 b->tracing = 0;
99 b->buffer_size = buffer_size; 100 b->buffer_size = buffer_size;
100 b->tail_pos = 0;
101 b->head_pos = 0;
102 b->sample_received = 0; 101 b->sample_received = 0;
103 b->sample_lost_overflow = 0; 102 b->sample_lost_overflow = 0;
104 b->backtrace_aborted = 0; 103 b->backtrace_aborted = 0;
@@ -145,47 +144,156 @@ void end_cpu_work(void)
145 flush_scheduled_work(); 144 flush_scheduled_work();
146} 145}
147 146
148static inline int 147/*
149add_sample(struct oprofile_cpu_buffer *cpu_buf, 148 * This function prepares the cpu buffer to write a sample.
150 unsigned long pc, unsigned long event) 149 *
150 * Struct op_entry is used during operations on the ring buffer while
151 * struct op_sample contains the data that is stored in the ring
152 * buffer. Struct entry can be uninitialized. The function reserves a
153 * data array that is specified by size. Use
154 * op_cpu_buffer_write_commit() after preparing the sample. In case of
155 * errors a null pointer is returned, otherwise the pointer to the
156 * sample.
157 *
158 */
159struct op_sample
160*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
161{
162 entry->event = ring_buffer_lock_reserve
163 (op_ring_buffer_write, sizeof(struct op_sample) +
164 size * sizeof(entry->sample->data[0]), &entry->irq_flags);
165 if (entry->event)
166 entry->sample = ring_buffer_event_data(entry->event);
167 else
168 entry->sample = NULL;
169
170 if (!entry->sample)
171 return NULL;
172
173 entry->size = size;
174 entry->data = entry->sample->data;
175
176 return entry->sample;
177}
178
179int op_cpu_buffer_write_commit(struct op_entry *entry)
180{
181 return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
182 entry->irq_flags);
183}
184
185struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
186{
187 struct ring_buffer_event *e;
188 e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
189 if (e)
190 goto event;
191 if (ring_buffer_swap_cpu(op_ring_buffer_read,
192 op_ring_buffer_write,
193 cpu))
194 return NULL;
195 e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
196 if (e)
197 goto event;
198 return NULL;
199
200event:
201 entry->event = e;
202 entry->sample = ring_buffer_event_data(e);
203 entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
204 / sizeof(entry->sample->data[0]);
205 entry->data = entry->sample->data;
206 return entry->sample;
207}
208
209unsigned long op_cpu_buffer_entries(int cpu)
210{
211 return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
212 + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
213}
214
215static int
216op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
217 int is_kernel, struct task_struct *task)
151{ 218{
152 struct op_entry entry; 219 struct op_entry entry;
153 int ret; 220 struct op_sample *sample;
221 unsigned long flags;
222 int size;
223
224 flags = 0;
225
226 if (backtrace)
227 flags |= TRACE_BEGIN;
228
229 /* notice a switch from user->kernel or vice versa */
230 is_kernel = !!is_kernel;
231 if (cpu_buf->last_is_kernel != is_kernel) {
232 cpu_buf->last_is_kernel = is_kernel;
233 flags |= KERNEL_CTX_SWITCH;
234 if (is_kernel)
235 flags |= IS_KERNEL;
236 }
237
238 /* notice a task switch */
239 if (cpu_buf->last_task != task) {
240 cpu_buf->last_task = task;
241 flags |= USER_CTX_SWITCH;
242 }
243
244 if (!flags)
245 /* nothing to do */
246 return 0;
247
248 if (flags & USER_CTX_SWITCH)
249 size = 1;
250 else
251 size = 0;
252
253 sample = op_cpu_buffer_write_reserve(&entry, size);
254 if (!sample)
255 return -ENOMEM;
154 256
155 ret = cpu_buffer_write_entry(&entry); 257 sample->eip = ESCAPE_CODE;
156 if (ret) 258 sample->event = flags;
157 return ret;
158 259
159 entry.sample->eip = pc; 260 if (size)
160 entry.sample->event = event; 261 op_cpu_buffer_add_data(&entry, (unsigned long)task);
161 262
162 ret = cpu_buffer_write_commit(&entry); 263 op_cpu_buffer_write_commit(&entry);
163 if (ret)
164 return ret;
165 264
166 return 0; 265 return 0;
167} 266}
168 267
169static inline int 268static inline int
170add_code(struct oprofile_cpu_buffer *buffer, unsigned long value) 269op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
270 unsigned long pc, unsigned long event)
171{ 271{
172 return add_sample(buffer, ESCAPE_CODE, value); 272 struct op_entry entry;
273 struct op_sample *sample;
274
275 sample = op_cpu_buffer_write_reserve(&entry, 0);
276 if (!sample)
277 return -ENOMEM;
278
279 sample->eip = pc;
280 sample->event = event;
281
282 return op_cpu_buffer_write_commit(&entry);
173} 283}
174 284
175/* This must be safe from any context. It's safe writing here 285/*
176 * because of the head/tail separation of the writer and reader 286 * This must be safe from any context.
177 * of the CPU buffer.
178 * 287 *
179 * is_kernel is needed because on some architectures you cannot 288 * is_kernel is needed because on some architectures you cannot
180 * tell if you are in kernel or user space simply by looking at 289 * tell if you are in kernel or user space simply by looking at
181 * pc. We tag this in the buffer by generating kernel enter/exit 290 * pc. We tag this in the buffer by generating kernel enter/exit
182 * events whenever is_kernel changes 291 * events whenever is_kernel changes
183 */ 292 */
184static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, 293static int
185 int is_kernel, unsigned long event) 294log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
295 unsigned long backtrace, int is_kernel, unsigned long event)
186{ 296{
187 struct task_struct *task;
188
189 cpu_buf->sample_received++; 297 cpu_buf->sample_received++;
190 298
191 if (pc == ESCAPE_CODE) { 299 if (pc == ESCAPE_CODE) {
@@ -193,25 +301,10 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
193 return 0; 301 return 0;
194 } 302 }
195 303
196 is_kernel = !!is_kernel; 304 if (op_add_code(cpu_buf, backtrace, is_kernel, current))
197 305 goto fail;
198 task = current;
199
200 /* notice a switch from user->kernel or vice versa */
201 if (cpu_buf->last_is_kernel != is_kernel) {
202 cpu_buf->last_is_kernel = is_kernel;
203 if (add_code(cpu_buf, is_kernel))
204 goto fail;
205 }
206
207 /* notice a task switch */
208 if (cpu_buf->last_task != task) {
209 cpu_buf->last_task = task;
210 if (add_code(cpu_buf, (unsigned long)task))
211 goto fail;
212 }
213 306
214 if (add_sample(cpu_buf, pc, event)) 307 if (op_add_sample(cpu_buf, pc, event))
215 goto fail; 308 goto fail;
216 309
217 return 1; 310 return 1;
@@ -221,109 +314,102 @@ fail:
221 return 0; 314 return 0;
222} 315}
223 316
224static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) 317static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
225{ 318{
226 add_code(cpu_buf, CPU_TRACE_BEGIN);
227 cpu_buf->tracing = 1; 319 cpu_buf->tracing = 1;
228 return 1;
229} 320}
230 321
231static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf) 322static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
232{ 323{
233 cpu_buf->tracing = 0; 324 cpu_buf->tracing = 0;
234} 325}
235 326
236void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, 327static inline void
237 unsigned long event, int is_kernel) 328__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
329 unsigned long event, int is_kernel)
238{ 330{
239 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 331 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
240 332 unsigned long backtrace = oprofile_backtrace_depth;
241 if (!backtrace_depth) {
242 log_sample(cpu_buf, pc, is_kernel, event);
243 return;
244 }
245
246 if (!oprofile_begin_trace(cpu_buf))
247 return;
248 333
249 /* 334 /*
250 * if log_sample() fails we can't backtrace since we lost the 335 * if log_sample() fails we can't backtrace since we lost the
251 * source of this event 336 * source of this event
252 */ 337 */
253 if (log_sample(cpu_buf, pc, is_kernel, event)) 338 if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
254 oprofile_ops.backtrace(regs, backtrace_depth); 339 /* failed */
340 return;
341
342 if (!backtrace)
343 return;
344
345 oprofile_begin_trace(cpu_buf);
346 oprofile_ops.backtrace(regs, backtrace);
255 oprofile_end_trace(cpu_buf); 347 oprofile_end_trace(cpu_buf);
256} 348}
257 349
350void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
351 unsigned long event, int is_kernel)
352{
353 __oprofile_add_ext_sample(pc, regs, event, is_kernel);
354}
355
258void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) 356void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
259{ 357{
260 int is_kernel = !user_mode(regs); 358 int is_kernel = !user_mode(regs);
261 unsigned long pc = profile_pc(regs); 359 unsigned long pc = profile_pc(regs);
262 360
263 oprofile_add_ext_sample(pc, regs, event, is_kernel); 361 __oprofile_add_ext_sample(pc, regs, event, is_kernel);
264} 362}
265 363
266#ifdef CONFIG_OPROFILE_IBS 364/*
267 365 * Add samples with data to the ring buffer.
268#define MAX_IBS_SAMPLE_SIZE 14 366 *
269 367 * Use oprofile_add_data(&entry, val) to add data and
270void oprofile_add_ibs_sample(struct pt_regs * const regs, 368 * oprofile_write_commit(&entry) to commit the sample.
271 unsigned int * const ibs_sample, int ibs_code) 369 */
370void
371oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
372 unsigned long pc, int code, int size)
272{ 373{
374 struct op_sample *sample;
273 int is_kernel = !user_mode(regs); 375 int is_kernel = !user_mode(regs);
274 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 376 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
275 struct task_struct *task;
276 int fail = 0;
277 377
278 cpu_buf->sample_received++; 378 cpu_buf->sample_received++;
279 379
280 /* notice a switch from user->kernel or vice versa */ 380 /* no backtraces for samples with data */
281 if (cpu_buf->last_is_kernel != is_kernel) { 381 if (op_add_code(cpu_buf, 0, is_kernel, current))
282 if (add_code(cpu_buf, is_kernel)) 382 goto fail;
283 goto fail;
284 cpu_buf->last_is_kernel = is_kernel;
285 }
286
287 /* notice a task switch */
288 if (!is_kernel) {
289 task = current;
290 if (cpu_buf->last_task != task) {
291 if (add_code(cpu_buf, (unsigned long)task))
292 goto fail;
293 cpu_buf->last_task = task;
294 }
295 }
296
297 fail = fail || add_code(cpu_buf, ibs_code);
298 fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
299 fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
300 fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
301
302 if (ibs_code == IBS_OP_BEGIN) {
303 fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
304 fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
305 fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
306 }
307 383
308 if (fail) 384 sample = op_cpu_buffer_write_reserve(entry, size + 2);
385 if (!sample)
309 goto fail; 386 goto fail;
387 sample->eip = ESCAPE_CODE;
388 sample->event = 0; /* no flags */
310 389
311 if (backtrace_depth) 390 op_cpu_buffer_add_data(entry, code);
312 oprofile_ops.backtrace(regs, backtrace_depth); 391 op_cpu_buffer_add_data(entry, pc);
313 392
314 return; 393 return;
315 394
316fail: 395fail:
317 cpu_buf->sample_lost_overflow++; 396 cpu_buf->sample_lost_overflow++;
318 return;
319} 397}
320 398
321#endif 399int oprofile_add_data(struct op_entry *entry, unsigned long val)
400{
401 return op_cpu_buffer_add_data(entry, val);
402}
403
404int oprofile_write_commit(struct op_entry *entry)
405{
406 return op_cpu_buffer_write_commit(entry);
407}
322 408
323void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) 409void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
324{ 410{
325 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 411 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
326 log_sample(cpu_buf, pc, is_kernel, event); 412 log_sample(cpu_buf, pc, 0, is_kernel, event);
327} 413}
328 414
329void oprofile_add_trace(unsigned long pc) 415void oprofile_add_trace(unsigned long pc)
@@ -340,7 +426,7 @@ void oprofile_add_trace(unsigned long pc)
340 if (pc == ESCAPE_CODE) 426 if (pc == ESCAPE_CODE)
341 goto fail; 427 goto fail;
342 428
343 if (add_sample(cpu_buf, pc, 0)) 429 if (op_add_sample(cpu_buf, pc, 0))
344 goto fail; 430 goto fail;
345 431
346 return; 432 return;
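For context, the write side of this API is meant to be driven from architecture code (the AMD IBS handler is the intended first user). A minimal sketch of emitting one sample with two extra data words follows; MY_SAMPLE_CODE and the payload values are hypothetical, and note that this version of oprofile_write_reserve() does not report a failed reservation to its caller, it only accounts it in sample_lost_overflow:

/* sketch: emit one sample carrying two extra data words */
static void report_hw_sample(struct pt_regs *regs, unsigned long pc)
{
	struct op_entry entry;

	/* reserves size + 2 slots: code and pc are stored as data, too */
	oprofile_write_reserve(&entry, regs, pc, MY_SAMPLE_CODE, 2);
	oprofile_add_data(&entry, 0x1234);	/* hypothetical payload */
	oprofile_add_data(&entry, 0x5678);
	oprofile_write_commit(&entry);
}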
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index aacb0f0bc566..63f81c44846a 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -1,10 +1,11 @@
1/** 1/**
2 * @file cpu_buffer.h 2 * @file cpu_buffer.h
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002-2009 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 * @author Robert Richter <robert.richter@amd.com>
8 */ 9 */
9 10
10#ifndef OPROFILE_CPU_BUFFER_H 11#ifndef OPROFILE_CPU_BUFFER_H
@@ -31,17 +32,12 @@ void end_cpu_work(void);
31struct op_sample { 32struct op_sample {
32 unsigned long eip; 33 unsigned long eip;
33 unsigned long event; 34 unsigned long event;
35 unsigned long data[0];
34}; 36};
35 37
36struct op_entry { 38struct op_entry;
37 struct ring_buffer_event *event;
38 struct op_sample *sample;
39 unsigned long irq_flags;
40};
41 39
42struct oprofile_cpu_buffer { 40struct oprofile_cpu_buffer {
43 volatile unsigned long head_pos;
44 volatile unsigned long tail_pos;
45 unsigned long buffer_size; 41 unsigned long buffer_size;
46 struct task_struct *last_task; 42 struct task_struct *last_task;
47 int last_is_kernel; 43 int last_is_kernel;
@@ -54,8 +50,6 @@ struct oprofile_cpu_buffer {
54 struct delayed_work work; 50 struct delayed_work work;
55}; 51};
56 52
57extern struct ring_buffer *op_ring_buffer_read;
58extern struct ring_buffer *op_ring_buffer_write;
59DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); 53DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
60 54
61/* 55/*
@@ -64,7 +58,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
64 * reset these to invalid values; the next sample collected will 58 * reset these to invalid values; the next sample collected will
65 * populate the buffer with proper values to initialize the buffer 59 * populate the buffer with proper values to initialize the buffer
66 */ 60 */
67static inline void cpu_buffer_reset(int cpu) 61static inline void op_cpu_buffer_reset(int cpu)
68{ 62{
69 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu); 63 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
70 64
@@ -72,55 +66,48 @@ static inline void cpu_buffer_reset(int cpu)
72 cpu_buf->last_task = NULL; 66 cpu_buf->last_task = NULL;
73} 67}
74 68
75static inline int cpu_buffer_write_entry(struct op_entry *entry) 69struct op_sample
76{ 70*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
77 entry->event = ring_buffer_lock_reserve(op_ring_buffer_write, 71int op_cpu_buffer_write_commit(struct op_entry *entry);
78 sizeof(struct op_sample), 72struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
79 &entry->irq_flags); 73unsigned long op_cpu_buffer_entries(int cpu);
80 if (entry->event)
81 entry->sample = ring_buffer_event_data(entry->event);
82 else
83 entry->sample = NULL;
84
85 if (!entry->sample)
86 return -ENOMEM;
87
88 return 0;
89}
90 74
91static inline int cpu_buffer_write_commit(struct op_entry *entry) 75/* returns the remaining free size of data in the entry */
76static inline
77int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
92{ 78{
93 return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event, 79 if (!entry->size)
94 entry->irq_flags); 80 return 0;
81 *entry->data = val;
82 entry->size--;
83 entry->data++;
84 return entry->size;
95} 85}
96 86
97static inline struct op_sample *cpu_buffer_read_entry(int cpu) 87/* returns the size of data in the entry */
88static inline
89int op_cpu_buffer_get_size(struct op_entry *entry)
98{ 90{
99 struct ring_buffer_event *e; 91 return entry->size;
100 e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
101 if (e)
102 return ring_buffer_event_data(e);
103 if (ring_buffer_swap_cpu(op_ring_buffer_read,
104 op_ring_buffer_write,
105 cpu))
106 return NULL;
107 e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
108 if (e)
109 return ring_buffer_event_data(e);
110 return NULL;
111} 92}
112 93
113/* "acquire" as many cpu buffer slots as we can */ 94/* returns 0 if empty or the size of data including the current value */
114static inline unsigned long cpu_buffer_entries(int cpu) 95static inline
96int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
115{ 97{
116 return ring_buffer_entries_cpu(op_ring_buffer_read, cpu) 98 int size = entry->size;
117 + ring_buffer_entries_cpu(op_ring_buffer_write, cpu); 99 if (!size)
100 return 0;
101 *val = *entry->data;
102 entry->size--;
103 entry->data++;
104 return size;
118} 105}
119 106
120/* transient events for the CPU buffer -> event buffer */ 107/* extra data flags */
121#define CPU_IS_KERNEL 1 108#define KERNEL_CTX_SWITCH (1UL << 0)
122#define CPU_TRACE_BEGIN 2 109#define IS_KERNEL (1UL << 1)
123#define IBS_FETCH_BEGIN 3 110#define TRACE_BEGIN (1UL << 2)
124#define IBS_OP_BEGIN 4 111#define USER_CTX_SWITCH (1UL << 3)
125 112
126#endif /* OPROFILE_CPU_BUFFER_H */ 113#endif /* OPROFILE_CPU_BUFFER_H */
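The read side pairs with the helpers above: op_cpu_buffer_read_entry() consumes one event, transparently swapping the read and write ring buffers when the read side runs dry, and op_cpu_buffer_get_data() walks the data words attached to an escape sample. A sketch of a consumer loop in the style of buffer_sync.c, under the flag semantics defined above (the process_*() handlers are hypothetical placeholders):

/* sketch: drain one CPU's buffer, decoding escape samples */
static void drain_cpu_buffer(int cpu)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long val, flags;

	while ((sample = op_cpu_buffer_read_entry(&entry, cpu))) {
		if (sample->eip != ESCAPE_CODE) {
			/* ordinary hit: eip is the pc, event the counter */
			process_sample(sample->eip, sample->event);
			continue;
		}
		/* escape sample: sample->event carries the flags */
		flags = sample->event;
		if (flags & USER_CTX_SWITCH &&
		    op_cpu_buffer_get_data(&entry, &val))
			process_task_switch(val); /* new task_struct pointer */
		if (flags & KERNEL_CTX_SWITCH)
			process_mode_switch(flags & IS_KERNEL);
		if (flags & TRACE_BEGIN)
			process_trace_begin();
	}
}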
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 191a3202cecc..2b7ae366ceb1 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -73,8 +73,8 @@ int alloc_event_buffer(void)
73 unsigned long flags; 73 unsigned long flags;
74 74
75 spin_lock_irqsave(&oprofilefs_lock, flags); 75 spin_lock_irqsave(&oprofilefs_lock, flags);
76 buffer_size = fs_buffer_size; 76 buffer_size = oprofile_buffer_size;
77 buffer_watershed = fs_buffer_watershed; 77 buffer_watershed = oprofile_buffer_watershed;
78 spin_unlock_irqrestore(&oprofilefs_lock, flags); 78 spin_unlock_irqrestore(&oprofilefs_lock, flags);
79 79
80 if (buffer_watershed >= buffer_size) 80 if (buffer_watershed >= buffer_size)
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index cd375907f26f..3cffce90f82a 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -23,7 +23,7 @@
23struct oprofile_operations oprofile_ops; 23struct oprofile_operations oprofile_ops;
24 24
25unsigned long oprofile_started; 25unsigned long oprofile_started;
26unsigned long backtrace_depth; 26unsigned long oprofile_backtrace_depth;
27static unsigned long is_setup; 27static unsigned long is_setup;
28static DEFINE_MUTEX(start_mutex); 28static DEFINE_MUTEX(start_mutex);
29 29
@@ -172,7 +172,7 @@ int oprofile_set_backtrace(unsigned long val)
172 goto out; 172 goto out;
173 } 173 }
174 174
175 backtrace_depth = val; 175 oprofile_backtrace_depth = val;
176 176
177out: 177out:
178 mutex_unlock(&start_mutex); 178 mutex_unlock(&start_mutex);
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 5df0c21a608f..c288d3c24b50 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -21,12 +21,12 @@ void oprofile_stop(void);
21 21
22struct oprofile_operations; 22struct oprofile_operations;
23 23
24extern unsigned long fs_buffer_size; 24extern unsigned long oprofile_buffer_size;
25extern unsigned long fs_cpu_buffer_size; 25extern unsigned long oprofile_cpu_buffer_size;
26extern unsigned long fs_buffer_watershed; 26extern unsigned long oprofile_buffer_watershed;
27extern struct oprofile_operations oprofile_ops; 27extern struct oprofile_operations oprofile_ops;
28extern unsigned long oprofile_started; 28extern unsigned long oprofile_started;
29extern unsigned long backtrace_depth; 29extern unsigned long oprofile_backtrace_depth;
30 30
31struct super_block; 31struct super_block;
32struct dentry; 32struct dentry;
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index d8201998b0b7..5d36ffc30dd5 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -14,17 +14,18 @@
14#include "oprofile_stats.h" 14#include "oprofile_stats.h"
15#include "oprof.h" 15#include "oprof.h"
16 16
17#define FS_BUFFER_SIZE_DEFAULT 131072 17#define BUFFER_SIZE_DEFAULT 131072
18#define FS_CPU_BUFFER_SIZE_DEFAULT 8192 18#define CPU_BUFFER_SIZE_DEFAULT 8192
19#define FS_BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */ 19#define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */
20 20
21unsigned long fs_buffer_size; 21unsigned long oprofile_buffer_size;
22unsigned long fs_cpu_buffer_size; 22unsigned long oprofile_cpu_buffer_size;
23unsigned long fs_buffer_watershed; 23unsigned long oprofile_buffer_watershed;
24 24
25static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) 25static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
26{ 26{
27 return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset); 27 return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count,
28 offset);
28} 29}
29 30
30 31
@@ -125,16 +126,16 @@ static const struct file_operations dump_fops = {
125void oprofile_create_files(struct super_block *sb, struct dentry *root) 126void oprofile_create_files(struct super_block *sb, struct dentry *root)
126{ 127{
127 /* reinitialize default values */ 128 /* reinitialize default values */
128 fs_buffer_size = FS_BUFFER_SIZE_DEFAULT; 129 oprofile_buffer_size = BUFFER_SIZE_DEFAULT;
129 fs_cpu_buffer_size = FS_CPU_BUFFER_SIZE_DEFAULT; 130 oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT;
130 fs_buffer_watershed = FS_BUFFER_WATERSHED_DEFAULT; 131 oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT;
131 132
132 oprofilefs_create_file(sb, root, "enable", &enable_fops); 133 oprofilefs_create_file(sb, root, "enable", &enable_fops);
133 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); 134 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
134 oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops); 135 oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
135 oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size); 136 oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
136 oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed); 137 oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
137 oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size); 138 oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
138 oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); 139 oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
139 oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); 140 oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
140 oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); 141 oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
diff --git a/drivers/parisc/asp.c b/drivers/parisc/asp.c
index 821369135369..7931133526c4 100644
--- a/drivers/parisc/asp.c
+++ b/drivers/parisc/asp.c
@@ -71,8 +71,7 @@ static void asp_choose_irq(struct parisc_device *dev, void *ctrl)
71 */ 71 */
72#define ASP_INTERRUPT_ADDR 0xf0800000 72#define ASP_INTERRUPT_ADDR 0xf0800000
73 73
74int __init 74static int __init asp_init_chip(struct parisc_device *dev)
75asp_init_chip(struct parisc_device *dev)
76{ 75{
77 struct gsc_irq gsc_irq; 76 struct gsc_irq gsc_irq;
78 int ret; 77 int ret;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index dcc1e9958d2f..cd4dd7ed2c06 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -555,7 +555,7 @@ static u32 hint_lookup[] = {
555 * (Load Coherence Index) instruction. The 8 bits used for the virtual 555 * (Load Coherence Index) instruction. The 8 bits used for the virtual
556 * index are bits 12:19 of the value returned by LCI. 556 * index are bits 12:19 of the value returned by LCI.
557 */ 557 */
558void CCIO_INLINE 558static void CCIO_INLINE
559ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, 559ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
560 unsigned long hints) 560 unsigned long hints)
561{ 561{
@@ -1578,8 +1578,6 @@ static int __init ccio_probe(struct parisc_device *dev)
1578 1578
1579 ioc_count++; 1579 ioc_count++;
1580 1580
1581 parisc_vmerge_boundary = IOVP_SIZE;
1582 parisc_vmerge_max_size = BITS_PER_LONG * IOVP_SIZE;
1583 parisc_has_iommu(); 1581 parisc_has_iommu();
1584 return 0; 1582 return 0;
1585} 1583}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 77cc8bfef8c9..d539d9df88e7 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -287,7 +287,7 @@ DINO_PORT_OUT(b, 8, 3)
287DINO_PORT_OUT(w, 16, 2) 287DINO_PORT_OUT(w, 16, 2)
288DINO_PORT_OUT(l, 32, 0) 288DINO_PORT_OUT(l, 32, 0)
289 289
290struct pci_port_ops dino_port_ops = { 290static struct pci_port_ops dino_port_ops = {
291 .inb = dino_in8, 291 .inb = dino_in8,
292 .inw = dino_in16, 292 .inw = dino_in16,
293 .inl = dino_in32, 293 .inl = dino_in32,
@@ -690,7 +690,7 @@ dino_fixup_bus(struct pci_bus *bus)
690} 690}
691 691
692 692
693struct pci_bios_ops dino_bios_ops = { 693static struct pci_bios_ops dino_bios_ops = {
694 .init = dino_bios_init, 694 .init = dino_bios_init,
695 .fixup_bus = dino_fixup_bus 695 .fixup_bus = dino_fixup_bus
696}; 696};
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index 65eee67aa2ae..13856415b432 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -29,7 +29,7 @@ struct hppb_card {
29 struct hppb_card *next; 29 struct hppb_card *next;
30}; 30};
31 31
32struct hppb_card hppb_card_head = { 32static struct hppb_card hppb_card_head = {
33 .hpa = 0, 33 .hpa = 0,
34 .next = NULL, 34 .next = NULL,
35}; 35};
diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c
index bee510098ce8..e65727ca9fc0 100644
--- a/drivers/parisc/lasi.c
+++ b/drivers/parisc/lasi.c
@@ -107,7 +107,7 @@ lasi_init_irq(struct gsc_asic *this_lasi)
107 107
108#else 108#else
109 109
110void __init lasi_led_init(unsigned long lasi_hpa) 110static void __init lasi_led_init(unsigned long lasi_hpa)
111{ 111{
112 unsigned long datareg; 112 unsigned long datareg;
113 113
@@ -163,8 +163,7 @@ static void lasi_power_off(void)
163 gsc_writel(0x02, datareg); 163 gsc_writel(0x02, datareg);
164} 164}
165 165
166int __init 166static int __init lasi_init_chip(struct parisc_device *dev)
167lasi_init_chip(struct parisc_device *dev)
168{ 167{
169 extern void (*chassis_power_off)(void); 168 extern void (*chassis_power_off)(void);
170 struct gsc_asic *lasi; 169 struct gsc_asic *lasi;
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index a28c8946deaa..d8233de8c75d 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -824,7 +824,7 @@ lba_fixup_bus(struct pci_bus *bus)
824} 824}
825 825
826 826
827struct pci_bios_ops lba_bios_ops = { 827static struct pci_bios_ops lba_bios_ops = {
828 .init = lba_bios_init, 828 .init = lba_bios_init,
829 .fixup_bus = lba_fixup_bus, 829 .fixup_bus = lba_fixup_bus,
830}; 830};
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index bc73b96346ff..3fac8f81d59d 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -561,7 +561,7 @@ typedef unsigned long space_t;
561 * IOMMU uses little endian for the pdir. 561 * IOMMU uses little endian for the pdir.
562 */ 562 */
563 563
564void SBA_INLINE 564static void SBA_INLINE
565sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, 565sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
566 unsigned long hint) 566 unsigned long hint)
567{ 567{
@@ -1874,7 +1874,7 @@ static struct parisc_device_id sba_tbl[] = {
1874 { 0, } 1874 { 0, }
1875}; 1875};
1876 1876
1877int sba_driver_callback(struct parisc_device *); 1877static int sba_driver_callback(struct parisc_device *);
1878 1878
1879static struct parisc_driver sba_driver = { 1879static struct parisc_driver sba_driver = {
1880 .name = MODULE_NAME, 1880 .name = MODULE_NAME,
@@ -1887,8 +1887,7 @@ static struct parisc_driver sba_driver = {
1887** If so, initialize the chip and tell other partners in crime they 1887** If so, initialize the chip and tell other partners in crime they
1888** have work to do. 1888** have work to do.
1889*/ 1889*/
1890int 1890static int sba_driver_callback(struct parisc_device *dev)
1891sba_driver_callback(struct parisc_device *dev)
1892{ 1891{
1893 struct sba_device *sba_dev; 1892 struct sba_device *sba_dev;
1894 u32 func_class; 1893 u32 func_class;
@@ -1979,8 +1978,6 @@ sba_driver_callback(struct parisc_device *dev)
1979 proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops); 1978 proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
1980#endif 1979#endif
1981 1980
1982 parisc_vmerge_boundary = IOVP_SIZE;
1983 parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG;
1984 parisc_has_iommu(); 1981 parisc_has_iommu();
1985 return 0; 1982 return 0;
1986} 1983}
diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c
index 892a83bbe73d..da9d5ad1353c 100644
--- a/drivers/parisc/wax.c
+++ b/drivers/parisc/wax.c
@@ -68,8 +68,7 @@ wax_init_irq(struct gsc_asic *wax)
68// gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */ 68// gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */
69} 69}
70 70
71int __init 71static int __init wax_init_chip(struct parisc_device *dev)
72wax_init_chip(struct parisc_device *dev)
73{ 72{
74 struct gsc_asic *wax; 73 struct gsc_asic *wax;
75 struct parisc_device *parent; 74 struct parisc_device *parent;
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index c62ab8d240aa..1c1141801060 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -33,7 +33,6 @@
33#include <linux/pci-acpi.h> 33#include <linux/pci-acpi.h>
34#include <acpi/acpi.h> 34#include <acpi/acpi.h>
35#include <acpi/acpi_bus.h> 35#include <acpi/acpi_bus.h>
36#include <acpi/actypes.h>
37 36
38#define MY_NAME "acpi_pcihp" 37#define MY_NAME "acpi_pcihp"
39 38
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 27fd18f019f8..db85284ffb62 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -217,7 +217,6 @@ struct hpc_ops {
217#ifdef CONFIG_ACPI 217#ifdef CONFIG_ACPI
218#include <acpi/acpi.h> 218#include <acpi/acpi.h>
219#include <acpi/acpi_bus.h> 219#include <acpi/acpi_bus.h>
220#include <acpi/actypes.h>
221#include <linux/pci-acpi.h> 220#include <linux/pci-acpi.h>
222 221
223extern void __init pciehp_acpi_slot_detection_init(void); 222extern void __init pciehp_acpi_slot_detection_init(void);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 3582512e7226..deea8a187eb8 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -13,8 +13,6 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/pci-aspm.h> 14#include <linux/pci-aspm.h>
15#include <acpi/acpi.h> 15#include <acpi/acpi.h>
16#include <acpi/acnamesp.h>
17#include <acpi/acresrc.h>
18#include <acpi/acpi_bus.h> 16#include <acpi/acpi_bus.h>
19 17
20#include <linux/pci-acpi.h> 18#include <linux/pci-acpi.h>
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
new file mode 100644
index 000000000000..9652c3fe7f5e
--- /dev/null
+++ b/drivers/platform/Kconfig
@@ -0,0 +1,5 @@
1# drivers/platform/Kconfig
2
3if X86
4source "drivers/platform/x86/Kconfig"
5endif
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
new file mode 100644
index 000000000000..782953ae4c03
--- /dev/null
+++ b/drivers/platform/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for linux/drivers/platform
3#
4
5obj-$(CONFIG_X86) += x86/
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
new file mode 100644
index 000000000000..e65448e99b48
--- /dev/null
+++ b/drivers/platform/x86/Kconfig
@@ -0,0 +1,375 @@
1#
2# X86 Platform Specific Drivers
3#
4
5menuconfig X86_PLATFORM_DEVICES
6 bool "X86 Platform Specific Device Drivers"
7 default y
8 ---help---
9 Say Y here to get to see options for device drivers for various
10 x86 platforms, including vendor-specific laptop extension drivers.
11 This option alone does not add any kernel code.
12
13 If you say N, all options in this submenu will be skipped and disabled.
14
15if X86_PLATFORM_DEVICES
16
17config ACER_WMI
18 tristate "Acer WMI Laptop Extras (EXPERIMENTAL)"
19 depends on EXPERIMENTAL
20 depends on ACPI
21 depends on LEDS_CLASS
22 depends on NEW_LEDS
23 depends on BACKLIGHT_CLASS_DEVICE
24 depends on SERIO_I8042
25 depends on RFKILL
26 select ACPI_WMI
27 ---help---
28 This is a driver for newer Acer (and Wistron) laptops. It adds
29 wireless radio and bluetooth control, and on some laptops,
30 exposes the mail LED and LCD backlight.
31
32 For more information about this driver see
33 <file:Documentation/laptops/acer-wmi.txt>
34
35 If you have an ACPI-WMI compatible Acer/Wistron laptop, say Y or M
36 here.
37
38config ASUS_LAPTOP
39 tristate "Asus Laptop Extras (EXPERIMENTAL)"
40 depends on ACPI
41 depends on EXPERIMENTAL && !ACPI_ASUS
42 depends on LEDS_CLASS
43 depends on NEW_LEDS
44 depends on BACKLIGHT_CLASS_DEVICE
45 ---help---
46 This is the new Linux driver for Asus laptops. It may also support some
47 MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
48 standard ACPI events that go through /proc/acpi/events. It also adds
49 support for video output switching, LCD backlight control, Bluetooth and
50 WLAN control, and most importantly, allows you to blink those fancy LEDs.
51
52 For more information and a userspace daemon for handling the extra
53 buttons see <http://acpi4asus.sf.net/>.
54
55 If you have an ACPI-compatible ASUS laptop, say Y or M here.
56
57config FUJITSU_LAPTOP
58 tristate "Fujitsu Laptop Extras"
59 depends on ACPI
60 depends on INPUT
61 depends on BACKLIGHT_CLASS_DEVICE
62 ---help---
63 This is a driver for laptops built by Fujitsu:
64
65 * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks
66 * Possibly other Fujitsu laptop models
67 * Tested with S6410 and S7020
68
69 It adds support for LCD brightness control and some hotkeys.
70
71 If you have a Fujitsu laptop, say Y or M here.
72
73config FUJITSU_LAPTOP_DEBUG
74 bool "Verbose debug mode for Fujitsu Laptop Extras"
75 depends on FUJITSU_LAPTOP
76 default n
77 ---help---
78 Enables extra debug output from the fujitsu extras driver, at the
79 expense of a slight increase in driver size.
80
81 If you are not sure, say N here.
82
83config TC1100_WMI
84 tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)"
85 depends on !X86_64
86 depends on EXPERIMENTAL
87 depends on ACPI
88 select ACPI_WMI
89 ---help---
90 This is a driver for the WMI extensions (wireless and bluetooth power
91 control) of the HP Compaq TC1100 tablet.
92
93config HP_WMI
94 tristate "HP WMI extras"
95 depends on ACPI_WMI
96 depends on INPUT
97 depends on RFKILL
98 help
99 Say Y here if you want to support WMI-based hotkeys on HP laptops and
100 to read data from WMI such as docking or ambient light sensor state.
101
102 To compile this driver as a module, choose M here: the module will
103 be called hp-wmi.
104
105config MSI_LAPTOP
106 tristate "MSI Laptop Extras"
107 depends on ACPI
108 depends on BACKLIGHT_CLASS_DEVICE
109 ---help---
110 This is a driver for laptops built by MSI (MICRO-STAR
111 INTERNATIONAL):
112
113 MSI MegaBook S270 (MS-1013)
114 Cytron/TCM/Medion/Tchibo MD96100/SAM2000
115
116 It adds support for Bluetooth, WLAN and LCD brightness control.
117
118 More information about this driver is available at
119 <http://0pointer.de/lennart/tchibo.html>.
120
121 If you have an MSI S270 laptop, say Y or M here.
122
123config PANASONIC_LAPTOP
124 tristate "Panasonic Laptop Extras"
125 depends on INPUT && ACPI
126 depends on BACKLIGHT_CLASS_DEVICE
127 ---help---
128 This driver adds support for access to backlight control and hotkeys
129 on Panasonic Let's Note laptops.
130
131 If you have a Panasonic Let's Note laptop (such as the R1 (N variant),
132 R2, R3, R5, T2, W2 and Y2 series), say Y.
133
134config COMPAL_LAPTOP
135 tristate "Compal Laptop Extras"
136 depends on ACPI
137 depends on BACKLIGHT_CLASS_DEVICE
138 ---help---
139 This is a driver for laptops built by Compal:
140
141 Compal FL90/IFL90
142 Compal FL91/IFL91
143 Compal FL92/JFL92
144 Compal FT00/IFT00
145
146 It adds support for Bluetooth, WLAN and LCD brightness control.
147
148 If you have a Compal FL9x/IFL9x/FT00 laptop, say Y or M here.
149
150config SONY_LAPTOP
151 tristate "Sony Laptop Extras"
152 depends on ACPI
153 select BACKLIGHT_CLASS_DEVICE
154 depends on INPUT
155 ---help---
156 This mini-driver drives the SNC and SPIC devices present in the ACPI
157 BIOS of the Sony Vaio laptops.
158
159 It gives access to extra laptop functionality such as Bluetooth,
160 screen brightness control, Fn keys and allows powering on/off some
161 devices.
162
163 Read <file:Documentation/laptops/sony-laptop.txt> for more information.
164
165config SONYPI_COMPAT
166 bool "Sonypi compatibility"
167 depends on SONY_LAPTOP
168 ---help---
169 Build the sonypi driver compatibility code into the sony-laptop driver.
170
171config THINKPAD_ACPI
172 tristate "ThinkPad ACPI Laptop Extras"
173 depends on ACPI
174 select BACKLIGHT_LCD_SUPPORT
175 select BACKLIGHT_CLASS_DEVICE
176 select HWMON
177 select NVRAM
178 select INPUT
179 select NEW_LEDS
180 select LEDS_CLASS
181 select NET
182 select RFKILL
183 ---help---
184 This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
185 support for Fn-Fx key combinations, Bluetooth control, video
186 output switching, ThinkLight control, UltraBay eject and more.
187 For more information about this driver see
188 <file:Documentation/laptops/thinkpad-acpi.txt> and
189 <http://ibm-acpi.sf.net/> .
190
191 This driver was formerly known as ibm-acpi.
192
193 If you have an IBM or Lenovo ThinkPad laptop, say Y or M here.
194
195config THINKPAD_ACPI_DEBUG
196 bool "Verbose debug mode"
197 depends on THINKPAD_ACPI
198 default n
199 ---help---
200 Enables extra debugging information, at the expense of a slight
201 increase in driver size.
202
203 If you are not sure, say N here.
204
205config THINKPAD_ACPI_DOCK
206 bool "Legacy Docking Station Support"
207 depends on THINKPAD_ACPI
208 depends on ACPI_DOCK=n
209 default n
210 ---help---
211 Allows the thinkpad_acpi driver to handle docking station events.
212 This support was made obsolete by the generic ACPI docking station
213 support (CONFIG_ACPI_DOCK). It will allow locking and removing the
214 laptop from the docking station, but will not properly connect PCI
215 devices.
216
217 If you are not sure, say N here.
218
219config THINKPAD_ACPI_BAY
220 bool "Legacy Removable Bay Support"
221 depends on THINKPAD_ACPI
222 default y
223 ---help---
224 Allows the thinkpad_acpi driver to handle removable bays. It will
225 electrically disable the device in the bay, and also generate
226 notifications when the bay lever is ejected or inserted.
227
228 If you are not sure, say Y here.
229
230config THINKPAD_ACPI_VIDEO
231 bool "Video output control support"
232 depends on THINKPAD_ACPI
233 default y
234 ---help---
235 Allows the thinkpad_acpi driver to provide an interface to control
236 the various video output ports.
237
238 This feature often won't work well, depending on ThinkPad model,
239 display state, video output devices in use, whether there is an X
240 server running, phase of the moon, and the current mood of
241 Schroedinger's cat. If you can use X.org's RandR to control
242 your ThinkPad's video output ports instead of this feature,
243 don't think twice: do it and say N here to save some memory.
244
245 If you are not sure, say Y here.
246
247config THINKPAD_ACPI_HOTKEY_POLL
248 bool "Support NVRAM polling for hot keys"
249 depends on THINKPAD_ACPI
250 default y
251 ---help---
252 Some ThinkPad models benefit from NVRAM polling to detect a few of
253 the hot key press events. If you know your ThinkPad model does not
254 need to do NVRAM polling to support any of the hot keys you use,
255 unselecting this option will save about 1kB of memory.
256
257 ThinkPads T40 and newer, R52 and newer, and X31 and newer are
258 unlikely to need NVRAM polling in their latest BIOS versions.
259
260 NVRAM polling can detect at most the following keys: ThinkPad/Access
261 IBM, Zoom, Switch Display (fn+F7), ThinkLight, Volume up/down/mute,
262 Brightness up/down, Display Expand (fn+F8), Hibernate (fn+F12).
263
264 If you are not sure, say Y here. The driver enables polling only if
265 it is strictly necessary to do so.
266
267config INTEL_MENLOW
268 tristate "Thermal management driver for Intel Menlow platform"
269 depends on ACPI_THERMAL
270 select THERMAL
271 ---help---
272 ACPI thermal management enhancement driver for the
273 Intel Menlow platform.
274
275 If unsure, say N.
276
277config EEEPC_LAPTOP
278 tristate "Eee PC Hotkey Driver (EXPERIMENTAL)"
279 depends on ACPI
280 depends on EXPERIMENTAL
281 select BACKLIGHT_CLASS_DEVICE
282 select HWMON
283 select RFKILL
284 ---help---
285 This driver supports the Fn-Fx keys on Eee PC laptops.
286 It also adds the ability to switch camera/wlan on/off.
287
288 If you have an Eee PC laptop, say Y or M here.
289
290
291config ACPI_WMI
292 tristate "WMI (EXPERIMENTAL)"
293 depends on ACPI
294 depends on EXPERIMENTAL
295 help
296 This driver adds support for the ACPI-WMI (Windows Management
297 Instrumentation) mapper device (PNP0C14) found on some systems.
298
299 ACPI-WMI is a proprietary extension to ACPI to expose parts of the
300 ACPI firmware to userspace - this is done through various vendor
301 defined methods and data blocks in a PNP0C14 device, which are then
302 made available for userspace to call.
303
304 The Linux implementation currently exposes this interface only to
305 other kernel-space drivers.
306
307 This driver is a required dependency to build the firmware-specific
308 drivers needed on many machines, including Acer and HP laptops.
309
310 It is safe to enable this driver even if your DSDT doesn't define
311 any ACPI-WMI devices.
312
313config ACPI_ASUS
314 tristate "ASUS/Medion Laptop Extras"
315 depends on ACPI
316 select BACKLIGHT_CLASS_DEVICE
317 ---help---
318 This driver provides support for extra features of ACPI-compatible
319 ASUS laptops. As some Medion laptops are made by ASUS, it may also
320 support some Medion laptops (such as the 9675). It makes all
321 the extra buttons generate standard ACPI events that go through
322 /proc/acpi/events, and (on some models) adds support for changing the
323 display brightness and output, switching the LCD backlight on and off,
324 and most importantly, allows you to blink those fancy LEDs intended
325 for reporting mail and wireless status.
326
327 Note: display switching code is currently considered EXPERIMENTAL;
328 toying with these values may even lock your machine.
329
330 All settings are changed via /proc/acpi/asus directory entries. Owner
331 and group for these entries can be set with asus_uid and asus_gid
332 parameters.
333
334 More information and a userspace daemon for handling the extra buttons
335 at <http://sourceforge.net/projects/acpi4asus/>.
336
337 If you have an ACPI-compatible ASUS laptop, say Y or M here. This
338 driver is still under development, so if your laptop is unsupported or
339 something does not work quite as expected, please use the mailing list
340 available on the above page (acpi4asus-user@lists.sourceforge.net).
341
342 NOTE: This driver is deprecated and will probably be removed soon,
343 use asus-laptop instead.
344
345config ACPI_TOSHIBA
346 tristate "Toshiba Laptop Extras"
347 depends on ACPI
348 depends on INPUT
349 select INPUT_POLLDEV
350 select NET
351 select RFKILL
352 select BACKLIGHT_CLASS_DEVICE
353 ---help---
354 This driver adds support for access to certain system settings
355 on "legacy-free" Toshiba laptops. These laptops can be recognized by
356 their lack of a BIOS setup menu and APM support.
357
358 On these machines, all system configuration is handled through
359 ACPI. This driver is required for access to controls not covered
360 by the general ACPI drivers, such as LCD brightness, video output,
361 etc.
362
363 This driver differs from the non-ACPI Toshiba laptop driver (located
364 under "Processor type and features") in several aspects.
365 Configuration is accessed by reading and writing text files in the
366 /proc tree instead of via a program interface to /dev. Furthermore, no
367 power management functions are exposed, as those are handled by the
368 general ACPI drivers.
369
370 More information about this driver is available at
371 <http://memebeam.org/toys/ToshibaAcpiDriver>.
372
373 If you have a legacy-free Toshiba laptop (such as the Libretto L1
374 series), say Y.
375endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
new file mode 100644
index 000000000000..1e9de2ae0de5
--- /dev/null
+++ b/drivers/platform/x86/Makefile
@@ -0,0 +1,19 @@
1#
2# Makefile for linux/drivers/platform/x86
3# x86 Platform-Specific Drivers
4#
5obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
6obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
7obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
8obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
9obj-$(CONFIG_ACER_WMI) += acer-wmi.o
10obj-$(CONFIG_HP_WMI) += hp-wmi.o
11obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
12obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
13obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
14obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
15obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
16obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
17obj-$(CONFIG_ACPI_WMI) += wmi.o
18obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
19obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
diff --git a/drivers/misc/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 94c9f911824e..94c9f911824e 100644
--- a/drivers/misc/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
diff --git a/drivers/misc/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 8fb8b3591048..8fb8b3591048 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
diff --git a/drivers/acpi/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index 1e74988c7b2d..1e74988c7b2d 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
diff --git a/drivers/misc/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 11003bba10d3..11003bba10d3 100644
--- a/drivers/misc/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 02fe2b8b8939..02fe2b8b8939 100644
--- a/drivers/misc/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
diff --git a/drivers/misc/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index a7dd3e9fb79d..65dc41540c62 100644
--- a/drivers/misc/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -3,6 +3,7 @@
3/* 3/*
4 Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@physics.adelaide.edu.au> 4 Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@physics.adelaide.edu.au>
5 Copyright (C) 2008 Peter Gruber <nokos@gmx.net> 5 Copyright (C) 2008 Peter Gruber <nokos@gmx.net>
6 Copyright (C) 2008 Tony Vroon <tony@linx.net>
6 Based on earlier work: 7 Based on earlier work:
7 Copyright (C) 2003 Shane Spencer <shane@bogomip.com> 8 Copyright (C) 2003 Shane Spencer <shane@bogomip.com>
8 Adrian Yee <brewt-fujitsu@brewt.org> 9 Adrian Yee <brewt-fujitsu@brewt.org>
@@ -65,8 +66,11 @@
65#include <linux/kfifo.h> 66#include <linux/kfifo.h>
66#include <linux/video_output.h> 67#include <linux/video_output.h>
67#include <linux/platform_device.h> 68#include <linux/platform_device.h>
69#ifdef CONFIG_LEDS_CLASS
70#include <linux/leds.h>
71#endif
68 72
69#define FUJITSU_DRIVER_VERSION "0.4.3" 73#define FUJITSU_DRIVER_VERSION "0.5.0"
70 74
71#define FUJITSU_LCD_N_LEVELS 8 75#define FUJITSU_LCD_N_LEVELS 8
72 76
@@ -83,6 +87,24 @@
83#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86 87#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86
84#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87 88#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87
85 89
90/* FUNC interface - command values */
91#define FUNC_RFKILL 0x1000
92#define FUNC_LEDS 0x1001
93#define FUNC_BUTTONS 0x1002
94#define FUNC_BACKLIGHT 0x1004
95
96/* FUNC interface - responses */
97#define UNSUPPORTED_CMD 0x80000000
98
99#ifdef CONFIG_LEDS_CLASS
100/* FUNC interface - LED control */
101#define FUNC_LED_OFF 0x1
102#define FUNC_LED_ON 0x30001
103#define KEYBOARD_LAMPS 0x100
104#define LOGOLAMP_POWERON 0x2000
105#define LOGOLAMP_ALWAYS 0x4000
106#endif
107
86/* Hotkey details */ 108/* Hotkey details */
87#define KEY1_CODE 0x410 /* codes for the keys in the GIRB register */ 109#define KEY1_CODE 0x410 /* codes for the keys in the GIRB register */
88#define KEY2_CODE 0x411 110#define KEY2_CODE 0x411
@@ -133,7 +155,6 @@ struct fujitsu_t {
133 155
134static struct fujitsu_t *fujitsu; 156static struct fujitsu_t *fujitsu;
135static int use_alt_lcd_levels = -1; 157static int use_alt_lcd_levels = -1;
136static int disable_brightness_keys = -1;
137static int disable_brightness_adjust = -1; 158static int disable_brightness_adjust = -1;
138 159
139/* Device used to access other hotkeys on the laptop */ 160/* Device used to access other hotkeys on the laptop */
@@ -145,8 +166,9 @@ struct fujitsu_hotkey_t {
145 struct platform_device *pf_device; 166 struct platform_device *pf_device;
146 struct kfifo *fifo; 167 struct kfifo *fifo;
147 spinlock_t fifo_lock; 168 spinlock_t fifo_lock;
148 169 int rfkill_state;
149 unsigned int irb; /* info about the pressed buttons */ 170 int logolamp_registered;
171 int kblamps_registered;
150}; 172};
151 173
152static struct fujitsu_hotkey_t *fujitsu_hotkey; 174static struct fujitsu_hotkey_t *fujitsu_hotkey;
@@ -154,12 +176,139 @@ static struct fujitsu_hotkey_t *fujitsu_hotkey;
154static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, 176static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
155 void *data); 177 void *data);
156 178
179#ifdef CONFIG_LEDS_CLASS
180static enum led_brightness logolamp_get(struct led_classdev *cdev);
181static void logolamp_set(struct led_classdev *cdev,
182 enum led_brightness brightness);
183
184struct led_classdev logolamp_led = {
185 .name = "fujitsu::logolamp",
186 .brightness_get = logolamp_get,
187 .brightness_set = logolamp_set
188};
189
190static enum led_brightness kblamps_get(struct led_classdev *cdev);
191static void kblamps_set(struct led_classdev *cdev,
192 enum led_brightness brightness);
193
194struct led_classdev kblamps_led = {
195 .name = "fujitsu::kblamps",
196 .brightness_get = kblamps_get,
197 .brightness_set = kblamps_set
198};
199#endif
200
157#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG 201#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
158static u32 dbg_level = 0x03; 202static u32 dbg_level = 0x03;
159#endif 203#endif
160 204
161static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data); 205static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data);
162 206
207/* Fujitsu ACPI interface function */
208
209static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
210{
211 acpi_status status = AE_OK;
212 union acpi_object params[4] = {
213 { .type = ACPI_TYPE_INTEGER },
214 { .type = ACPI_TYPE_INTEGER },
215 { .type = ACPI_TYPE_INTEGER },
216 { .type = ACPI_TYPE_INTEGER }
217 };
218 struct acpi_object_list arg_list = { 4, &params[0] };
219 struct acpi_buffer output;
220 union acpi_object out_obj;
221 acpi_handle handle = NULL;
222
223 status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);
224 if (ACPI_FAILURE(status)) {
225 vdbg_printk(FUJLAPTOP_DBG_ERROR,
226 "FUNC interface is not present\n");
227 return -ENODEV;
228 }
229
230 params[0].integer.value = cmd;
231 params[1].integer.value = arg0;
232 params[2].integer.value = arg1;
233 params[3].integer.value = arg2;
234
235 output.length = sizeof(out_obj);
236 output.pointer = &out_obj;
237
238 status = acpi_evaluate_object(handle, NULL, &arg_list, &output);
239 if (ACPI_FAILURE(status)) {
240 vdbg_printk(FUJLAPTOP_DBG_WARN,
241 "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) call failed\n",
242 cmd, arg0, arg1, arg2);
243 return -ENODEV;
244 }
245
246 if (out_obj.type != ACPI_TYPE_INTEGER) {
247 vdbg_printk(FUJLAPTOP_DBG_WARN,
248 "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) did not "
249 "return an integer\n",
250 cmd, arg0, arg1, arg2);
251 return -ENODEV;
252 }
253
254 vdbg_printk(FUJLAPTOP_DBG_TRACE,
255 "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
256 cmd, arg0, arg1, arg2, (int)out_obj.integer.value);
257 return out_obj.integer.value;
258}
259
260#ifdef CONFIG_LEDS_CLASS
261/* LED class callbacks */
262
263static void logolamp_set(struct led_classdev *cdev,
264 enum led_brightness brightness)
265{
266 if (brightness >= LED_FULL) {
267 call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON);
268 call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON);
269 } else if (brightness >= LED_HALF) {
270 call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON);
271 call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF);
272 } else {
273 call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF);
274 }
275}
276
277static void kblamps_set(struct led_classdev *cdev,
278 enum led_brightness brightness)
279{
280 if (brightness >= LED_FULL)
281 call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON);
282 else
283 call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
284}
285
286static enum led_brightness logolamp_get(struct led_classdev *cdev)
287{
288 enum led_brightness brightness = LED_OFF;
289 int poweron, always;
290
291 poweron = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_POWERON, 0x0);
292 if (poweron == FUNC_LED_ON) {
293 brightness = LED_HALF;
294 always = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_ALWAYS, 0x0);
295 if (always == FUNC_LED_ON)
296 brightness = LED_FULL;
297 }
298 return brightness;
299}
300
301static enum led_brightness kblamps_get(struct led_classdev *cdev)
302{
303 enum led_brightness brightness = LED_OFF;
304
305 if (call_fext_func(FUNC_LEDS, 0x2, KEYBOARD_LAMPS, 0x0) == FUNC_LED_ON)
306 brightness = LED_FULL;
307
308 return brightness;
309}
310#endif
311
163/* Hardware access for LCD brightness control */ 312/* Hardware access for LCD brightness control */
164 313
165static int set_lcd_level(int level) 314static int set_lcd_level(int level)
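call_fext_func() above is the driver's single entry point into the firmware: FUNC takes a command class plus three arguments and returns an integer, with UNSUPPORTED_CMD (0x80000000) marking sub-functions the BIOS does not implement. A sketch of a capability probe along the lines of what the driver does before registering its LED class devices (the helper name is hypothetical; the constants are the ones defined earlier in this patch):

/* sketch: ask the firmware whether the logo lamp LED exists */
static int logolamp_present(void)
{
	int ret = call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0);

	if (ret == UNSUPPORTED_CMD)	/* FUNC exists, LEDs do not */
		return 0;
	if (ret < 0)			/* no FUNC interface at all */
		return 0;
	return !!(ret & LOGOLAMP_POWERON);
}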
@@ -263,44 +412,34 @@ static int get_max_brightness(void)
263 return fujitsu->max_brightness; 412 return fujitsu->max_brightness;
264} 413}
265 414
266static int get_lcd_level_alt(void)
267{
268 unsigned long long state = 0;
269 acpi_status status = AE_OK;
270
271 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLS\n");
272
273 status =
274 acpi_evaluate_integer(fujitsu->acpi_handle, "GBLS", NULL, &state);
275 if (status < 0)
276 return status;
277
278 fujitsu->brightness_level = state & 0x0fffffff;
279
280 if (state & 0x80000000)
281 fujitsu->brightness_changed = 1;
282 else
283 fujitsu->brightness_changed = 0;
284
285 return fujitsu->brightness_level;
286}
287
288/* Backlight device stuff */ 415/* Backlight device stuff */
289 416
290static int bl_get_brightness(struct backlight_device *b) 417static int bl_get_brightness(struct backlight_device *b)
291{ 418{
292 if (use_alt_lcd_levels) 419 return get_lcd_level();
293 return get_lcd_level_alt();
294 else
295 return get_lcd_level();
296} 420}
297 421
298static int bl_update_status(struct backlight_device *b) 422static int bl_update_status(struct backlight_device *b)
299{ 423{
424 int ret;
425 if (b->props.power == 4)
426 ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
427 else
428 ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
429 if (ret != 0)
430 vdbg_printk(FUJLAPTOP_DBG_ERROR,
431 "Unable to adjust backlight power, error code %i\n",
432 ret);
433
300 if (use_alt_lcd_levels) 434 if (use_alt_lcd_levels)
301 return set_lcd_level_alt(b->props.brightness); 435 ret = set_lcd_level_alt(b->props.brightness);
302 else 436 else
303 return set_lcd_level(b->props.brightness); 437 ret = set_lcd_level(b->props.brightness);
438 if (ret != 0)
439 vdbg_printk(FUJLAPTOP_DBG_ERROR,
440 "Unable to adjust LCD brightness, error code %i\n",
441 ret);
442 return ret;
304} 443}
305 444
306static struct backlight_ops fujitsubl_ops = { 445static struct backlight_ops fujitsubl_ops = {
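The bare 4 tested in bl_update_status() above is FB_BLANK_POWERDOWN from <linux/fb.h> (FB_BLANK_UNBLANK is 0). A behaviorally identical spelling against the named constant, assuming the header is pulled in; the wrapper name is hypothetical:

#include <linux/fb.h>	/* FB_BLANK_UNBLANK = 0 ... FB_BLANK_POWERDOWN = 4 */

/* sketch: the power test from bl_update_status(), with the named constant */
static int fujitsu_set_bl_power(struct backlight_device *b)
{
	if (b->props.power == FB_BLANK_POWERDOWN)
		return call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3); /* off */
	return call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0);	/* on */
}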
@@ -344,10 +483,7 @@ static ssize_t show_lcd_level(struct device *dev,
344 483
345 int ret; 484 int ret;
346 485
347 if (use_alt_lcd_levels) 486 ret = get_lcd_level();
348 ret = get_lcd_level_alt();
349 else
350 ret = get_lcd_level();
351 if (ret < 0) 487 if (ret < 0)
352 return ret; 488 return ret;
353 489
@@ -372,52 +508,71 @@ static ssize_t store_lcd_level(struct device *dev,
372 if (ret < 0) 508 if (ret < 0)
373 return ret; 509 return ret;
374 510
375 if (use_alt_lcd_levels) 511 ret = get_lcd_level();
376 ret = get_lcd_level_alt();
377 else
378 ret = get_lcd_level();
379 if (ret < 0) 512 if (ret < 0)
380 return ret; 513 return ret;
381 514
382 return count; 515 return count;
383} 516}
384 517
385/* Hardware access for hotkey device */ 518static ssize_t
386 519ignore_store(struct device *dev,
387static int get_irb(void) 520 struct device_attribute *attr, const char *buf, size_t count)
388{ 521{
389 unsigned long long state = 0; 522 return count;
390 acpi_status status = AE_OK; 523}
391
392 vdbg_printk(FUJLAPTOP_DBG_TRACE, "Get irb\n");
393
394 status =
395 acpi_evaluate_integer(fujitsu_hotkey->acpi_handle, "GIRB", NULL,
396 &state);
397 if (status < 0)
398 return status;
399 524
400 fujitsu_hotkey->irb = state; 525static ssize_t
526show_lid_state(struct device *dev,
527 struct device_attribute *attr, char *buf)
528{
529 if (fujitsu_hotkey->rfkill_state == UNSUPPORTED_CMD)
530 return sprintf(buf, "unknown\n");
531 if (fujitsu_hotkey->rfkill_state & 0x100)
532 return sprintf(buf, "open\n");
533 else
534 return sprintf(buf, "closed\n");
535}
401 536
402 return fujitsu_hotkey->irb; 537static ssize_t
538show_dock_state(struct device *dev,
539 struct device_attribute *attr, char *buf)
540{
541 if (fujitsu_hotkey->rfkill_state == UNSUPPORTED_CMD)
542 return sprintf(buf, "unknown\n");
543 if (fujitsu_hotkey->rfkill_state & 0x200)
544 return sprintf(buf, "docked\n");
545 else
546 return sprintf(buf, "undocked\n");
403} 547}
404 548
405static ssize_t 549static ssize_t
406ignore_store(struct device *dev, 550show_radios_state(struct device *dev,
407 struct device_attribute *attr, const char *buf, size_t count) 551 struct device_attribute *attr, char *buf)
408{ 552{
409 return count; 553 if (fujitsu_hotkey->rfkill_state == UNSUPPORTED_CMD)
554 return sprintf(buf, "unknown\n");
555 if (fujitsu_hotkey->rfkill_state & 0x20)
556 return sprintf(buf, "on\n");
557 else
558 return sprintf(buf, "killed\n");
410} 559}
411 560
412static DEVICE_ATTR(max_brightness, 0444, show_max_brightness, ignore_store); 561static DEVICE_ATTR(max_brightness, 0444, show_max_brightness, ignore_store);
413static DEVICE_ATTR(brightness_changed, 0444, show_brightness_changed, 562static DEVICE_ATTR(brightness_changed, 0444, show_brightness_changed,
414 ignore_store); 563 ignore_store);
415static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level); 564static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
565static DEVICE_ATTR(lid, 0444, show_lid_state, ignore_store);
566static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store);
567static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store);
416 568
417static struct attribute *fujitsupf_attributes[] = { 569static struct attribute *fujitsupf_attributes[] = {
418 &dev_attr_brightness_changed.attr, 570 &dev_attr_brightness_changed.attr,
419 &dev_attr_max_brightness.attr, 571 &dev_attr_max_brightness.attr,
420 &dev_attr_lcd_level.attr, 572 &dev_attr_lcd_level.attr,
573 &dev_attr_lid.attr,
574 &dev_attr_dock.attr,
575 &dev_attr_radios.attr,
421 NULL 576 NULL
422}; 577};
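
The three new read-only attributes decode the cached FUNC_RFKILL status word: bit 0x100 reports the lid, 0x200 the dock and 0x20 the radio kill switch. A minimal userspace sketch follows; the sysfs path is an assumption (the files hang off the driver's platform device), not something this patch spells out:

#include <stdio.h>

int main(void)
{
	char buf[16];
	/* Assumed location; adjust to the actual platform device path. */
	FILE *f = fopen("/sys/devices/platform/fujitsu-laptop/radios", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("radios: %s", buf);	/* "on", "killed" or "unknown" */
	fclose(f);
	return 0;
}
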
423 578
@@ -435,24 +590,16 @@ static struct platform_driver fujitsupf_driver = {
435static void dmi_check_cb_common(const struct dmi_system_id *id) 590static void dmi_check_cb_common(const struct dmi_system_id *id)
436{ 591{
437 acpi_handle handle; 592 acpi_handle handle;
438 int have_blnf;
439 printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n", 593 printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n",
440 id->ident); 594 id->ident);
441 have_blnf = ACPI_SUCCESS
442 (acpi_get_handle(NULL, "\\_SB.PCI0.GFX0.LCD.BLNF", &handle));
443 if (use_alt_lcd_levels == -1) { 595 if (use_alt_lcd_levels == -1) {
444 vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detecting usealt\n"); 596 if (ACPI_SUCCESS(acpi_get_handle(NULL,
445 use_alt_lcd_levels = 1; 597 "\\_SB.PCI0.LPCB.FJEX.SBL2", &handle)))
446 } 598 use_alt_lcd_levels = 1;
447 if (disable_brightness_keys == -1) { 599 else
448 vdbg_printk(FUJLAPTOP_DBG_TRACE, 600 use_alt_lcd_levels = 0;
449 "auto-detecting disable_keys\n"); 601 vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as "
450 disable_brightness_keys = have_blnf ? 1 : 0; 602 "%i\n", use_alt_lcd_levels);
451 }
452 if (disable_brightness_adjust == -1) {
453 vdbg_printk(FUJLAPTOP_DBG_TRACE,
454 "auto-detecting disable_adjust\n");
455 disable_brightness_adjust = have_blnf ? 0 : 1;
456 } 603 }
457} 604}
458 605
@@ -581,19 +728,14 @@ static int acpi_fujitsu_add(struct acpi_device *device)
581 728
582 /* do config (detect defaults) */ 729 /* do config (detect defaults) */
583 use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0; 730 use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
584 disable_brightness_keys = disable_brightness_keys == 1 ? 1 : 0;
585 disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0; 731 disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
586 vdbg_printk(FUJLAPTOP_DBG_INFO, 732 vdbg_printk(FUJLAPTOP_DBG_INFO,
587 "config: [alt interface: %d], [key disable: %d], [adjust disable: %d]\n", 733 "config: [alt interface: %d], [adjust disable: %d]\n",
588 use_alt_lcd_levels, disable_brightness_keys, 734 use_alt_lcd_levels, disable_brightness_adjust);
589 disable_brightness_adjust);
590 735
591 if (get_max_brightness() <= 0) 736 if (get_max_brightness() <= 0)
592 fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS; 737 fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;
593 if (use_alt_lcd_levels) 738 get_lcd_level();
594 get_lcd_level_alt();
595 else
596 get_lcd_level();
597 739
598 return result; 740 return result;
599 741
@@ -644,43 +786,23 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data)
644 case ACPI_FUJITSU_NOTIFY_CODE1: 786 case ACPI_FUJITSU_NOTIFY_CODE1:
645 keycode = 0; 787 keycode = 0;
646 oldb = fujitsu->brightness_level; 788 oldb = fujitsu->brightness_level;
647 get_lcd_level(); /* the alt version always yields changed */ 789 get_lcd_level();
648 newb = fujitsu->brightness_level; 790 newb = fujitsu->brightness_level;
649 791
650 vdbg_printk(FUJLAPTOP_DBG_TRACE, 792 vdbg_printk(FUJLAPTOP_DBG_TRACE,
651 "brightness button event [%i -> %i (%i)]\n", 793 "brightness button event [%i -> %i (%i)]\n",
652 oldb, newb, fujitsu->brightness_changed); 794 oldb, newb, fujitsu->brightness_changed);
653 795
654 if (oldb == newb && fujitsu->brightness_changed) { 796 if (oldb < newb) {
655 keycode = 0;
656 if (disable_brightness_keys != 1) {
657 if (oldb == 0) {
658 acpi_bus_generate_proc_event
659 (fujitsu->dev,
660 ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS,
661 0);
662 keycode = KEY_BRIGHTNESSDOWN;
663 } else if (oldb ==
664 (fujitsu->max_brightness) - 1) {
665 acpi_bus_generate_proc_event
666 (fujitsu->dev,
667 ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS,
668 0);
669 keycode = KEY_BRIGHTNESSUP;
670 }
671 }
672 } else if (oldb < newb) {
673 if (disable_brightness_adjust != 1) { 797 if (disable_brightness_adjust != 1) {
674 if (use_alt_lcd_levels) 798 if (use_alt_lcd_levels)
675 set_lcd_level_alt(newb); 799 set_lcd_level_alt(newb);
676 else 800 else
677 set_lcd_level(newb); 801 set_lcd_level(newb);
678 } 802 }
679 if (disable_brightness_keys != 1) { 803 acpi_bus_generate_proc_event(fujitsu->dev,
680 acpi_bus_generate_proc_event(fujitsu->dev, 804 ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 0);
681 ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 0); 805 keycode = KEY_BRIGHTNESSUP;
682 keycode = KEY_BRIGHTNESSUP;
683 }
684 } else if (oldb > newb) { 806 } else if (oldb > newb) {
685 if (disable_brightness_adjust != 1) { 807 if (disable_brightness_adjust != 1) {
686 if (use_alt_lcd_levels) 808 if (use_alt_lcd_levels)
@@ -688,13 +810,9 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data)
688 else 810 else
689 set_lcd_level(newb); 811 set_lcd_level(newb);
690 } 812 }
691 if (disable_brightness_keys != 1) { 813 acpi_bus_generate_proc_event(fujitsu->dev,
692 acpi_bus_generate_proc_event(fujitsu->dev, 814 ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 0);
693 ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 0); 815 keycode = KEY_BRIGHTNESSDOWN;
694 keycode = KEY_BRIGHTNESSDOWN;
695 }
696 } else {
697 keycode = KEY_UNKNOWN;
698 } 816 }
699 break; 817 break;
700 default: 818 default:
@@ -771,7 +889,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
771 input->id.bustype = BUS_HOST; 889 input->id.bustype = BUS_HOST;
772 input->id.product = 0x06; 890 input->id.product = 0x06;
773 input->dev.parent = &device->dev; 891 input->dev.parent = &device->dev;
774 input->evbit[0] = BIT(EV_KEY); 892
893 set_bit(EV_KEY, input->evbit);
775 set_bit(fujitsu->keycode1, input->keybit); 894 set_bit(fujitsu->keycode1, input->keybit);
776 set_bit(fujitsu->keycode2, input->keybit); 895 set_bit(fujitsu->keycode2, input->keybit);
777 set_bit(fujitsu->keycode3, input->keybit); 896 set_bit(fujitsu->keycode3, input->keybit);
@@ -803,10 +922,44 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
803 printk(KERN_ERR "_INI Method failed\n"); 922 printk(KERN_ERR "_INI Method failed\n");
804 } 923 }
805 924
806 i = 0; /* Discard hotkey ringbuffer */ 925 i = 0;
807 while (get_irb() != 0 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) ; 926 while (call_fext_func(FUNC_BUTTONS, 0x1, 0x0, 0x0) != 0
927 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE)
928 ; /* No action, result is discarded */
808 vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i); 929 vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i);
809 930
931 fujitsu_hotkey->rfkill_state =
932 call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
933
934 /* Suspect this is a keymap of the application panel, print it */
935 printk(KERN_INFO "fujitsu-laptop: BTNI: [0x%x]\n",
936 call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
937
938 #ifdef CONFIG_LEDS_CLASS
939 if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
940 result = led_classdev_register(&fujitsu->pf_device->dev,
941 &logolamp_led);
942 if (result == 0) {
943 fujitsu_hotkey->logolamp_registered = 1;
944 } else {
945 printk(KERN_ERR "fujitsu-laptop: Could not register "
946 "LED handler for logo lamp, error %i\n", result);
947 }
948 }
949
950 if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
951 (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
952 result = led_classdev_register(&fujitsu->pf_device->dev,
953 &kblamps_led);
954 if (result == 0) {
955 fujitsu_hotkey->kblamps_registered = 1;
956 } else {
957 printk(KERN_ERR "fujitsu-laptop: Could not register "
958 "LED handler for keyboard lamps, error %i\n", result);
959 }
960 }
961 #endif
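
The logolamp_led and kblamps_led classdevs registered here are defined elsewhere in the patch; as a hedged sketch of what such a declaration typically looks like (the LED name and callback wiring are assumptions, not quoted from this hunk):

#include <linux/leds.h>

static void logolamp_set(struct led_classdev *cdev,
			 enum led_brightness brightness)
{
	/* Assumed wiring: forward the requested level via FUNC_LEDS. */
}

static struct led_classdev logolamp_led = {
	.name		= "fujitsu::logolamp",	/* assumed name */
	.brightness_set	= logolamp_set,
};
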
962
810 return result; 963 return result;
811 964
812end: 965end:
@@ -852,16 +1005,15 @@ static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
852 1005
853 input = fujitsu_hotkey->input; 1006 input = fujitsu_hotkey->input;
854 1007
855 vdbg_printk(FUJLAPTOP_DBG_TRACE, "Hotkey event\n"); 1008 fujitsu_hotkey->rfkill_state =
1009 call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
856 1010
857 switch (event) { 1011 switch (event) {
858 case ACPI_FUJITSU_NOTIFY_CODE1: 1012 case ACPI_FUJITSU_NOTIFY_CODE1:
859 i = 0; 1013 i = 0;
860 while ((irb = get_irb()) != 0 1014 while ((irb =
861 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) { 1015 call_fext_func(FUNC_BUTTONS, 0x1, 0x0, 0x0)) != 0
862 vdbg_printk(FUJLAPTOP_DBG_TRACE, "GIRB result [%x]\n", 1016 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) {
863 irb);
864
865 switch (irb & 0x4ff) { 1017 switch (irb & 0x4ff) {
866 case KEY1_CODE: 1018 case KEY1_CODE:
867 keycode = fujitsu->keycode1; 1019 keycode = fujitsu->keycode1;
@@ -1035,6 +1187,15 @@ static int __init fujitsu_init(void)
1035 goto fail_hotkey1; 1187 goto fail_hotkey1;
1036 } 1188 }
1037 1189
1190 /* Sync backlight power status (needs FUJ02E3 device, hence deferred) */
1191
1192 if (!acpi_video_backlight_support()) {
1193 if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
1194 fujitsu->bl_device->props.power = 4;
1195 else
1196 fujitsu->bl_device->props.power = 0;
1197 }
1198
1038 printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION 1199 printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION
1039 " successfully loaded.\n"); 1200 " successfully loaded.\n");
1040 1201
@@ -1074,6 +1235,14 @@ fail_acpi:
1074 1235
1075static void __exit fujitsu_cleanup(void) 1236static void __exit fujitsu_cleanup(void)
1076{ 1237{
1238 #ifdef CONFIG_LEDS_CLASS
1239 if (fujitsu_hotkey->logolamp_registered != 0)
1240 led_classdev_unregister(&logolamp_led);
1241
1242 if (fujitsu_hotkey->kblamps_registered != 0)
1243 led_classdev_unregister(&kblamps_led);
1244 #endif
1245
1077 sysfs_remove_group(&fujitsu->pf_device->dev.kobj, 1246 sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
1078 &fujitsupf_attribute_group); 1247 &fujitsupf_attribute_group);
1079 platform_device_unregister(fujitsu->pf_device); 1248 platform_device_unregister(fujitsu->pf_device);
@@ -1098,9 +1267,6 @@ module_exit(fujitsu_cleanup);
1098module_param(use_alt_lcd_levels, uint, 0644); 1267module_param(use_alt_lcd_levels, uint, 0644);
1099MODULE_PARM_DESC(use_alt_lcd_levels, 1268MODULE_PARM_DESC(use_alt_lcd_levels,
1100 "Use alternative interface for lcd_levels (needed for Lifebook s6410)."); 1269 "Use alternative interface for lcd_levels (needed for Lifebook s6410).");
1101module_param(disable_brightness_keys, uint, 0644);
1102MODULE_PARM_DESC(disable_brightness_keys,
1103 "Disable brightness keys (eg. if they are already handled by the generic ACPI_VIDEO device).");
1104module_param(disable_brightness_adjust, uint, 0644); 1270module_param(disable_brightness_adjust, uint, 0644);
1105MODULE_PARM_DESC(disable_brightness_adjust, "Disable brightness adjustment."); 1271MODULE_PARM_DESC(disable_brightness_adjust, "Disable brightness adjustment.");
1106#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG 1272#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
@@ -1108,12 +1274,13 @@ module_param_named(debug, dbg_level, uint, 0644);
1108MODULE_PARM_DESC(debug, "Sets debug level bit-mask"); 1274MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
1109#endif 1275#endif
1110 1276
1111MODULE_AUTHOR("Jonathan Woithe, Peter Gruber"); 1277MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon");
1112MODULE_DESCRIPTION("Fujitsu laptop extras support"); 1278MODULE_DESCRIPTION("Fujitsu laptop extras support");
1113MODULE_VERSION(FUJITSU_DRIVER_VERSION); 1279MODULE_VERSION(FUJITSU_DRIVER_VERSION);
1114MODULE_LICENSE("GPL"); 1280MODULE_LICENSE("GPL");
1115 1281
1116MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); 1282MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
1283MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
1117MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*"); 1284MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
1118 1285
1119static struct pnp_device_id pnp_ids[] = { 1286static struct pnp_device_id pnp_ids[] = {
diff --git a/drivers/misc/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 4b7c24c519c3..4b7c24c519c3 100644
--- a/drivers/misc/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
diff --git a/drivers/misc/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 27b7662955bb..27b7662955bb 100644
--- a/drivers/misc/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
diff --git a/drivers/misc/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 759763d18e4c..759763d18e4c 100644
--- a/drivers/misc/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
diff --git a/drivers/misc/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 4a1bc64485d5..f30db367c82e 100644
--- a/drivers/misc/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -241,8 +241,6 @@ static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val)
241 }; 241 };
242 acpi_status status = AE_OK; 242 acpi_status status = AE_OK;
243 243
244 ACPI_FUNCTION_TRACE("acpi_pcc_write_sset");
245
246 status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET, 244 status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET,
247 &params, NULL); 245 &params, NULL);
248 246
@@ -254,8 +252,6 @@ static inline int acpi_pcc_get_sqty(struct acpi_device *device)
254 unsigned long long s; 252 unsigned long long s;
255 acpi_status status; 253 acpi_status status;
256 254
257 ACPI_FUNCTION_TRACE("acpi_pcc_get_sqty");
258
259 status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY, 255 status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY,
260 NULL, &s); 256 NULL, &s);
261 if (ACPI_SUCCESS(status)) 257 if (ACPI_SUCCESS(status))
@@ -274,8 +270,6 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf)
274 union acpi_object *hkey = NULL; 270 union acpi_object *hkey = NULL;
275 int i; 271 int i;
276 272
277 ACPI_FUNCTION_TRACE("acpi_pcc_retrieve_biosdata");
278
279 status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0, 273 status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0,
280 &buffer); 274 &buffer);
281 if (ACPI_FAILURE(status)) { 275 if (ACPI_FAILURE(status)) {
@@ -501,8 +495,6 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
501 int key_code, hkey_num; 495 int key_code, hkey_num;
502 unsigned long long result; 496 unsigned long long result;
503 497
504 ACPI_FUNCTION_TRACE("acpi_pcc_generate_keyinput");
505
506 rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY, 498 rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
507 NULL, &result); 499 NULL, &result);
508 if (!ACPI_SUCCESS(rc)) { 500 if (!ACPI_SUCCESS(rc)) {
@@ -538,8 +530,6 @@ static void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data)
538{ 530{
539 struct pcc_acpi *pcc = (struct pcc_acpi *) data; 531 struct pcc_acpi *pcc = (struct pcc_acpi *) data;
540 532
541 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_notify");
542
543 switch (event) { 533 switch (event) {
544 case HKEY_NOTIFY: 534 case HKEY_NOTIFY:
545 acpi_pcc_generate_keyinput(pcc); 535 acpi_pcc_generate_keyinput(pcc);
@@ -554,8 +544,6 @@ static int acpi_pcc_init_input(struct pcc_acpi *pcc)
554{ 544{
555 int i, rc; 545 int i, rc;
556 546
557 ACPI_FUNCTION_TRACE("acpi_pcc_init_input");
558
559 pcc->input_dev = input_allocate_device(); 547 pcc->input_dev = input_allocate_device();
560 if (!pcc->input_dev) { 548 if (!pcc->input_dev) {
561 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 549 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
@@ -597,8 +585,6 @@ static int acpi_pcc_hotkey_resume(struct acpi_device *device)
597 struct pcc_acpi *pcc = acpi_driver_data(device); 585 struct pcc_acpi *pcc = acpi_driver_data(device);
598 acpi_status status = AE_OK; 586 acpi_status status = AE_OK;
599 587
600 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_resume");
601
602 if (device == NULL || pcc == NULL) 588 if (device == NULL || pcc == NULL)
603 return -EINVAL; 589 return -EINVAL;
604 590
@@ -616,8 +602,6 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
616 struct pcc_acpi *pcc; 602 struct pcc_acpi *pcc;
617 int num_sifr, result; 603 int num_sifr, result;
618 604
619 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_add");
620
621 if (!device) 605 if (!device)
622 return -EINVAL; 606 return -EINVAL;
623 607
@@ -714,8 +698,6 @@ static int __init acpi_pcc_init(void)
714{ 698{
715 int result = 0; 699 int result = 0;
716 700
717 ACPI_FUNCTION_TRACE("acpi_pcc_init");
718
719 if (acpi_disabled) 701 if (acpi_disabled)
720 return -ENODEV; 702 return -ENODEV;
721 703
@@ -733,8 +715,6 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type)
733{ 715{
734 struct pcc_acpi *pcc = acpi_driver_data(device); 716 struct pcc_acpi *pcc = acpi_driver_data(device);
735 717
736 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_remove");
737
738 if (!device || !pcc) 718 if (!device || !pcc)
739 return -EINVAL; 719 return -EINVAL;
740 720
@@ -757,8 +737,6 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type)
757 737
758static void __exit acpi_pcc_exit(void) 738static void __exit acpi_pcc_exit(void)
759{ 739{
760 ACPI_FUNCTION_TRACE("acpi_pcc_exit");
761
762 acpi_bus_unregister_driver(&acpi_pcc_driver); 740 acpi_bus_unregister_driver(&acpi_pcc_driver);
763} 741}
764 742
diff --git a/drivers/misc/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 571b211608d1..537959d07148 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -935,14 +935,17 @@ static void sony_acpi_notify(acpi_handle handle, u32 event, void *data)
935static acpi_status sony_walk_callback(acpi_handle handle, u32 level, 935static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
936 void *context, void **return_value) 936 void *context, void **return_value)
937{ 937{
938 struct acpi_namespace_node *node; 938 struct acpi_device_info *info;
939 union acpi_operand_object *operand; 939 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
940 940
941 node = (struct acpi_namespace_node *)handle; 941 if (ACPI_SUCCESS(acpi_get_object_info(handle, &buffer))) {
942 operand = (union acpi_operand_object *)node->object; 942 info = buffer.pointer;
943 943
944 printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n", node->name.ascii, 944 printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n",
945 (u32) operand->method.param_count); 945 (char *)&info->name, info->param_count);
946
947 kfree(buffer.pointer);
948 }
946 949
947 return AE_OK; 950 return AE_OK;
948} 951}
diff --git a/drivers/misc/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index f25e4c974dcf..b4a4aa9ee482 100644
--- a/drivers/misc/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -30,7 +30,6 @@
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/types.h> 31#include <linux/types.h>
32#include <acpi/acpi.h> 32#include <acpi/acpi.h>
33#include <acpi/actypes.h>
34#include <acpi/acpi_bus.h> 33#include <acpi/acpi_bus.h>
35#include <acpi/acpi_drivers.h> 34#include <acpi/acpi_drivers.h>
36#include <linux/platform_device.h> 35#include <linux/platform_device.h>
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 899766e16fa8..3478453eba7a 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -76,7 +76,6 @@
76#include <linux/workqueue.h> 76#include <linux/workqueue.h>
77 77
78#include <acpi/acpi_drivers.h> 78#include <acpi/acpi_drivers.h>
79#include <acpi/acnamesp.h>
80 79
81#include <linux/pci_ids.h> 80#include <linux/pci_ids.h>
82 81
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 40e60fc2e596..40e60fc2e596 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
diff --git a/drivers/acpi/wmi.c b/drivers/platform/x86/wmi.c
index 8a8b377712c9..8a8b377712c9 100644
--- a/drivers/acpi/wmi.c
+++ b/drivers/platform/x86/wmi.c
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 383e47c392a4..2834846a185d 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -23,7 +23,6 @@
23#include <linux/pnp.h> 23#include <linux/pnp.h>
24#include <linux/mod_devicetable.h> 24#include <linux/mod_devicetable.h>
25#include <acpi/acpi_bus.h> 25#include <acpi/acpi_bus.h>
26#include <acpi/actypes.h>
27 26
28#include "../base.h" 27#include "../base.h"
29#include "pnpacpi.h" 28#include "pnpacpi.h"
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 02a774424e8d..f511a406fcaa 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -29,7 +29,7 @@ static DEFINE_MUTEX(regulator_list_mutex);
29static LIST_HEAD(regulator_list); 29static LIST_HEAD(regulator_list);
30static LIST_HEAD(regulator_map_list); 30static LIST_HEAD(regulator_map_list);
31 31
32/** 32/*
33 * struct regulator_dev 33 * struct regulator_dev
34 * 34 *
35 * Voltage / Current regulator class device. One for each regulator. 35 * Voltage / Current regulator class device. One for each regulator.
@@ -56,7 +56,7 @@ struct regulator_dev {
56 void *reg_data; /* regulator_dev data */ 56 void *reg_data; /* regulator_dev data */
57}; 57};
58 58
59/** 59/*
60 * struct regulator_map 60 * struct regulator_map
61 * 61 *
62 * Used to provide symbolic supply names to devices. 62 * Used to provide symbolic supply names to devices.
@@ -79,7 +79,7 @@ struct regulator {
79 int uA_load; 79 int uA_load;
80 int min_uV; 80 int min_uV;
81 int max_uV; 81 int max_uV;
82 int enabled; /* client has called enabled */ 82 int enabled; /* count of client enables */
83 char *supply_name; 83 char *supply_name;
84 struct device_attribute dev_attr; 84 struct device_attribute dev_attr;
85 struct regulator_dev *rdev; 85 struct regulator_dev *rdev;
@@ -174,6 +174,16 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
174/* operating mode constraint check */ 174/* operating mode constraint check */
175static int regulator_check_mode(struct regulator_dev *rdev, int mode) 175static int regulator_check_mode(struct regulator_dev *rdev, int mode)
176{ 176{
177 switch (mode) {
178 case REGULATOR_MODE_FAST:
179 case REGULATOR_MODE_NORMAL:
180 case REGULATOR_MODE_IDLE:
181 case REGULATOR_MODE_STANDBY:
182 break;
183 default:
184 return -EINVAL;
185 }
186
177 if (!rdev->constraints) { 187 if (!rdev->constraints) {
178 printk(KERN_ERR "%s: no constraints for %s\n", __func__, 188 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
179 rdev->desc->name); 189 rdev->desc->name);
@@ -232,6 +242,7 @@ static ssize_t regulator_uV_show(struct device *dev,
232 242
233 return ret; 243 return ret;
234} 244}
245static DEVICE_ATTR(microvolts, 0444, regulator_uV_show, NULL);
235 246
236static ssize_t regulator_uA_show(struct device *dev, 247static ssize_t regulator_uA_show(struct device *dev,
237 struct device_attribute *attr, char *buf) 248 struct device_attribute *attr, char *buf)
@@ -240,6 +251,7 @@ static ssize_t regulator_uA_show(struct device *dev,
240 251
241 return sprintf(buf, "%d\n", _regulator_get_current_limit(rdev)); 252 return sprintf(buf, "%d\n", _regulator_get_current_limit(rdev));
242} 253}
254static DEVICE_ATTR(microamps, 0444, regulator_uA_show, NULL);
243 255
244static ssize_t regulator_name_show(struct device *dev, 256static ssize_t regulator_name_show(struct device *dev,
245 struct device_attribute *attr, char *buf) 257 struct device_attribute *attr, char *buf)
@@ -257,12 +269,8 @@ static ssize_t regulator_name_show(struct device *dev,
257 return sprintf(buf, "%s\n", name); 269 return sprintf(buf, "%s\n", name);
258} 270}
259 271
260static ssize_t regulator_opmode_show(struct device *dev, 272static ssize_t regulator_print_opmode(char *buf, int mode)
261 struct device_attribute *attr, char *buf)
262{ 273{
263 struct regulator_dev *rdev = dev_get_drvdata(dev);
264 int mode = _regulator_get_mode(rdev);
265
266 switch (mode) { 274 switch (mode) {
267 case REGULATOR_MODE_FAST: 275 case REGULATOR_MODE_FAST:
268 return sprintf(buf, "fast\n"); 276 return sprintf(buf, "fast\n");
@@ -276,12 +284,17 @@ static ssize_t regulator_opmode_show(struct device *dev,
276 return sprintf(buf, "unknown\n"); 284 return sprintf(buf, "unknown\n");
277} 285}
278 286
279static ssize_t regulator_state_show(struct device *dev, 287static ssize_t regulator_opmode_show(struct device *dev,
280 struct device_attribute *attr, char *buf) 288 struct device_attribute *attr, char *buf)
281{ 289{
282 struct regulator_dev *rdev = dev_get_drvdata(dev); 290 struct regulator_dev *rdev = dev_get_drvdata(dev);
283 int state = _regulator_is_enabled(rdev);
284 291
292 return regulator_print_opmode(buf, _regulator_get_mode(rdev));
293}
294static DEVICE_ATTR(opmode, 0444, regulator_opmode_show, NULL);
295
296static ssize_t regulator_print_state(char *buf, int state)
297{
285 if (state > 0) 298 if (state > 0)
286 return sprintf(buf, "enabled\n"); 299 return sprintf(buf, "enabled\n");
287 else if (state == 0) 300 else if (state == 0)
@@ -290,6 +303,15 @@ static ssize_t regulator_state_show(struct device *dev,
290 return sprintf(buf, "unknown\n"); 303 return sprintf(buf, "unknown\n");
291} 304}
292 305
306static ssize_t regulator_state_show(struct device *dev,
307 struct device_attribute *attr, char *buf)
308{
309 struct regulator_dev *rdev = dev_get_drvdata(dev);
310
311 return regulator_print_state(buf, _regulator_is_enabled(rdev));
312}
313static DEVICE_ATTR(state, 0444, regulator_state_show, NULL);
314
293static ssize_t regulator_min_uA_show(struct device *dev, 315static ssize_t regulator_min_uA_show(struct device *dev,
294 struct device_attribute *attr, char *buf) 316 struct device_attribute *attr, char *buf)
295{ 317{
@@ -300,6 +322,7 @@ static ssize_t regulator_min_uA_show(struct device *dev,
300 322
301 return sprintf(buf, "%d\n", rdev->constraints->min_uA); 323 return sprintf(buf, "%d\n", rdev->constraints->min_uA);
302} 324}
325static DEVICE_ATTR(min_microamps, 0444, regulator_min_uA_show, NULL);
303 326
304static ssize_t regulator_max_uA_show(struct device *dev, 327static ssize_t regulator_max_uA_show(struct device *dev,
305 struct device_attribute *attr, char *buf) 328 struct device_attribute *attr, char *buf)
@@ -311,6 +334,7 @@ static ssize_t regulator_max_uA_show(struct device *dev,
311 334
312 return sprintf(buf, "%d\n", rdev->constraints->max_uA); 335 return sprintf(buf, "%d\n", rdev->constraints->max_uA);
313} 336}
337static DEVICE_ATTR(max_microamps, 0444, regulator_max_uA_show, NULL);
314 338
315static ssize_t regulator_min_uV_show(struct device *dev, 339static ssize_t regulator_min_uV_show(struct device *dev,
316 struct device_attribute *attr, char *buf) 340 struct device_attribute *attr, char *buf)
@@ -322,6 +346,7 @@ static ssize_t regulator_min_uV_show(struct device *dev,
322 346
323 return sprintf(buf, "%d\n", rdev->constraints->min_uV); 347 return sprintf(buf, "%d\n", rdev->constraints->min_uV);
324} 348}
349static DEVICE_ATTR(min_microvolts, 0444, regulator_min_uV_show, NULL);
325 350
326static ssize_t regulator_max_uV_show(struct device *dev, 351static ssize_t regulator_max_uV_show(struct device *dev,
327 struct device_attribute *attr, char *buf) 352 struct device_attribute *attr, char *buf)
@@ -333,6 +358,7 @@ static ssize_t regulator_max_uV_show(struct device *dev,
333 358
334 return sprintf(buf, "%d\n", rdev->constraints->max_uV); 359 return sprintf(buf, "%d\n", rdev->constraints->max_uV);
335} 360}
361static DEVICE_ATTR(max_microvolts, 0444, regulator_max_uV_show, NULL);
336 362
337static ssize_t regulator_total_uA_show(struct device *dev, 363static ssize_t regulator_total_uA_show(struct device *dev,
338 struct device_attribute *attr, char *buf) 364 struct device_attribute *attr, char *buf)
@@ -347,6 +373,7 @@ static ssize_t regulator_total_uA_show(struct device *dev,
347 mutex_unlock(&rdev->mutex); 373 mutex_unlock(&rdev->mutex);
348 return sprintf(buf, "%d\n", uA); 374 return sprintf(buf, "%d\n", uA);
349} 375}
376static DEVICE_ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL);
350 377
351static ssize_t regulator_num_users_show(struct device *dev, 378static ssize_t regulator_num_users_show(struct device *dev,
352 struct device_attribute *attr, char *buf) 379 struct device_attribute *attr, char *buf)
@@ -374,153 +401,106 @@ static ssize_t regulator_suspend_mem_uV_show(struct device *dev,
374{ 401{
375 struct regulator_dev *rdev = dev_get_drvdata(dev); 402 struct regulator_dev *rdev = dev_get_drvdata(dev);
376 403
377 if (!rdev->constraints)
378 return sprintf(buf, "not defined\n");
379 return sprintf(buf, "%d\n", rdev->constraints->state_mem.uV); 404 return sprintf(buf, "%d\n", rdev->constraints->state_mem.uV);
380} 405}
406static DEVICE_ATTR(suspend_mem_microvolts, 0444,
407 regulator_suspend_mem_uV_show, NULL);
381 408
382static ssize_t regulator_suspend_disk_uV_show(struct device *dev, 409static ssize_t regulator_suspend_disk_uV_show(struct device *dev,
383 struct device_attribute *attr, char *buf) 410 struct device_attribute *attr, char *buf)
384{ 411{
385 struct regulator_dev *rdev = dev_get_drvdata(dev); 412 struct regulator_dev *rdev = dev_get_drvdata(dev);
386 413
387 if (!rdev->constraints)
388 return sprintf(buf, "not defined\n");
389 return sprintf(buf, "%d\n", rdev->constraints->state_disk.uV); 414 return sprintf(buf, "%d\n", rdev->constraints->state_disk.uV);
390} 415}
416static DEVICE_ATTR(suspend_disk_microvolts, 0444,
417 regulator_suspend_disk_uV_show, NULL);
391 418
392static ssize_t regulator_suspend_standby_uV_show(struct device *dev, 419static ssize_t regulator_suspend_standby_uV_show(struct device *dev,
393 struct device_attribute *attr, char *buf) 420 struct device_attribute *attr, char *buf)
394{ 421{
395 struct regulator_dev *rdev = dev_get_drvdata(dev); 422 struct regulator_dev *rdev = dev_get_drvdata(dev);
396 423
397 if (!rdev->constraints)
398 return sprintf(buf, "not defined\n");
399 return sprintf(buf, "%d\n", rdev->constraints->state_standby.uV); 424 return sprintf(buf, "%d\n", rdev->constraints->state_standby.uV);
400} 425}
401 426static DEVICE_ATTR(suspend_standby_microvolts, 0444,
402static ssize_t suspend_opmode_show(struct regulator_dev *rdev, 427 regulator_suspend_standby_uV_show, NULL);
403 unsigned int mode, char *buf)
404{
405 switch (mode) {
406 case REGULATOR_MODE_FAST:
407 return sprintf(buf, "fast\n");
408 case REGULATOR_MODE_NORMAL:
409 return sprintf(buf, "normal\n");
410 case REGULATOR_MODE_IDLE:
411 return sprintf(buf, "idle\n");
412 case REGULATOR_MODE_STANDBY:
413 return sprintf(buf, "standby\n");
414 }
415 return sprintf(buf, "unknown\n");
416}
417 428
418static ssize_t regulator_suspend_mem_mode_show(struct device *dev, 429static ssize_t regulator_suspend_mem_mode_show(struct device *dev,
419 struct device_attribute *attr, char *buf) 430 struct device_attribute *attr, char *buf)
420{ 431{
421 struct regulator_dev *rdev = dev_get_drvdata(dev); 432 struct regulator_dev *rdev = dev_get_drvdata(dev);
422 433
423 if (!rdev->constraints) 434 return regulator_print_opmode(buf,
424 return sprintf(buf, "not defined\n"); 435 rdev->constraints->state_mem.mode);
425 return suspend_opmode_show(rdev,
426 rdev->constraints->state_mem.mode, buf);
427} 436}
437static DEVICE_ATTR(suspend_mem_mode, 0444,
438 regulator_suspend_mem_mode_show, NULL);
428 439
429static ssize_t regulator_suspend_disk_mode_show(struct device *dev, 440static ssize_t regulator_suspend_disk_mode_show(struct device *dev,
430 struct device_attribute *attr, char *buf) 441 struct device_attribute *attr, char *buf)
431{ 442{
432 struct regulator_dev *rdev = dev_get_drvdata(dev); 443 struct regulator_dev *rdev = dev_get_drvdata(dev);
433 444
434 if (!rdev->constraints) 445 return regulator_print_opmode(buf,
435 return sprintf(buf, "not defined\n"); 446 rdev->constraints->state_disk.mode);
436 return suspend_opmode_show(rdev,
437 rdev->constraints->state_disk.mode, buf);
438} 447}
448static DEVICE_ATTR(suspend_disk_mode, 0444,
449 regulator_suspend_disk_mode_show, NULL);
439 450
440static ssize_t regulator_suspend_standby_mode_show(struct device *dev, 451static ssize_t regulator_suspend_standby_mode_show(struct device *dev,
441 struct device_attribute *attr, char *buf) 452 struct device_attribute *attr, char *buf)
442{ 453{
443 struct regulator_dev *rdev = dev_get_drvdata(dev); 454 struct regulator_dev *rdev = dev_get_drvdata(dev);
444 455
445 if (!rdev->constraints) 456 return regulator_print_opmode(buf,
446 return sprintf(buf, "not defined\n"); 457 rdev->constraints->state_standby.mode);
447 return suspend_opmode_show(rdev,
448 rdev->constraints->state_standby.mode, buf);
449} 458}
459static DEVICE_ATTR(suspend_standby_mode, 0444,
460 regulator_suspend_standby_mode_show, NULL);
450 461
451static ssize_t regulator_suspend_mem_state_show(struct device *dev, 462static ssize_t regulator_suspend_mem_state_show(struct device *dev,
452 struct device_attribute *attr, char *buf) 463 struct device_attribute *attr, char *buf)
453{ 464{
454 struct regulator_dev *rdev = dev_get_drvdata(dev); 465 struct regulator_dev *rdev = dev_get_drvdata(dev);
455 466
456 if (!rdev->constraints) 467 return regulator_print_state(buf,
457 return sprintf(buf, "not defined\n"); 468 rdev->constraints->state_mem.enabled);
458
459 if (rdev->constraints->state_mem.enabled)
460 return sprintf(buf, "enabled\n");
461 else
462 return sprintf(buf, "disabled\n");
463} 469}
470static DEVICE_ATTR(suspend_mem_state, 0444,
471 regulator_suspend_mem_state_show, NULL);
464 472
465static ssize_t regulator_suspend_disk_state_show(struct device *dev, 473static ssize_t regulator_suspend_disk_state_show(struct device *dev,
466 struct device_attribute *attr, char *buf) 474 struct device_attribute *attr, char *buf)
467{ 475{
468 struct regulator_dev *rdev = dev_get_drvdata(dev); 476 struct regulator_dev *rdev = dev_get_drvdata(dev);
469 477
470 if (!rdev->constraints) 478 return regulator_print_state(buf,
471 return sprintf(buf, "not defined\n"); 479 rdev->constraints->state_disk.enabled);
472
473 if (rdev->constraints->state_disk.enabled)
474 return sprintf(buf, "enabled\n");
475 else
476 return sprintf(buf, "disabled\n");
477} 480}
481static DEVICE_ATTR(suspend_disk_state, 0444,
482 regulator_suspend_disk_state_show, NULL);
478 483
479static ssize_t regulator_suspend_standby_state_show(struct device *dev, 484static ssize_t regulator_suspend_standby_state_show(struct device *dev,
480 struct device_attribute *attr, char *buf) 485 struct device_attribute *attr, char *buf)
481{ 486{
482 struct regulator_dev *rdev = dev_get_drvdata(dev); 487 struct regulator_dev *rdev = dev_get_drvdata(dev);
483 488
484 if (!rdev->constraints) 489 return regulator_print_state(buf,
485 return sprintf(buf, "not defined\n"); 490 rdev->constraints->state_standby.enabled);
486
487 if (rdev->constraints->state_standby.enabled)
488 return sprintf(buf, "enabled\n");
489 else
490 return sprintf(buf, "disabled\n");
491} 491}
492static DEVICE_ATTR(suspend_standby_state, 0444,
493 regulator_suspend_standby_state_show, NULL);
492 494
495
496/*
 497 * These are the only attributes present for all regulators.
498 * Other attributes are a function of regulator functionality.
499 */
493static struct device_attribute regulator_dev_attrs[] = { 500static struct device_attribute regulator_dev_attrs[] = {
494 __ATTR(name, 0444, regulator_name_show, NULL), 501 __ATTR(name, 0444, regulator_name_show, NULL),
495 __ATTR(microvolts, 0444, regulator_uV_show, NULL),
496 __ATTR(microamps, 0444, regulator_uA_show, NULL),
497 __ATTR(opmode, 0444, regulator_opmode_show, NULL),
498 __ATTR(state, 0444, regulator_state_show, NULL),
499 __ATTR(min_microvolts, 0444, regulator_min_uV_show, NULL),
500 __ATTR(min_microamps, 0444, regulator_min_uA_show, NULL),
501 __ATTR(max_microvolts, 0444, regulator_max_uV_show, NULL),
502 __ATTR(max_microamps, 0444, regulator_max_uA_show, NULL),
503 __ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL),
504 __ATTR(num_users, 0444, regulator_num_users_show, NULL), 502 __ATTR(num_users, 0444, regulator_num_users_show, NULL),
505 __ATTR(type, 0444, regulator_type_show, NULL), 503 __ATTR(type, 0444, regulator_type_show, NULL),
506 __ATTR(suspend_mem_microvolts, 0444,
507 regulator_suspend_mem_uV_show, NULL),
508 __ATTR(suspend_disk_microvolts, 0444,
509 regulator_suspend_disk_uV_show, NULL),
510 __ATTR(suspend_standby_microvolts, 0444,
511 regulator_suspend_standby_uV_show, NULL),
512 __ATTR(suspend_mem_mode, 0444,
513 regulator_suspend_mem_mode_show, NULL),
514 __ATTR(suspend_disk_mode, 0444,
515 regulator_suspend_disk_mode_show, NULL),
516 __ATTR(suspend_standby_mode, 0444,
517 regulator_suspend_standby_mode_show, NULL),
518 __ATTR(suspend_mem_state, 0444,
519 regulator_suspend_mem_state_show, NULL),
520 __ATTR(suspend_disk_state, 0444,
521 regulator_suspend_disk_state_show, NULL),
522 __ATTR(suspend_standby_state, 0444,
523 regulator_suspend_standby_state_show, NULL),
524 __ATTR_NULL, 504 __ATTR_NULL,
525}; 505};
526 506
@@ -675,7 +655,8 @@ static void print_constraints(struct regulator_dev *rdev)
675 655
676/** 656/**
677 * set_machine_constraints - sets regulator constraints 657 * set_machine_constraints - sets regulator constraints
678 * @regulator: regulator source 658 * @rdev: regulator source
659 * @constraints: constraints to apply
679 * 660 *
680 * Allows platform initialisation code to define and constrain 661 * Allows platform initialisation code to define and constrain
681 * regulator circuits e.g. valid voltage/current ranges, etc. NOTE: 662 * regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
@@ -750,8 +731,8 @@ out:
750 731
751/** 732/**
752 * set_supply - set regulator supply regulator 733 * set_supply - set regulator supply regulator
753 * @regulator: regulator name 734 * @rdev: regulator name
754 * @supply: supply regulator name 735 * @supply_rdev: supply regulator name
755 * 736 *
756 * Called by platform initialisation code to set the supply regulator for this 737 * Called by platform initialisation code to set the supply regulator for this
 757 * regulator. This ensures that a regulator's supply will also be enabled by the 738 * regulator. This ensures that a regulator's supply will also be enabled by the
@@ -778,9 +759,9 @@ out:
778 759
779/** 760/**
780 * set_consumer_device_supply: Bind a regulator to a symbolic supply 761 * set_consumer_device_supply: Bind a regulator to a symbolic supply
781 * @regulator: regulator source 762 * @rdev: regulator source
782 * @dev: device the supply applies to 763 * @consumer_dev: device the supply applies to
783 * @supply: symbolic name for supply 764 * @supply: symbolic name for supply
784 * 765 *
785 * Allows platform initialisation code to map physical regulator 766 * Allows platform initialisation code to map physical regulator
786 * sources to symbolic names for supplies for use by devices. Devices 767 * sources to symbolic names for supplies for use by devices. Devices
@@ -795,6 +776,20 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
795 if (supply == NULL) 776 if (supply == NULL)
796 return -EINVAL; 777 return -EINVAL;
797 778
779 list_for_each_entry(node, &regulator_map_list, list) {
780 if (consumer_dev != node->dev)
781 continue;
782 if (strcmp(node->supply, supply) != 0)
783 continue;
784
785 dev_dbg(consumer_dev, "%s/%s is '%s' supply; fail %s/%s\n",
786 dev_name(&node->regulator->dev),
787 node->regulator->desc->name,
788 supply,
789 dev_name(&rdev->dev), rdev->desc->name);
790 return -EBUSY;
791 }
792
798 node = kmalloc(sizeof(struct regulator_map), GFP_KERNEL); 793 node = kmalloc(sizeof(struct regulator_map), GFP_KERNEL);
799 if (node == NULL) 794 if (node == NULL)
800 return -ENOMEM; 795 return -ENOMEM;
@@ -963,16 +958,13 @@ void regulator_put(struct regulator *regulator)
963 if (regulator == NULL || IS_ERR(regulator)) 958 if (regulator == NULL || IS_ERR(regulator))
964 return; 959 return;
965 960
966 if (regulator->enabled) {
967 printk(KERN_WARNING "Releasing supply %s while enabled\n",
968 regulator->supply_name);
969 WARN_ON(regulator->enabled);
970 regulator_disable(regulator);
971 }
972
973 mutex_lock(&regulator_list_mutex); 961 mutex_lock(&regulator_list_mutex);
974 rdev = regulator->rdev; 962 rdev = regulator->rdev;
975 963
964 if (WARN(regulator->enabled, "Releasing supply %s while enabled\n",
965 regulator->supply_name))
966 _regulator_disable(rdev);
967
976 /* remove any sysfs entries */ 968 /* remove any sysfs entries */
977 if (regulator->dev) { 969 if (regulator->dev) {
978 sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); 970 sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
@@ -1034,29 +1026,26 @@ static int _regulator_enable(struct regulator_dev *rdev)
1034 * regulator_enable - enable regulator output 1026 * regulator_enable - enable regulator output
1035 * @regulator: regulator source 1027 * @regulator: regulator source
1036 * 1028 *
1037 * Enable the regulator output at the predefined voltage or current value. 1029 * Request that the regulator be enabled with the regulator output at
1030 * the predefined voltage or current value. Calls to regulator_enable()
1031 * must be balanced with calls to regulator_disable().
1032 *
1038 * NOTE: the output value can be set by other drivers, boot loader or may be 1033 * NOTE: the output value can be set by other drivers, boot loader or may be
1039 * hardwired in the regulator. 1034 * hardwired in the regulator.
1040 * NOTE: calls to regulator_enable() must be balanced with calls to
1041 * regulator_disable().
1042 */ 1035 */
1043int regulator_enable(struct regulator *regulator) 1036int regulator_enable(struct regulator *regulator)
1044{ 1037{
1045 int ret; 1038 struct regulator_dev *rdev = regulator->rdev;
1046 1039 int ret = 0;
1047 if (regulator->enabled) {
1048 printk(KERN_CRIT "Regulator %s already enabled\n",
1049 regulator->supply_name);
1050 WARN_ON(regulator->enabled);
1051 return 0;
1052 }
1053 1040
1054 mutex_lock(&regulator->rdev->mutex); 1041 mutex_lock(&rdev->mutex);
1055 regulator->enabled = 1; 1042 if (regulator->enabled == 0)
1056 ret = _regulator_enable(regulator->rdev); 1043 ret = _regulator_enable(rdev);
1057 if (ret != 0) 1044 else if (regulator->enabled < 0)
1058 regulator->enabled = 0; 1045 ret = -EIO;
1059 mutex_unlock(&regulator->rdev->mutex); 1046 if (ret == 0)
1047 regulator->enabled++;
1048 mutex_unlock(&rdev->mutex);
1060 return ret; 1049 return ret;
1061} 1050}
1062EXPORT_SYMBOL_GPL(regulator_enable); 1051EXPORT_SYMBOL_GPL(regulator_enable);
@@ -1100,27 +1089,31 @@ static int _regulator_disable(struct regulator_dev *rdev)
1100 * regulator_disable - disable regulator output 1089 * regulator_disable - disable regulator output
1101 * @regulator: regulator source 1090 * @regulator: regulator source
1102 * 1091 *
1103 * Disable the regulator output voltage or current. 1092 * Disable the regulator output voltage or current. Calls to
1104 * NOTE: this will only disable the regulator output if no other consumer 1093 * regulator_enable() must be balanced with calls to
1105 * devices have it enabled.
1106 * NOTE: calls to regulator_enable() must be balanced with calls to
1107 * regulator_disable(). 1094 * regulator_disable().
1095 *
1096 * NOTE: this will only disable the regulator output if no other consumer
1097 * devices have it enabled, the regulator device supports disabling and
1098 * machine constraints permit this operation.
1108 */ 1099 */
1109int regulator_disable(struct regulator *regulator) 1100int regulator_disable(struct regulator *regulator)
1110{ 1101{
1111 int ret; 1102 struct regulator_dev *rdev = regulator->rdev;
1112 1103 int ret = 0;
1113 if (!regulator->enabled) {
1114 printk(KERN_ERR "%s: not in use by this consumer\n",
1115 __func__);
1116 return 0;
1117 }
1118 1104
1119 mutex_lock(&regulator->rdev->mutex); 1105 mutex_lock(&rdev->mutex);
1120 regulator->enabled = 0; 1106 if (regulator->enabled == 1) {
1121 regulator->uA_load = 0; 1107 ret = _regulator_disable(rdev);
1122 ret = _regulator_disable(regulator->rdev); 1108 if (ret == 0)
1123 mutex_unlock(&regulator->rdev->mutex); 1109 regulator->uA_load = 0;
1110 } else if (WARN(regulator->enabled <= 0,
1111 "unbalanced disables for supply %s\n",
1112 regulator->supply_name))
1113 ret = -EIO;
1114 if (ret == 0)
1115 regulator->enabled--;
1116 mutex_unlock(&rdev->mutex);
1124 return ret; 1117 return ret;
1125} 1118}
1126EXPORT_SYMBOL_GPL(regulator_disable); 1119EXPORT_SYMBOL_GPL(regulator_disable);
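
With regulator->enabled now a per-consumer count rather than a flag, each successful regulator_enable() must be matched by exactly one regulator_disable() before the handle is released. A minimal consumer sketch of the contract ("vcc_example" is a made-up supply name):

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_power_cycle(struct device *dev)
{
	struct regulator *reg = regulator_get(dev, "vcc_example");
	int ret;

	if (IS_ERR(reg))
		return PTR_ERR(reg);

	ret = regulator_enable(reg);		/* count: 0 -> 1 */
	if (ret == 0) {
		/* ... consumer makes use of the supply ... */
		ret = regulator_disable(reg);	/* count: 1 -> 0 */
	}
	regulator_put(reg);	/* WARNs and force-disables if unbalanced */
	return ret;
}

Note that regulator_is_enabled() reports the state of the underlying regulator, which other consumers may be holding enabled, not this handle's private count.
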
@@ -1196,7 +1189,13 @@ out:
1196 * regulator_is_enabled - is the regulator output enabled 1189 * regulator_is_enabled - is the regulator output enabled
1197 * @regulator: regulator source 1190 * @regulator: regulator source
1198 * 1191 *
1199 * Returns zero for disabled otherwise return number of enable requests. 1192 * Returns positive if the regulator driver backing the source/client
1193 * has requested that the device be enabled, zero if it hasn't, else a
1194 * negative errno code.
1195 *
1196 * Note that the device backing this regulator handle can have multiple
1197 * users, so it might be enabled even if regulator_enable() was never
1198 * called for this particular source.
1200 */ 1199 */
1201int regulator_is_enabled(struct regulator *regulator) 1200int regulator_is_enabled(struct regulator *regulator)
1202{ 1201{
@@ -1219,7 +1218,7 @@ EXPORT_SYMBOL_GPL(regulator_is_enabled);
1219 * 1218 *
1220 * NOTE: If the regulator is shared between several devices then the lowest 1219 * NOTE: If the regulator is shared between several devices then the lowest
1221 * request voltage that meets the system constraints will be used. 1220 * request voltage that meets the system constraints will be used.
1222 * NOTE: Regulator system constraints must be set for this regulator before 1221 * Regulator system constraints must be set for this regulator before
1223 * calling this function otherwise this call will fail. 1222 * calling this function otherwise this call will fail.
1224 */ 1223 */
1225int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV) 1224int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
@@ -1493,7 +1492,8 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
1493 mode = rdev->desc->ops->get_optimum_mode(rdev, 1492 mode = rdev->desc->ops->get_optimum_mode(rdev,
1494 input_uV, output_uV, 1493 input_uV, output_uV,
1495 total_uA_load); 1494 total_uA_load);
1496 if (ret <= 0) { 1495 ret = regulator_check_mode(rdev, mode);
1496 if (ret < 0) {
1497 printk(KERN_ERR "%s: failed to get optimum mode for %s @" 1497 printk(KERN_ERR "%s: failed to get optimum mode for %s @"
1498 " %d uA %d -> %d uV\n", __func__, rdev->desc->name, 1498 " %d uA %d -> %d uV\n", __func__, rdev->desc->name,
1499 total_uA_load, input_uV, output_uV); 1499 total_uA_load, input_uV, output_uV);
@@ -1501,7 +1501,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
1501 } 1501 }
1502 1502
1503 ret = rdev->desc->ops->set_mode(rdev, mode); 1503 ret = rdev->desc->ops->set_mode(rdev, mode);
1504 if (ret <= 0) { 1504 if (ret < 0) {
1505 printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n", 1505 printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n",
1506 __func__, mode, rdev->desc->name); 1506 __func__, mode, rdev->desc->name);
1507 goto out; 1507 goto out;
@@ -1516,7 +1516,7 @@ EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);
1516/** 1516/**
1517 * regulator_register_notifier - register regulator event notifier 1517 * regulator_register_notifier - register regulator event notifier
1518 * @regulator: regulator source 1518 * @regulator: regulator source
1519 * @notifier_block: notifier block 1519 * @nb: notifier block
1520 * 1520 *
1521 * Register notifier block to receive regulator events. 1521 * Register notifier block to receive regulator events.
1522 */ 1522 */
@@ -1531,7 +1531,7 @@ EXPORT_SYMBOL_GPL(regulator_register_notifier);
1531/** 1531/**
1532 * regulator_unregister_notifier - unregister regulator event notifier 1532 * regulator_unregister_notifier - unregister regulator event notifier
1533 * @regulator: regulator source 1533 * @regulator: regulator source
1534 * @notifier_block: notifier block 1534 * @nb: notifier block
1535 * 1535 *
1536 * Unregister regulator event notifier block. 1536 * Unregister regulator event notifier block.
1537 */ 1537 */
@@ -1697,9 +1697,9 @@ EXPORT_SYMBOL_GPL(regulator_bulk_free);
1697 1697
1698/** 1698/**
1699 * regulator_notifier_call_chain - call regulator event notifier 1699 * regulator_notifier_call_chain - call regulator event notifier
1700 * @regulator: regulator source 1700 * @rdev: regulator source
 1701 * @event: notifier event 1701 * @event: notifier event
1702 * @data: 1702 * @data: callback-specific data.
1703 * 1703 *
1704 * Called by regulator drivers to notify clients a regulator event has 1704 * Called by regulator drivers to notify clients a regulator event has
1705 * occurred. We also notify regulator clients downstream. 1705 * occurred. We also notify regulator clients downstream.
@@ -1713,10 +1713,122 @@ int regulator_notifier_call_chain(struct regulator_dev *rdev,
1713} 1713}
1714EXPORT_SYMBOL_GPL(regulator_notifier_call_chain); 1714EXPORT_SYMBOL_GPL(regulator_notifier_call_chain);
1715 1715
1716/*
1717 * To avoid cluttering sysfs (and memory) with useless state, only
1718 * create attributes that can be meaningfully displayed.
1719 */
1720static int add_regulator_attributes(struct regulator_dev *rdev)
1721{
1722 struct device *dev = &rdev->dev;
1723 struct regulator_ops *ops = rdev->desc->ops;
1724 int status = 0;
1725
1726 /* some attributes need specific methods to be displayed */
1727 if (ops->get_voltage) {
1728 status = device_create_file(dev, &dev_attr_microvolts);
1729 if (status < 0)
1730 return status;
1731 }
1732 if (ops->get_current_limit) {
1733 status = device_create_file(dev, &dev_attr_microamps);
1734 if (status < 0)
1735 return status;
1736 }
1737 if (ops->get_mode) {
1738 status = device_create_file(dev, &dev_attr_opmode);
1739 if (status < 0)
1740 return status;
1741 }
1742 if (ops->is_enabled) {
1743 status = device_create_file(dev, &dev_attr_state);
1744 if (status < 0)
1745 return status;
1746 }
1747
1748 /* some attributes are type-specific */
1749 if (rdev->desc->type == REGULATOR_CURRENT) {
1750 status = device_create_file(dev, &dev_attr_requested_microamps);
1751 if (status < 0)
1752 return status;
1753 }
1754
1755 /* all the other attributes exist to support constraints;
1756 * don't show them if there are no constraints, or if the
1757 * relevant supporting methods are missing.
1758 */
1759 if (!rdev->constraints)
1760 return status;
1761
1762 /* constraints need specific supporting methods */
1763 if (ops->set_voltage) {
1764 status = device_create_file(dev, &dev_attr_min_microvolts);
1765 if (status < 0)
1766 return status;
1767 status = device_create_file(dev, &dev_attr_max_microvolts);
1768 if (status < 0)
1769 return status;
1770 }
1771 if (ops->set_current_limit) {
1772 status = device_create_file(dev, &dev_attr_min_microamps);
1773 if (status < 0)
1774 return status;
1775 status = device_create_file(dev, &dev_attr_max_microamps);
1776 if (status < 0)
1777 return status;
1778 }
1779
1780 /* suspend mode constraints need multiple supporting methods */
1781 if (!(ops->set_suspend_enable && ops->set_suspend_disable))
1782 return status;
1783
1784 status = device_create_file(dev, &dev_attr_suspend_standby_state);
1785 if (status < 0)
1786 return status;
1787 status = device_create_file(dev, &dev_attr_suspend_mem_state);
1788 if (status < 0)
1789 return status;
1790 status = device_create_file(dev, &dev_attr_suspend_disk_state);
1791 if (status < 0)
1792 return status;
1793
1794 if (ops->set_suspend_voltage) {
1795 status = device_create_file(dev,
1796 &dev_attr_suspend_standby_microvolts);
1797 if (status < 0)
1798 return status;
1799 status = device_create_file(dev,
1800 &dev_attr_suspend_mem_microvolts);
1801 if (status < 0)
1802 return status;
1803 status = device_create_file(dev,
1804 &dev_attr_suspend_disk_microvolts);
1805 if (status < 0)
1806 return status;
1807 }
1808
1809 if (ops->set_suspend_mode) {
1810 status = device_create_file(dev,
1811 &dev_attr_suspend_standby_mode);
1812 if (status < 0)
1813 return status;
1814 status = device_create_file(dev,
1815 &dev_attr_suspend_mem_mode);
1816 if (status < 0)
1817 return status;
1818 status = device_create_file(dev,
1819 &dev_attr_suspend_disk_mode);
1820 if (status < 0)
1821 return status;
1822 }
1823
1824 return status;
1825}
1826
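
Which optional files appear is now driven entirely by the ops a driver fills in. A hypothetical minimal driver: with only get_voltage and is_enabled implemented, add_regulator_attributes() creates just microvolts and state on top of the unconditional name, num_users and type entries:

#include <linux/regulator/driver.h>

static int example_get_voltage(struct regulator_dev *rdev)
{
	return 3300000;		/* fixed 3.3V output, illustration only */
}

static int example_is_enabled(struct regulator_dev *rdev)
{
	return 1;		/* always on, illustration only */
}

static struct regulator_ops example_ops = {
	.get_voltage	= example_get_voltage,
	.is_enabled	= example_is_enabled,
};
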
1716/** 1827/**
1717 * regulator_register - register regulator 1828 * regulator_register - register regulator
1718 * @regulator: regulator source 1829 * @regulator_desc: regulator to register
1719 * @reg_data: private regulator data 1830 * @dev: struct device for the regulator
1831 * @driver_data: private regulator data
1720 * 1832 *
1721 * Called by regulator drivers to register a regulator. 1833 * Called by regulator drivers to register a regulator.
1722 * Returns 0 on success. 1834 * Returns 0 on success.
@@ -1761,45 +1873,37 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
 1761 /* perform any regulator specific init */ 1873 /* perform any regulator specific init */
1762 if (init_data->regulator_init) { 1874 if (init_data->regulator_init) {
1763 ret = init_data->regulator_init(rdev->reg_data); 1875 ret = init_data->regulator_init(rdev->reg_data);
1764 if (ret < 0) { 1876 if (ret < 0)
1765 kfree(rdev); 1877 goto clean;
1766 rdev = ERR_PTR(ret);
1767 goto out;
1768 }
1769 }
1770
1771 /* set regulator constraints */
1772 ret = set_machine_constraints(rdev, &init_data->constraints);
1773 if (ret < 0) {
1774 kfree(rdev);
1775 rdev = ERR_PTR(ret);
1776 goto out;
1777 } 1878 }
1778 1879
1779 /* register with sysfs */ 1880 /* register with sysfs */
1780 rdev->dev.class = &regulator_class; 1881 rdev->dev.class = &regulator_class;
1781 rdev->dev.parent = dev; 1882 rdev->dev.parent = dev;
1782 snprintf(rdev->dev.bus_id, sizeof(rdev->dev.bus_id), 1883 dev_set_name(&rdev->dev, "regulator.%d",
1783 "regulator.%d", atomic_inc_return(&regulator_no) - 1); 1884 atomic_inc_return(&regulator_no) - 1);
1784 ret = device_register(&rdev->dev); 1885 ret = device_register(&rdev->dev);
1785 if (ret != 0) { 1886 if (ret != 0)
1786 kfree(rdev); 1887 goto clean;
1787 rdev = ERR_PTR(ret);
1788 goto out;
1789 }
1790 1888
1791 dev_set_drvdata(&rdev->dev, rdev); 1889 dev_set_drvdata(&rdev->dev, rdev);
1792 1890
1891 /* set regulator constraints */
1892 ret = set_machine_constraints(rdev, &init_data->constraints);
1893 if (ret < 0)
1894 goto scrub;
1895
1896 /* add attributes supported by this regulator */
1897 ret = add_regulator_attributes(rdev);
1898 if (ret < 0)
1899 goto scrub;
1900
1793 /* set supply regulator if it exists */ 1901 /* set supply regulator if it exists */
1794 if (init_data->supply_regulator_dev) { 1902 if (init_data->supply_regulator_dev) {
1795 ret = set_supply(rdev, 1903 ret = set_supply(rdev,
1796 dev_get_drvdata(init_data->supply_regulator_dev)); 1904 dev_get_drvdata(init_data->supply_regulator_dev));
1797 if (ret < 0) { 1905 if (ret < 0)
1798 device_unregister(&rdev->dev); 1906 goto scrub;
1799 kfree(rdev);
1800 rdev = ERR_PTR(ret);
1801 goto out;
1802 }
1803 } 1907 }
1804 1908
 1805 /* add consumer devices */ 1909 /* add consumer devices */
@@ -1811,10 +1915,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
1811 for (--i; i >= 0; i--) 1915 for (--i; i >= 0; i--)
1812 unset_consumer_device_supply(rdev, 1916 unset_consumer_device_supply(rdev,
1813 init_data->consumer_supplies[i].dev); 1917 init_data->consumer_supplies[i].dev);
1814 device_unregister(&rdev->dev); 1918 goto scrub;
1815 kfree(rdev);
1816 rdev = ERR_PTR(ret);
1817 goto out;
1818 } 1919 }
1819 } 1920 }
1820 1921
@@ -1822,12 +1923,19 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
1822out: 1923out:
1823 mutex_unlock(&regulator_list_mutex); 1924 mutex_unlock(&regulator_list_mutex);
1824 return rdev; 1925 return rdev;
1926
1927scrub:
1928 device_unregister(&rdev->dev);
1929clean:
1930 kfree(rdev);
1931 rdev = ERR_PTR(ret);
1932 goto out;
1825} 1933}
1826EXPORT_SYMBOL_GPL(regulator_register); 1934EXPORT_SYMBOL_GPL(regulator_register);
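The hunk above replaces five copies of kfree()/ERR_PTR()/goto-out with two shared labels: "clean" when only the allocation exists, and "scrub" once the device is registered and must be unregistered too. A minimal, self-contained sketch of the same unwind pattern; all widget_* names are hypothetical, only the label structure mirrors the patch:

    #include <linux/slab.h>
    #include <linux/err.h>

    struct widget { int dummy; };

    static int widget_device_add(struct widget *w) { return 0; }     /* hypothetical */
    static void widget_device_del(struct widget *w) { }              /* hypothetical */
    static int widget_add_attributes(struct widget *w) { return 0; } /* hypothetical */

    static struct widget *widget_register(void)
    {
            struct widget *w;
            int ret;

            w = kzalloc(sizeof(*w), GFP_KERNEL);
            if (!w)
                    return ERR_PTR(-ENOMEM);

            ret = widget_device_add(w);
            if (ret < 0)
                    goto clean;             /* nothing registered yet */

            ret = widget_add_attributes(w);
            if (ret < 0)
                    goto scrub;             /* must also unregister the device */

            return w;

    scrub:
            widget_device_del(w);
    clean:
            kfree(w);
            return ERR_PTR(ret);
    }

Each failure site then carries exactly one goto, and the teardown order lives in one place instead of five.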
1827 1935
1828/** 1936/**
1829 * regulator_unregister - unregister regulator 1937 * regulator_unregister - unregister regulator
1830 * @regulator: regulator source 1938 * @rdev: regulator to unregister
1831 * 1939 *
1832 * Called by regulator drivers to unregister a regulator. 1940 * Called by regulator drivers to unregister a regulator.
1833 */ 1941 */
@@ -1846,7 +1954,7 @@ void regulator_unregister(struct regulator_dev *rdev)
1846EXPORT_SYMBOL_GPL(regulator_unregister); 1954EXPORT_SYMBOL_GPL(regulator_unregister);
1847 1955
1848/** 1956/**
1849 * regulator_suspend_prepare: prepare regulators for system wide suspend 1957 * regulator_suspend_prepare - prepare regulators for system wide suspend
1850 * @state: system suspend state 1958 * @state: system suspend state
1851 * 1959 *
1852 * Configure each regulator with its suspend operating parameters for state. 1960 * Configure each regulator with its suspend operating parameters for state.
@@ -1882,7 +1990,7 @@ EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
1882 1990
1883/** 1991/**
1884 * rdev_get_drvdata - get rdev regulator driver data 1992 * rdev_get_drvdata - get rdev regulator driver data
1885 * @regulator: regulator 1993 * @rdev: regulator
1886 * 1994 *
1887 * Get rdev regulator driver private data. This call can be used in the 1995 * Get rdev regulator driver private data. This call can be used in the
1888 * regulator driver context. 1996 * regulator driver context.
@@ -1919,7 +2027,7 @@ EXPORT_SYMBOL_GPL(regulator_set_drvdata);
1919 2027
1920/** 2028/**
1921 * rdev_get_id - get regulator ID 2029 * rdev_get_id - get regulator ID
1922 * @regulator: regulator 2030 * @rdev: regulator
1923 */ 2031 */
1924int rdev_get_id(struct regulator_dev *rdev) 2032int rdev_get_id(struct regulator_dev *rdev)
1925{ 2033{
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 773b29cec8be..fe77730a7edb 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -102,7 +102,7 @@ static int da903x_set_ldo_voltage(struct regulator_dev *rdev,
102 uint8_t val, mask; 102 uint8_t val, mask;
103 103
104 if (check_range(info, min_uV, max_uV)) { 104 if (check_range(info, min_uV, max_uV)) {
105 pr_err("invalid voltage range (%d, %d) uV", min_uV, max_uV); 105 pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
106 return -EINVAL; 106 return -EINVAL;
107 } 107 }
108 108
@@ -159,7 +159,7 @@ static int da903x_is_enabled(struct regulator_dev *rdev)
159 if (ret) 159 if (ret)
160 return ret; 160 return ret;
161 161
162 return reg_val & (1 << info->enable_bit); 162 return !!(reg_val & (1 << info->enable_bit));
163} 163}
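The !! change above matters because is_enabled callers expect exactly 0 or 1, while reg_val & (1 << enable_bit) yields the raw masked value (for example 0x10 for bit 4), which is truthy but not 1. A small standalone C illustration with made-up values:

    #include <stdio.h>

    int main(void)
    {
            unsigned char reg_val = 0x10;   /* assumed register contents */
            int enable_bit = 4;

            /* raw mask: nonzero, but not the 0/1 the API promises */
            printf("%d\n", reg_val & (1 << enable_bit));    /* prints 16 */
            /* normalized: exactly 0 or 1 */
            printf("%d\n", !!(reg_val & (1 << enable_bit)));/* prints 1 */
            return 0;
    }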
164 164
165/* DA9030 specific operations */ 165/* DA9030 specific operations */
@@ -172,7 +172,7 @@ static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev,
172 int ret; 172 int ret;
173 173
174 if (check_range(info, min_uV, max_uV)) { 174 if (check_range(info, min_uV, max_uV)) {
175 pr_err("invalid voltage range (%d, %d) uV", min_uV, max_uV); 175 pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
176 return -EINVAL; 176 return -EINVAL;
177 } 177 }
178 178
@@ -199,7 +199,7 @@ static int da9030_set_ldo14_voltage(struct regulator_dev *rdev,
199 int thresh; 199 int thresh;
200 200
201 if (check_range(info, min_uV, max_uV)) { 201 if (check_range(info, min_uV, max_uV)) {
202 pr_err("invalid voltage range (%d, %d) uV", min_uV, max_uV); 202 pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
203 return -EINVAL; 203 return -EINVAL;
204 } 204 }
205 205
@@ -248,7 +248,7 @@ static int da9034_set_dvc_voltage(struct regulator_dev *rdev,
248 int ret; 248 int ret;
249 249
250 if (check_range(info, min_uV, max_uV)) { 250 if (check_range(info, min_uV, max_uV)) {
251 pr_err("invalid voltage range (%d, %d) uV", min_uV, max_uV); 251 pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
252 return -EINVAL; 252 return -EINVAL;
253 } 253 }
254 254
@@ -273,7 +273,7 @@ static int da9034_set_ldo12_voltage(struct regulator_dev *rdev,
273 uint8_t val, mask; 273 uint8_t val, mask;
274 274
275 if (check_range(info, min_uV, max_uV)) { 275 if (check_range(info, min_uV, max_uV)) {
276 pr_err("invalid voltage range (%d, %d) uV", min_uV, max_uV); 276 pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
277 return -EINVAL; 277 return -EINVAL;
278 } 278 }
279 279
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index c68c496b2c49..7aa35248181b 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1412,6 +1412,97 @@ int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
1412} 1412}
1413EXPORT_SYMBOL_GPL(wm8350_register_regulator); 1413EXPORT_SYMBOL_GPL(wm8350_register_regulator);
1414 1414
1415/**
1416 * wm8350_register_led - Register a WM8350 LED output
1417 *
1418 * @param wm8350 The WM8350 device to configure.
1419 * @param lednum LED device index to create.
1420 * @param dcdc The DCDC to use for the LED.
1421 * @param isink The ISINK to use for the LED.
1422 * @param pdata Configuration for the LED.
1423 *
1424 * The WM8350 supports the use of an ISINK together with a DCDC to
1425 * provide a power-efficient LED driver. This function registers the
1426 * regulators and instantiates the platform device for a LED. The
1427 * operating modes for the LED regulators must be configured using
1428 * wm8350_isink_set_flash(), wm8350_dcdc25_set_mode() and
1429 * wm8350_dcdc_set_slot() prior to calling this function.
1430 */
1431int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
1432 struct wm8350_led_platform_data *pdata)
1433{
1434 struct wm8350_led *led;
1435 struct platform_device *pdev;
1436 int ret;
1437
1438 if (lednum >= ARRAY_SIZE(wm8350->pmic.led) || lednum < 0) {
1439 dev_err(wm8350->dev, "Invalid LED index %d\n", lednum);
1440 return -ENODEV;
1441 }
1442
1443 led = &wm8350->pmic.led[lednum];
1444
1445 if (led->pdev) {
1446 dev_err(wm8350->dev, "LED %d already allocated\n", lednum);
1447 return -EINVAL;
1448 }
1449
1450 pdev = platform_device_alloc("wm8350-led", lednum);
1451 if (pdev == NULL) {
1452 dev_err(wm8350->dev, "Failed to allocate LED %d\n", lednum);
1453 return -ENOMEM;
1454 }
1455
1456 led->isink_consumer.dev = &pdev->dev;
1457 led->isink_consumer.supply = "led_isink";
1458 led->isink_init.num_consumer_supplies = 1;
1459 led->isink_init.consumer_supplies = &led->isink_consumer;
1460 led->isink_init.constraints.min_uA = 0;
1461 led->isink_init.constraints.max_uA = pdata->max_uA;
1462 led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT;
1463 led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
1464 ret = wm8350_register_regulator(wm8350, isink, &led->isink_init);
1465 if (ret != 0) {
1466 platform_device_put(pdev);
1467 return ret;
1468 }
1469
1470 led->dcdc_consumer.dev = &pdev->dev;
1471 led->dcdc_consumer.supply = "led_vcc";
1472 led->dcdc_init.num_consumer_supplies = 1;
1473 led->dcdc_init.consumer_supplies = &led->dcdc_consumer;
1474 led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
1475 ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init);
1476 if (ret != 0) {
1477 platform_device_put(pdev);
1478 return ret;
1479 }
1480
1481 switch (isink) {
1482 case WM8350_ISINK_A:
1483 wm8350->pmic.isink_A_dcdc = dcdc;
1484 break;
1485 case WM8350_ISINK_B:
1486 wm8350->pmic.isink_B_dcdc = dcdc;
1487 break;
1488 }
1489
1490 pdev->dev.platform_data = pdata;
1491 pdev->dev.parent = wm8350->dev;
1492 ret = platform_device_add(pdev);
1493 if (ret != 0) {
1494 dev_err(wm8350->dev, "Failed to register LED %d: %d\n",
1495 lednum, ret);
1496 platform_device_put(pdev);
1497 return ret;
1498 }
1499
1500 led->pdev = pdev;
1501
1502 return 0;
1503}
1504EXPORT_SYMBOL_GPL(wm8350_register_led);
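A sketch of how a board file might call the new helper. The platform-data value, the init-hook name, and the choice of WM8350_DCDC_5 are assumptions for illustration; only wm8350_register_led(), WM8350_ISINK_A, and the max_uA field come from the code above:

    static struct wm8350_led_platform_data board_led_pdata = {
            .max_uA = 20000,        /* hypothetical 20 mA current limit */
    };

    static int board_wm8350_init(struct wm8350 *wm8350)
    {
            /* ISINK/DCDC operating modes must be configured first,
             * per the kernel-doc above */
            return wm8350_register_led(wm8350, 0, WM8350_DCDC_5,
                                       WM8350_ISINK_A, &board_led_pdata);
    }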
1505
1415static struct platform_driver wm8350_regulator_driver = { 1506static struct platform_driver wm8350_regulator_driver = {
1416 .probe = wm8350_regulator_probe, 1507 .probe = wm8350_regulator_probe,
1417 .remove = wm8350_regulator_remove, 1508 .remove = wm8350_regulator_remove,
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 162330b9d1dc..7e5155e88ac7 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -86,13 +86,11 @@ enum ds_type {
86 86
87 87
88struct ds1307 { 88struct ds1307 {
89 u8 reg_addr;
90 u8 regs[11]; 89 u8 regs[11];
91 enum ds_type type; 90 enum ds_type type;
92 unsigned long flags; 91 unsigned long flags;
93#define HAS_NVRAM 0 /* bit 0 == sysfs file active */ 92#define HAS_NVRAM 0 /* bit 0 == sysfs file active */
94#define HAS_ALARM 1 /* bit 1 == irq claimed */ 93#define HAS_ALARM 1 /* bit 1 == irq claimed */
95 struct i2c_msg msg[2];
96 struct i2c_client *client; 94 struct i2c_client *client;
97 struct rtc_device *rtc; 95 struct rtc_device *rtc;
98 struct work_struct work; 96 struct work_struct work;
@@ -204,13 +202,9 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
204 int tmp; 202 int tmp;
205 203
206 /* read the RTC date and time registers all at once */ 204 /* read the RTC date and time registers all at once */
207 ds1307->reg_addr = 0; 205 tmp = i2c_smbus_read_i2c_block_data(ds1307->client,
208 ds1307->msg[1].flags = I2C_M_RD; 206 DS1307_REG_SECS, 7, ds1307->regs);
209 ds1307->msg[1].len = 7; 207 if (tmp != 7) {
210
211 tmp = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
212 ds1307->msg, 2);
213 if (tmp != 2) {
214 dev_err(dev, "%s error %d\n", "read", tmp); 208 dev_err(dev, "%s error %d\n", "read", tmp);
215 return -EIO; 209 return -EIO;
216 } 210 }
@@ -257,7 +251,6 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
257 t->tm_hour, t->tm_mday, 251 t->tm_hour, t->tm_mday,
258 t->tm_mon, t->tm_year, t->tm_wday); 252 t->tm_mon, t->tm_year, t->tm_wday);
259 253
260 *buf++ = 0; /* first register addr */
261 buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec); 254 buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
262 buf[DS1307_REG_MIN] = bin2bcd(t->tm_min); 255 buf[DS1307_REG_MIN] = bin2bcd(t->tm_min);
263 buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour); 256 buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
@@ -282,23 +275,19 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
282 break; 275 break;
283 } 276 }
284 277
285 ds1307->msg[1].flags = 0;
286 ds1307->msg[1].len = 8;
287
288 dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n", 278 dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
289 "write", buf[0], buf[1], buf[2], buf[3], 279 "write", buf[0], buf[1], buf[2], buf[3],
290 buf[4], buf[5], buf[6]); 280 buf[4], buf[5], buf[6]);
291 281
292 result = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent), 282 result = i2c_smbus_write_i2c_block_data(ds1307->client, 0, 7, buf);
293 &ds1307->msg[1], 1); 283 if (result < 0) {
294 if (result != 1) { 284 dev_err(dev, "%s error %d\n", "write", result);
295 dev_err(dev, "%s error %d\n", "write", tmp); 285 return result;
296 return -EIO;
297 } 286 }
298 return 0; 287 return 0;
299} 288}
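The conversion above is the recurring pattern in this file: a two-message i2c_transfer() (register-address write followed by a data read) collapses into one SMBus block call, and the success test changes from "number of messages transferred" to "number of bytes". A minimal sketch of both directions; the register offset and length are placeholders:

    #include <linux/i2c.h>

    /* sketch only: `client` is assumed to be a bound i2c_client */
    static int example_read(struct i2c_client *client, u8 *buf)
    {
            int ret = i2c_smbus_read_i2c_block_data(client, 0x00, 7, buf);

            if (ret != 7)                   /* returns byte count on success */
                    return ret < 0 ? ret : -EIO;
            return 0;
    }

    static int example_write(struct i2c_client *client, const u8 *buf)
    {
            /* returns 0 on success, negative errno on failure */
            return i2c_smbus_write_i2c_block_data(client, 0x00, 7, buf);
    }

This also removes the per-device i2c_msg bookkeeping, which is why the reg_addr and msg[2] fields can be dropped from struct ds1307.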
300 289
301static int ds1307_read_alarm(struct device *dev, struct rtc_wkalrm *t) 290static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t)
302{ 291{
303 struct i2c_client *client = to_i2c_client(dev); 292 struct i2c_client *client = to_i2c_client(dev);
304 struct ds1307 *ds1307 = i2c_get_clientdata(client); 293 struct ds1307 *ds1307 = i2c_get_clientdata(client);
@@ -308,13 +297,9 @@ static int ds1307_read_alarm(struct device *dev, struct rtc_wkalrm *t)
308 return -EINVAL; 297 return -EINVAL;
309 298
310 /* read all ALARM1, ALARM2, and status registers at once */ 299 /* read all ALARM1, ALARM2, and status registers at once */
311 ds1307->reg_addr = DS1339_REG_ALARM1_SECS; 300 ret = i2c_smbus_read_i2c_block_data(client,
312 ds1307->msg[1].flags = I2C_M_RD; 301 DS1339_REG_ALARM1_SECS, 9, ds1307->regs);
313 ds1307->msg[1].len = 9; 302 if (ret != 9) {
314
315 ret = i2c_transfer(to_i2c_adapter(client->dev.parent),
316 ds1307->msg, 2);
317 if (ret != 2) {
318 dev_err(dev, "%s error %d\n", "alarm read", ret); 303 dev_err(dev, "%s error %d\n", "alarm read", ret);
319 return -EIO; 304 return -EIO;
320 } 305 }
@@ -353,7 +338,7 @@ static int ds1307_read_alarm(struct device *dev, struct rtc_wkalrm *t)
353 return 0; 338 return 0;
354} 339}
355 340
356static int ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t) 341static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
357{ 342{
358 struct i2c_client *client = to_i2c_client(dev); 343 struct i2c_client *client = to_i2c_client(dev);
359 struct ds1307 *ds1307 = i2c_get_clientdata(client); 344 struct ds1307 *ds1307 = i2c_get_clientdata(client);
@@ -371,13 +356,9 @@ static int ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t)
371 t->enabled, t->pending); 356 t->enabled, t->pending);
372 357
373 /* read current status of both alarms and the chip */ 358 /* read current status of both alarms and the chip */
374 ds1307->reg_addr = DS1339_REG_ALARM1_SECS; 359 ret = i2c_smbus_read_i2c_block_data(client,
375 ds1307->msg[1].flags = I2C_M_RD; 360 DS1339_REG_ALARM1_SECS, 9, buf);
376 ds1307->msg[1].len = 9; 361 if (ret != 9) {
377
378 ret = i2c_transfer(to_i2c_adapter(client->dev.parent),
379 ds1307->msg, 2);
380 if (ret != 2) {
381 dev_err(dev, "%s error %d\n", "alarm write", ret); 362 dev_err(dev, "%s error %d\n", "alarm write", ret);
382 return -EIO; 363 return -EIO;
383 } 364 }
@@ -392,7 +373,6 @@ static int ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t)
392 ds1307->regs[6], control, status); 373 ds1307->regs[6], control, status);
393 374
394 /* set ALARM1, using 24 hour and day-of-month modes */ 375 /* set ALARM1, using 24 hour and day-of-month modes */
395 *buf++ = DS1339_REG_ALARM1_SECS; /* first register addr */
396 buf[0] = bin2bcd(t->time.tm_sec); 376 buf[0] = bin2bcd(t->time.tm_sec);
397 buf[1] = bin2bcd(t->time.tm_min); 377 buf[1] = bin2bcd(t->time.tm_min);
398 buf[2] = bin2bcd(t->time.tm_hour); 378 buf[2] = bin2bcd(t->time.tm_hour);
@@ -411,14 +391,11 @@ static int ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t)
411 } 391 }
412 buf[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I); 392 buf[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I);
413 393
414 ds1307->msg[1].flags = 0; 394 ret = i2c_smbus_write_i2c_block_data(client,
415 ds1307->msg[1].len = 10; 395 DS1339_REG_ALARM1_SECS, 9, buf);
416 396 if (ret < 0) {
417 ret = i2c_transfer(to_i2c_adapter(client->dev.parent),
418 &ds1307->msg[1], 1);
419 if (ret != 1) {
420 dev_err(dev, "can't set alarm time\n"); 397 dev_err(dev, "can't set alarm time\n");
421 return -EIO; 398 return ret;
422 } 399 }
423 400
424 return 0; 401 return 0;
@@ -475,8 +452,8 @@ static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
475static const struct rtc_class_ops ds13xx_rtc_ops = { 452static const struct rtc_class_ops ds13xx_rtc_ops = {
476 .read_time = ds1307_get_time, 453 .read_time = ds1307_get_time,
477 .set_time = ds1307_set_time, 454 .set_time = ds1307_set_time,
478 .read_alarm = ds1307_read_alarm, 455 .read_alarm = ds1337_read_alarm,
479 .set_alarm = ds1307_set_alarm, 456 .set_alarm = ds1337_set_alarm,
480 .ioctl = ds1307_ioctl, 457 .ioctl = ds1307_ioctl,
481}; 458};
482 459
@@ -490,7 +467,6 @@ ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
490{ 467{
491 struct i2c_client *client; 468 struct i2c_client *client;
492 struct ds1307 *ds1307; 469 struct ds1307 *ds1307;
493 struct i2c_msg msg[2];
494 int result; 470 int result;
495 471
496 client = kobj_to_i2c_client(kobj); 472 client = kobj_to_i2c_client(kobj);
@@ -503,24 +479,10 @@ ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
503 if (unlikely(!count)) 479 if (unlikely(!count))
504 return count; 480 return count;
505 481
506 msg[0].addr = client->addr; 482 result = i2c_smbus_read_i2c_block_data(client, 8 + off, count, buf);
507 msg[0].flags = 0; 483 if (result < 0)
508 msg[0].len = 1;
509 msg[0].buf = buf;
510
511 buf[0] = 8 + off;
512
513 msg[1].addr = client->addr;
514 msg[1].flags = I2C_M_RD;
515 msg[1].len = count;
516 msg[1].buf = buf;
517
518 result = i2c_transfer(to_i2c_adapter(client->dev.parent), msg, 2);
519 if (result != 2) {
520 dev_err(&client->dev, "%s error %d\n", "nvram read", result); 484 dev_err(&client->dev, "%s error %d\n", "nvram read", result);
521 return -EIO; 485 return result;
522 }
523 return count;
524} 486}
525 487
526static ssize_t 488static ssize_t
@@ -528,8 +490,7 @@ ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
528 char *buf, loff_t off, size_t count) 490 char *buf, loff_t off, size_t count)
529{ 491{
530 struct i2c_client *client; 492 struct i2c_client *client;
531 u8 buffer[NVRAM_SIZE + 1]; 493 int result;
532 int ret;
533 494
534 client = kobj_to_i2c_client(kobj); 495 client = kobj_to_i2c_client(kobj);
535 496
@@ -540,11 +501,12 @@ ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
540 if (unlikely(!count)) 501 if (unlikely(!count))
541 return count; 502 return count;
542 503
543 buffer[0] = 8 + off; 504 result = i2c_smbus_write_i2c_block_data(client, 8 + off, count, buf);
544 memcpy(buffer + 1, buf, count); 505 if (result < 0) {
545 506 dev_err(&client->dev, "%s error %d\n", "nvram write", result);
546 ret = i2c_master_send(client, buffer, count + 1); 507 return result;
547 return (ret < 0) ? ret : (ret - 1); 508 }
509 return count;
548} 510}
549 511
550static struct bin_attribute nvram = { 512static struct bin_attribute nvram = {
@@ -571,9 +533,11 @@ static int __devinit ds1307_probe(struct i2c_client *client,
571 const struct chip_desc *chip = &chips[id->driver_data]; 533 const struct chip_desc *chip = &chips[id->driver_data];
572 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); 534 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
573 int want_irq = false; 535 int want_irq = false;
536 unsigned char *buf;
574 537
575 if (!i2c_check_functionality(adapter, 538 if (!i2c_check_functionality(adapter,
576 I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) 539 I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
540 I2C_FUNC_SMBUS_I2C_BLOCK))
577 return -EIO; 541 return -EIO;
578 542
579 if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL))) 543 if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL)))
@@ -581,18 +545,8 @@ static int __devinit ds1307_probe(struct i2c_client *client,
581 545
582 ds1307->client = client; 546 ds1307->client = client;
583 i2c_set_clientdata(client, ds1307); 547 i2c_set_clientdata(client, ds1307);
584
585 ds1307->msg[0].addr = client->addr;
586 ds1307->msg[0].flags = 0;
587 ds1307->msg[0].len = 1;
588 ds1307->msg[0].buf = &ds1307->reg_addr;
589
590 ds1307->msg[1].addr = client->addr;
591 ds1307->msg[1].flags = I2C_M_RD;
592 ds1307->msg[1].len = sizeof(ds1307->regs);
593 ds1307->msg[1].buf = ds1307->regs;
594
595 ds1307->type = id->driver_data; 548 ds1307->type = id->driver_data;
549 buf = ds1307->regs;
596 550
597 switch (ds1307->type) { 551 switch (ds1307->type) {
598 case ds_1337: 552 case ds_1337:
@@ -602,21 +556,15 @@ static int __devinit ds1307_probe(struct i2c_client *client,
602 INIT_WORK(&ds1307->work, ds1307_work); 556 INIT_WORK(&ds1307->work, ds1307_work);
603 want_irq = true; 557 want_irq = true;
604 } 558 }
605
606 ds1307->reg_addr = DS1337_REG_CONTROL;
607 ds1307->msg[1].len = 2;
608
609 /* get registers that the "rtc" read below won't read... */ 559 /* get registers that the "rtc" read below won't read... */
610 tmp = i2c_transfer(adapter, ds1307->msg, 2); 560 tmp = i2c_smbus_read_i2c_block_data(ds1307->client,
561 DS1337_REG_CONTROL, 2, buf);
611 if (tmp != 2) { 562 if (tmp != 2) {
612 pr_debug("read error %d\n", tmp); 563 pr_debug("read error %d\n", tmp);
613 err = -EIO; 564 err = -EIO;
614 goto exit_free; 565 goto exit_free;
615 } 566 }
616 567
617 ds1307->reg_addr = 0;
618 ds1307->msg[1].len = sizeof(ds1307->regs);
619
620 /* oscillator off? turn it on, so clock can tick. */ 568 /* oscillator off? turn it on, so clock can tick. */
621 if (ds1307->regs[0] & DS1337_BIT_nEOSC) 569 if (ds1307->regs[0] & DS1337_BIT_nEOSC)
622 ds1307->regs[0] &= ~DS1337_BIT_nEOSC; 570 ds1307->regs[0] &= ~DS1337_BIT_nEOSC;
@@ -647,9 +595,8 @@ static int __devinit ds1307_probe(struct i2c_client *client,
647 595
648read_rtc: 596read_rtc:
649 /* read RTC registers */ 597 /* read RTC registers */
650 598 tmp = i2c_smbus_read_i2c_block_data(ds1307->client, 0, 8, buf);
651 tmp = i2c_transfer(adapter, ds1307->msg, 2); 599 if (tmp != 8) {
652 if (tmp != 2) {
653 pr_debug("read error %d\n", tmp); 600 pr_debug("read error %d\n", tmp);
654 err = -EIO; 601 err = -EIO;
655 goto exit_free; 602 goto exit_free;
@@ -707,22 +654,6 @@ read_rtc:
707 break; 654 break;
708 } 655 }
709 656
710 tmp = ds1307->regs[DS1307_REG_SECS];
711 tmp = bcd2bin(tmp & 0x7f);
712 if (tmp > 60)
713 goto exit_bad;
714 tmp = bcd2bin(ds1307->regs[DS1307_REG_MIN] & 0x7f);
715 if (tmp > 60)
716 goto exit_bad;
717
718 tmp = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
719 if (tmp == 0 || tmp > 31)
720 goto exit_bad;
721
722 tmp = bcd2bin(ds1307->regs[DS1307_REG_MONTH] & 0x1f);
723 if (tmp == 0 || tmp > 12)
724 goto exit_bad;
725
726 tmp = ds1307->regs[DS1307_REG_HOUR]; 657 tmp = ds1307->regs[DS1307_REG_HOUR];
727 switch (ds1307->type) { 658 switch (ds1307->type) {
728 case ds_1340: 659 case ds_1340:
@@ -779,13 +710,6 @@ read_rtc:
779 710
780 return 0; 711 return 0;
781 712
782exit_bad:
783 dev_dbg(&client->dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
784 "bogus register",
785 ds1307->regs[0], ds1307->regs[1],
786 ds1307->regs[2], ds1307->regs[3],
787 ds1307->regs[4], ds1307->regs[5],
788 ds1307->regs[6]);
789exit_irq: 713exit_irq:
790 if (ds1307->rtc) 714 if (ds1307->rtc)
791 rtc_device_unregister(ds1307->rtc); 715 rtc_device_unregister(ds1307->rtc);
diff --git a/drivers/rtc/rtc-parisc.c b/drivers/rtc/rtc-parisc.c
index 346d633655e7..c6bfa6fe1a2a 100644
--- a/drivers/rtc/rtc-parisc.c
+++ b/drivers/rtc/rtc-parisc.c
@@ -34,7 +34,8 @@ static int parisc_get_time(struct device *dev, struct rtc_time *tm)
34static int parisc_set_time(struct device *dev, struct rtc_time *tm) 34static int parisc_set_time(struct device *dev, struct rtc_time *tm)
35{ 35{
36 struct parisc_rtc *p = dev_get_drvdata(dev); 36 struct parisc_rtc *p = dev_get_drvdata(dev);
37 unsigned long flags, ret; 37 unsigned long flags;
38 int ret;
38 39
39 spin_lock_irqsave(&p->lock, flags); 40 spin_lock_irqsave(&p->lock, flags);
40 ret = set_rtc_time(tm); 41 ret = set_rtc_time(tm);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 570ae59c1d5e..bd5914994142 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -336,6 +336,9 @@ static int
336dasd_state_ready_to_online(struct dasd_device * device) 336dasd_state_ready_to_online(struct dasd_device * device)
337{ 337{
338 int rc; 338 int rc;
339 struct gendisk *disk;
340 struct disk_part_iter piter;
341 struct hd_struct *part;
339 342
340 if (device->discipline->ready_to_online) { 343 if (device->discipline->ready_to_online) {
341 rc = device->discipline->ready_to_online(device); 344 rc = device->discipline->ready_to_online(device);
@@ -343,8 +346,14 @@ dasd_state_ready_to_online(struct dasd_device * device)
343 return rc; 346 return rc;
344 } 347 }
345 device->state = DASD_STATE_ONLINE; 348 device->state = DASD_STATE_ONLINE;
346 if (device->block) 349 if (device->block) {
347 dasd_schedule_block_bh(device->block); 350 dasd_schedule_block_bh(device->block);
351 disk = device->block->bdev->bd_disk;
352 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
353 while ((part = disk_part_iter_next(&piter)))
354 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
355 disk_part_iter_exit(&piter);
356 }
348 return 0; 357 return 0;
349} 358}
350 359
@@ -354,6 +363,9 @@ dasd_state_ready_to_online(struct dasd_device * device)
354static int dasd_state_online_to_ready(struct dasd_device *device) 363static int dasd_state_online_to_ready(struct dasd_device *device)
355{ 364{
356 int rc; 365 int rc;
366 struct gendisk *disk;
367 struct disk_part_iter piter;
368 struct hd_struct *part;
357 369
358 if (device->discipline->online_to_ready) { 370 if (device->discipline->online_to_ready) {
359 rc = device->discipline->online_to_ready(device); 371 rc = device->discipline->online_to_ready(device);
@@ -361,6 +373,13 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
361 return rc; 373 return rc;
362 } 374 }
363 device->state = DASD_STATE_READY; 375 device->state = DASD_STATE_READY;
376 if (device->block) {
377 disk = device->block->bdev->bd_disk;
378 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
379 while ((part = disk_part_iter_next(&piter)))
380 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
381 disk_part_iter_exit(&piter);
382 }
364 return 0; 383 return 0;
365} 384}
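Both state-transition hunks above add the same ten-line uevent loop. A hypothetical helper would keep the two copies in sync; the function name is invented, the iterator calls are exactly those used in the hunks:

    /* hypothetical consolidation of the duplicated loop above */
    static void dasd_notify_parts(struct dasd_block *block)
    {
            struct gendisk *disk = block->bdev->bd_disk;
            struct disk_part_iter piter;
            struct hd_struct *part;

            disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
            while ((part = disk_part_iter_next(&piter)))
                    kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
            disk_part_iter_exit(&piter);
    }

Each transition path would then reduce to a single dasd_notify_parts(device->block) call under the if (device->block) check.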
366 385
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index b8f9c00633f3..d82aad5224f0 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2621,7 +2621,7 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2621 } 2621 }
2622 } 2622 }
2623 2623
2624 /* double-check if current erp/cqr was successfull */ 2624 /* double-check if current erp/cqr was successful */
2625 if ((cqr->irb.scsw.cmd.cstat == 0x00) && 2625 if ((cqr->irb.scsw.cmd.cstat == 0x00) &&
2626 (cqr->irb.scsw.cmd.dstat == 2626 (cqr->irb.scsw.cmd.dstat ==
2627 (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { 2627 (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 2ef25731d197..300e28a531f8 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -206,6 +206,8 @@ dasd_feature_list(char *str, char **endp)
206 features |= DASD_FEATURE_USEDIAG; 206 features |= DASD_FEATURE_USEDIAG;
207 else if (len == 6 && !strncmp(str, "erplog", 6)) 207 else if (len == 6 && !strncmp(str, "erplog", 6))
208 features |= DASD_FEATURE_ERPLOG; 208 features |= DASD_FEATURE_ERPLOG;
209 else if (len == 8 && !strncmp(str, "failfast", 8))
210 features |= DASD_FEATURE_FAILFAST;
209 else { 211 else {
210 MESSAGE(KERN_WARNING, 212 MESSAGE(KERN_WARNING,
211 "unsupported feature: %*s, " 213 "unsupported feature: %*s, "
@@ -667,6 +669,51 @@ dasd_device_from_cdev(struct ccw_device *cdev)
667 */ 669 */
668 670
669/* 671/*
672 * failfast controls the behaviour, if no path is available
673 */
674static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
675 char *buf)
676{
677 struct dasd_devmap *devmap;
678 int ff_flag;
679
680 devmap = dasd_find_busid(dev->bus_id);
681 if (!IS_ERR(devmap))
682 ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0;
683 else
684 ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0;
685 return snprintf(buf, PAGE_SIZE, ff_flag ? "1\n" : "0\n");
686}
687
688static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
689 const char *buf, size_t count)
690{
691 struct dasd_devmap *devmap;
692 int val;
693 char *endp;
694
695 devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
696 if (IS_ERR(devmap))
697 return PTR_ERR(devmap);
698
699 val = simple_strtoul(buf, &endp, 0);
700 if (((endp + 1) < (buf + count)) || (val > 1))
701 return -EINVAL;
702
703 spin_lock(&dasd_devmap_lock);
704 if (val)
705 devmap->features |= DASD_FEATURE_FAILFAST;
706 else
707 devmap->features &= ~DASD_FEATURE_FAILFAST;
708 if (devmap->device)
709 devmap->device->features = devmap->features;
710 spin_unlock(&dasd_devmap_lock);
711 return count;
712}
713
714static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store);
715
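Note that the store parsing above tolerates exactly one arbitrary trailing character after the digits (intended to allow a newline), so input like "1x" without a newline is accepted. A hypothetical stricter variant using strict_strtoul(), which this kernel series already provides and which rejects trailing garbage other than a single newline:

    static ssize_t dasd_ff_store_strict(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
    {
            unsigned long val;

            if (strict_strtoul(buf, 0, &val) || val > 1)
                    return -EINVAL;
            /* ... update devmap->features under dasd_devmap_lock,
             *     exactly as in dasd_ff_store above ... */
            return count;
    }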
716/*
670 * readonly controls the readonly status of a dasd 717 * readonly controls the readonly status of a dasd
671 */ 718 */
672static ssize_t 719static ssize_t
@@ -1020,6 +1067,7 @@ static struct attribute * dasd_attrs[] = {
1020 &dev_attr_use_diag.attr, 1067 &dev_attr_use_diag.attr,
1021 &dev_attr_eer_enabled.attr, 1068 &dev_attr_eer_enabled.attr,
1022 &dev_attr_erplog.attr, 1069 &dev_attr_erplog.attr,
1070 &dev_attr_failfast.attr,
1023 NULL, 1071 NULL,
1024}; 1072};
1025 1073
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 7844461a995b..ef2a56952054 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -544,7 +544,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
544 } 544 }
545 cqr->retries = DIAG_MAX_RETRIES; 545 cqr->retries = DIAG_MAX_RETRIES;
546 cqr->buildclk = get_clock(); 546 cqr->buildclk = get_clock();
547 if (blk_noretry_request(req)) 547 if (blk_noretry_request(req) ||
548 block->base->features & DASD_FEATURE_FAILFAST)
548 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 549 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
549 cqr->startdev = memdev; 550 cqr->startdev = memdev;
550 cqr->memdev = memdev; 551 cqr->memdev = memdev;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bd2c52e20762..bdb87998f364 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1700,7 +1700,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
1700 recid++; 1700 recid++;
1701 } 1701 }
1702 } 1702 }
1703 if (blk_noretry_request(req)) 1703 if (blk_noretry_request(req) ||
1704 block->base->features & DASD_FEATURE_FAILFAST)
1704 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1705 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1705 cqr->startdev = startdev; 1706 cqr->startdev = startdev;
1706 cqr->memdev = startdev; 1707 cqr->memdev = startdev;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 7d442aeff3d1..f1d176021694 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -355,7 +355,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
355 recid++; 355 recid++;
356 } 356 }
357 } 357 }
358 if (blk_noretry_request(req)) 358 if (blk_noretry_request(req) ||
359 block->base->features & DASD_FEATURE_FAILFAST)
359 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 360 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
360 cqr->startdev = memdev; 361 cqr->startdev = memdev;
361 cqr->memdev = memdev; 362 cqr->memdev = memdev;
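The same two-line test is added to the diag, ECKD, and FBA build_cp paths above. A hypothetical shared helper (the name is invented; the test itself is taken verbatim from the hunks) would express the intent once:

    static inline int dasd_request_is_failfast(struct request *req,
                                               struct dasd_device *base)
    {
            return blk_noretry_request(req) ||
                   (base->features & DASD_FEATURE_FAILFAST) != 0;
    }

    /* usage in each build_cp path:
     *      if (dasd_request_is_failfast(req, block->base))
     *              set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
     */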
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 05a14536c369..4a39084d9c95 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -199,7 +199,7 @@ struct dasd_ccw_req {
199#define DASD_CQR_ERROR 0x82 /* request is completed with error */ 199#define DASD_CQR_ERROR 0x82 /* request is completed with error */
200#define DASD_CQR_CLEAR_PENDING 0x83 /* request is clear pending */ 200#define DASD_CQR_CLEAR_PENDING 0x83 /* request is clear pending */
201#define DASD_CQR_CLEARED 0x84 /* request was cleared */ 201#define DASD_CQR_CLEARED 0x84 /* request was cleared */
202#define DASD_CQR_SUCCESS 0x85 /* request was successfull */ 202#define DASD_CQR_SUCCESS 0x85 /* request was successful */
203 203
204 204
205/* per dasd_ccw_req flags */ 205/* per dasd_ccw_req flags */
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 643033890e34..0769ced52dbd 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -100,7 +100,7 @@ comment "S/390 tape interface support"
100 100
101config S390_TAPE_BLOCK 101config S390_TAPE_BLOCK
102 bool "Support for tape block devices" 102 bool "Support for tape block devices"
103 depends on S390_TAPE 103 depends on S390_TAPE && BLOCK
104 help 104 help
105 Select this option if you want to access your channel-attached tape 105 Select this option if you want to access your channel-attached tape
106 devices using the block device interface. This interface is similar 106 devices using the block device interface. This interface is similar
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 4005c44a404c..71605a179d65 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -801,7 +801,7 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
801static inline int 801static inline int
802tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) 802tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request)
803{ 803{
804 DBF_EVENT(3, "Error Recovery successfull for %s\n", 804 DBF_EVENT(3, "Error Recovery successful for %s\n",
805 tape_op_verbose[request->op]); 805 tape_op_verbose[request->op]);
806 return tape_3590_done(device, request); 806 return tape_3590_done(device, request);
807} 807}
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 06b71823f399..659f8a791656 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -379,7 +379,7 @@ int cio_commit_config(struct subchannel *sch)
379 if (ccode < 0) /* -EIO if msch gets a program check. */ 379 if (ccode < 0) /* -EIO if msch gets a program check. */
380 return ccode; 380 return ccode;
381 switch (ccode) { 381 switch (ccode) {
382 case 0: /* successfull */ 382 case 0: /* successful */
383 if (stsch(sch->schid, &schib) || 383 if (stsch(sch->schid, &schib) ||
384 !css_sch_is_valid(&schib)) 384 !css_sch_is_valid(&schib))
385 return -ENODEV; 385 return -ENODEV;
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index f8a3b6967f69..da7afb04e71f 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -169,6 +169,8 @@ static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
169 q->nr); 169 q->nr);
170 debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, 170 debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
171 debugfs_root, q, &debugfs_fops); 171 debugfs_root, q, &debugfs_fops);
172 if (IS_ERR(debugfs_queues[i]))
173 debugfs_queues[i] = NULL;
172} 174}
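The two added lines matter because debugfs_create_file() can return an ERR_PTR (for instance when debugfs is not available) rather than NULL, and a later debugfs_remove() on an ERR_PTR would dereference a bad pointer, while debugfs_remove(NULL) is a safe no-op. A sketch of the defensive wrapper, assuming IS_ERR_OR_NULL() from <linux/err.h>; the function name is invented:

    #include <linux/debugfs.h>
    #include <linux/err.h>

    static struct dentry *debugfs_file_or_null(const char *name,
                                               struct dentry *parent, void *priv,
                                               const struct file_operations *fops)
    {
            struct dentry *d = debugfs_create_file(name,
                                    S_IFREG | S_IRUGO | S_IWUSR,
                                    parent, priv, fops);

            /* store NULL on any failure so debugfs_remove() stays safe */
            return IS_ERR_OR_NULL(d) ? NULL : d;
    }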
173 175
174void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) 176void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 744f928a59ea..10cb0f8726e5 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -114,7 +114,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
114 * @count: count of buffers to examine 114 * @count: count of buffers to examine
115 * @auto_ack: automatically acknowledge buffers 115 * @auto_ack: automatically acknowledge buffers
116 * 116 *
117 * Returns the number of successfull extracted equal buffer states. 117 * Returns the number of successfully extracted equal buffer states.
118 * Stops processing if a state is different from the last buffer's state. 118 * Stops processing if a state is different from the last buffer's state.
119 */ 119 */
120static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, 120static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 591a2b3ae4cb..c4f1b046c3b1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -916,6 +916,21 @@ static struct ethtool_ops qeth_l2_osn_ops = {
916 .get_drvinfo = qeth_core_get_drvinfo, 916 .get_drvinfo = qeth_core_get_drvinfo,
917}; 917};
918 918
919static struct net_device_ops qeth_l2_netdev_ops = {
920 .ndo_open = qeth_l2_open,
921 .ndo_stop = qeth_l2_stop,
922 .ndo_get_stats = qeth_get_stats,
923 .ndo_start_xmit = qeth_l2_hard_start_xmit,
924 .ndo_validate_addr = eth_validate_addr,
925 .ndo_set_multicast_list = qeth_l2_set_multicast_list,
926 .ndo_do_ioctl = qeth_l2_do_ioctl,
927 .ndo_set_mac_address = qeth_l2_set_mac_address,
928 .ndo_change_mtu = qeth_change_mtu,
929 .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
930 .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
931 .ndo_tx_timeout = qeth_tx_timeout,
932};
933
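The conversion above moves the per-netdev callback assignments into one shared table, so every L2 card points at the same struct instead of carrying its own function pointers. Since net_device->netdev_ops is a pointer to const and this particular table is never written at runtime, it could arguably even be declared const; a sketch with the same callbacks:

    static const struct net_device_ops qeth_l2_netdev_ops = {
            .ndo_open               = qeth_l2_open,
            .ndo_stop               = qeth_l2_stop,
            .ndo_start_xmit         = qeth_l2_hard_start_xmit,
            /* ... remaining callbacks exactly as above ... */
    };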
919static int qeth_l2_setup_netdev(struct qeth_card *card) 934static int qeth_l2_setup_netdev(struct qeth_card *card)
920{ 935{
921 switch (card->info.type) { 936 switch (card->info.type) {
@@ -937,19 +952,9 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
937 return -ENODEV; 952 return -ENODEV;
938 953
939 card->dev->ml_priv = card; 954 card->dev->ml_priv = card;
940 card->dev->tx_timeout = &qeth_tx_timeout;
941 card->dev->watchdog_timeo = QETH_TX_TIMEOUT; 955 card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
942 card->dev->open = qeth_l2_open;
943 card->dev->stop = qeth_l2_stop;
944 card->dev->hard_start_xmit = qeth_l2_hard_start_xmit;
945 card->dev->do_ioctl = qeth_l2_do_ioctl;
946 card->dev->get_stats = qeth_get_stats;
947 card->dev->change_mtu = qeth_change_mtu;
948 card->dev->set_multicast_list = qeth_l2_set_multicast_list;
949 card->dev->vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid;
950 card->dev->vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid;
951 card->dev->set_mac_address = qeth_l2_set_mac_address;
952 card->dev->mtu = card->info.initial_mtu; 956 card->dev->mtu = card->info.initial_mtu;
957 card->dev->netdev_ops = &qeth_l2_netdev_ops;
953 if (card->info.type != QETH_CARD_TYPE_OSN) 958 if (card->info.type != QETH_CARD_TYPE_OSN)
954 SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops); 959 SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
955 else 960 else
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 4693ee4e7b98..68d623ab7e6e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1829,28 +1829,6 @@ static void qeth_l3_vlan_rx_register(struct net_device *dev,
1829 1829
1830static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 1830static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1831{ 1831{
1832 struct net_device *vlandev;
1833 struct qeth_card *card = dev->ml_priv;
1834 struct in_device *in_dev;
1835
1836 if (card->info.type == QETH_CARD_TYPE_IQD)
1837 return;
1838
1839 vlandev = vlan_group_get_device(card->vlangrp, vid);
1840 vlandev->neigh_setup = qeth_l3_neigh_setup;
1841
1842 in_dev = in_dev_get(vlandev);
1843#ifdef CONFIG_SYSCTL
1844 neigh_sysctl_unregister(in_dev->arp_parms);
1845#endif
1846 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
1847
1848 in_dev->arp_parms = neigh_parms_alloc(vlandev, &arp_tbl);
1849#ifdef CONFIG_SYSCTL
1850 neigh_sysctl_register(vlandev, in_dev->arp_parms, NET_IPV4,
1851 NET_IPV4_NEIGH, "ipv4", NULL, NULL);
1852#endif
1853 in_dev_put(in_dev);
1854 return; 1832 return;
1855} 1833}
1856 1834
@@ -2916,6 +2894,21 @@ qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
2916 return 0; 2894 return 0;
2917} 2895}
2918 2896
2897static struct net_device_ops qeth_l3_netdev_ops = {
2898 .ndo_open = qeth_l3_open,
2899 .ndo_stop = qeth_l3_stop,
2900 .ndo_get_stats = qeth_get_stats,
2901 .ndo_start_xmit = qeth_l3_hard_start_xmit,
2902 .ndo_validate_addr = eth_validate_addr,
2903 .ndo_set_multicast_list = qeth_l3_set_multicast_list,
2904 .ndo_do_ioctl = qeth_l3_do_ioctl,
2905 .ndo_change_mtu = qeth_change_mtu,
2906 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
2907 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
2908 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
2909 .ndo_tx_timeout = qeth_tx_timeout,
2910};
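Unlike the L2 table, this one cannot be const: the setup hunk below patches ndo_neigh_setup into it at runtime for OSAE cards. Because the table is shared by every L3 card, that assignment is global, not per device. A hypothetical per-card alternative, where "ndo_copy" is an invented field on struct qeth_card:

    /* give each card its own writable copy of the shared table */
    card->ndo_copy = qeth_l3_netdev_ops;                    /* struct copy */
    if (card->info.type == QETH_CARD_TYPE_OSAE)
            card->ndo_copy.ndo_neigh_setup = qeth_l3_neigh_setup;
    card->dev->netdev_ops = &card->ndo_copy;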
2911
2919static int qeth_l3_setup_netdev(struct qeth_card *card) 2912static int qeth_l3_setup_netdev(struct qeth_card *card)
2920{ 2913{
2921 if (card->info.type == QETH_CARD_TYPE_OSAE) { 2914 if (card->info.type == QETH_CARD_TYPE_OSAE) {
@@ -2930,7 +2923,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
2930 card->dev = alloc_etherdev(0); 2923 card->dev = alloc_etherdev(0);
2931 if (!card->dev) 2924 if (!card->dev)
2932 return -ENODEV; 2925 return -ENODEV;
2933 card->dev->neigh_setup = qeth_l3_neigh_setup; 2926 qeth_l3_netdev_ops.ndo_neigh_setup =
2927 qeth_l3_neigh_setup;
2934 2928
2935 /*IPv6 address autoconfiguration stuff*/ 2929 /*IPv6 address autoconfiguration stuff*/
2936 qeth_l3_get_unique_id(card); 2930 qeth_l3_get_unique_id(card);
@@ -2947,21 +2941,10 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
2947 } else 2941 } else
2948 return -ENODEV; 2942 return -ENODEV;
2949 2943
2950 card->dev->hard_start_xmit = qeth_l3_hard_start_xmit;
2951 card->dev->ml_priv = card; 2944 card->dev->ml_priv = card;
2952 card->dev->tx_timeout = &qeth_tx_timeout;
2953 card->dev->watchdog_timeo = QETH_TX_TIMEOUT; 2945 card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
2954 card->dev->open = qeth_l3_open;
2955 card->dev->stop = qeth_l3_stop;
2956 card->dev->do_ioctl = qeth_l3_do_ioctl;
2957 card->dev->get_stats = qeth_get_stats;
2958 card->dev->change_mtu = qeth_change_mtu;
2959 card->dev->set_multicast_list = qeth_l3_set_multicast_list;
2960 card->dev->vlan_rx_register = qeth_l3_vlan_rx_register;
2961 card->dev->vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid;
2962 card->dev->vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid;
2963 card->dev->mtu = card->info.initial_mtu; 2946 card->dev->mtu = card->info.initial_mtu;
2964 card->dev->set_mac_address = NULL; 2947 card->dev->netdev_ops = &qeth_l3_netdev_ops;
2965 SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops); 2948 SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
2966 card->dev->features |= NETIF_F_HW_VLAN_TX | 2949 card->dev->features |= NETIF_F_HW_VLAN_TX |
2967 NETIF_F_HW_VLAN_RX | 2950 NETIF_F_HW_VLAN_RX |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index b7322976d2b7..256c7bec7bd7 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -884,6 +884,7 @@ config SCSI_IBMVSCSI
884 tristate "IBM Virtual SCSI support" 884 tristate "IBM Virtual SCSI support"
885 depends on PPC_PSERIES || PPC_ISERIES 885 depends on PPC_PSERIES || PPC_ISERIES
886 select SCSI_SRP_ATTRS 886 select SCSI_SRP_ATTRS
887 select VIOPATH if PPC_ISERIES
887 help 888 help
888 This is the IBM POWER Virtual SCSI Client 889 This is the IBM POWER Virtual SCSI Client
889 890
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index 9e64b21ef637..c889d8458684 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -318,7 +318,7 @@ NCR_D700_probe(struct device *dev)
318 return -ENOMEM; 318 return -ENOMEM;
319 319
320 p->dev = dev; 320 p->dev = dev;
321 snprintf(p->name, sizeof(p->name), "D700(%s)", dev->bus_id); 321 snprintf(p->name, sizeof(p->name), "D700(%s)", dev_name(dev));
322 if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) { 322 if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) {
323 printk(KERN_ERR "D700: request_irq failed\n"); 323 printk(KERN_ERR "D700: request_irq failed\n");
324 kfree(p); 324 kfree(p);
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 07d572feceed..37dd47136fb1 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -169,10 +169,8 @@ int __init a2091_detect(struct scsi_host_template *tpnt)
169 continue; 169 continue;
170 170
171 instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata)); 171 instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata));
172 if (instance == NULL) { 172 if (instance == NULL)
173 release_mem_region(address, 256); 173 goto release;
174 continue;
175 }
176 instance->base = ZTWO_VADDR(address); 174 instance->base = ZTWO_VADDR(address);
177 instance->irq = IRQ_AMIGA_PORTS; 175 instance->irq = IRQ_AMIGA_PORTS;
178 instance->unique_id = z->slotaddr; 176 instance->unique_id = z->slotaddr;
@@ -183,10 +181,18 @@ int __init a2091_detect(struct scsi_host_template *tpnt)
183 HDATA(instance)->fast = 0; 181 HDATA(instance)->fast = 0;
184 HDATA(instance)->dma_mode = CTRL_DMA; 182 HDATA(instance)->dma_mode = CTRL_DMA;
185 wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10); 183 wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
186 request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI", 184 if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI",
187 instance); 185 instance))
186 goto unregister;
188 DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN; 187 DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
189 num_a2091++; 188 num_a2091++;
189 continue;
190
191unregister:
192 scsi_unregister(instance);
193 wd33c93_release();
194release:
195 release_mem_region(address, 256);
190 } 196 }
191 197
192 return num_a2091; 198 return num_a2091;
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 2f602720193e..7507d8bc57a1 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2527,7 +2527,7 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
2527{ 2527{
2528 struct asc_board *boardp = shost_priv(s); 2528 struct asc_board *boardp = shost_priv(s);
2529 2529
2530 printk("Scsi_Host at addr 0x%p, device %s\n", s, boardp->dev->bus_id); 2530 printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
2531 printk(" host_busy %u, host_no %d, last_reset %d,\n", 2531 printk(" host_busy %u, host_no %d, last_reset %d,\n",
2532 s->host_busy, s->host_no, (unsigned)s->last_reset); 2532 s->host_busy, s->host_no, (unsigned)s->last_reset);
2533 2533
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index d4640ef6d44f..78eb86fc6276 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -189,7 +189,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
189 asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); 189 asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
190 /* send a hard reset */ 190 /* send a hard reset */
191 ASD_DPRINTK("sending %s reset to %s\n", 191 ASD_DPRINTK("sending %s reset to %s\n",
192 reset_type ? "hard" : "soft", phy->dev.bus_id); 192 reset_type ? "hard" : "soft", dev_name(&phy->dev));
193 res = sas_phy_reset(phy, reset_type); 193 res = sas_phy_reset(phy, reset_type);
194 if (res == TMF_RESP_FUNC_COMPLETE) { 194 if (res == TMF_RESP_FUNC_COMPLETE) {
195 /* wait for the maximum settle time */ 195 /* wait for the maximum settle time */
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index 1a41f04264f7..08f3a09d9233 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/skbuff.h> 13#include <linux/skbuff.h>
14#include <linux/scatterlist.h>
14 15
15/* from cxgb3 LLD */ 16/* from cxgb3 LLD */
16#include "common.h" 17#include "common.h"
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index ca7363752401..5d1bf7e3d245 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -329,12 +329,16 @@ int __init gvp11_detect(struct scsi_host_template *tpnt)
329 (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 329 (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
330 : WD33C93_FS_12_15); 330 : WD33C93_FS_12_15);
331 331
332 request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI", 332 if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI",
333 instance); 333 instance))
334 goto unregister;
334 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; 335 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
335 num_gvp11++; 336 num_gvp11++;
336 continue; 337 continue;
337 338
339unregister:
340 scsi_unregister(instance);
341 wd33c93_release();
338release: 342release:
339 release_mem_region(address, 256); 343 release_mem_region(address, 256);
340 } 344 }
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 3fdbb13e80a8..aa670a1d1513 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -388,8 +388,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
388 shost->dma_boundary = 0xffffffff; 388 shost->dma_boundary = 0xffffffff;
389 389
390 device_initialize(&shost->shost_gendev); 390 device_initialize(&shost->shost_gendev);
391 snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d", 391 dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
392 shost->host_no);
393#ifndef CONFIG_SYSFS_DEPRECATED 392#ifndef CONFIG_SYSFS_DEPRECATED
394 shost->shost_gendev.bus = &scsi_bus_type; 393 shost->shost_gendev.bus = &scsi_bus_type;
395#endif 394#endif
@@ -398,8 +397,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
398 device_initialize(&shost->shost_dev); 397 device_initialize(&shost->shost_dev);
399 shost->shost_dev.parent = &shost->shost_gendev; 398 shost->shost_dev.parent = &shost->shost_gendev;
400 shost->shost_dev.class = &shost_class; 399 shost->shost_dev.class = &shost_class;
401 snprintf(shost->shost_dev.bus_id, BUS_ID_SIZE, "host%d", 400 dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
402 shost->host_no);
403 shost->shost_dev.groups = scsi_sysfs_shost_attr_groups; 401 shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
404 402
405 shost->ehandler = kthread_run(scsi_error_handler, shost, 403 shost->ehandler = kthread_run(scsi_error_handler, shost,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 44f202f33101..ee0739b217b6 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -101,7 +101,7 @@ static const struct {
101 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" }, 101 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
102 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" }, 102 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
103 { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" }, 103 { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
104 { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" }, 104 { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
105 { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" }, 105 { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
106 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" }, 106 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
107 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" }, 107 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
@@ -115,11 +115,11 @@ static const struct {
115 115
116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, 116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
117 { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" }, 117 { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
118 { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ABORT, 0, 1, "invalid parameter" }, 118 { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
119 { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ABORT, 0, 1, "missing parameter" }, 119 { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
120 { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" }, 120 { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
121 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" }, 121 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
122 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" }, 122 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
123 { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" }, 123 { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
124 { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" }, 124 { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
125 { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" }, 125 { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
@@ -1145,10 +1145,10 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1145 login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs); 1145 login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
1146 strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME); 1146 strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1147 strncpy(login_info->device_name, 1147 strncpy(login_info->device_name,
1148 vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME); 1148 dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
1149 1149
1150 location = of_get_property(of_node, "ibm,loc-code", NULL); 1150 location = of_get_property(of_node, "ibm,loc-code", NULL);
1151 location = location ? location : vhost->dev->bus_id; 1151 location = location ? location : dev_name(vhost->dev);
1152 strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME); 1152 strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1153} 1153}
1154 1154
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 868d35ea01bb..5c541f7850f9 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -89,6 +89,7 @@ static int max_id = 64;
 static int max_channel = 3;
 static int init_timeout = 5;
 static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
+static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
@@ -1633,7 +1634,7 @@ static struct scsi_host_template driver_template = {
 static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
 {
 	/* iu_storage data allocated in initialize_event_pool */
-	unsigned long desired_io = max_requests * sizeof(union viosrp_iu);
+	unsigned long desired_io = max_events * sizeof(union viosrp_iu);
 
 	/* add io space for sg data */
 	desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
@@ -1657,7 +1658,6 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
 	vdev->dev.driver_data = NULL;
 
-	driver_template.can_queue = max_requests - 2;
 	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
 	if (!host) {
 		dev_err(&vdev->dev, "couldn't allocate host data\n");
@@ -1673,12 +1673,12 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	atomic_set(&hostdata->request_limit, -1);
 	hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
 
-	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_requests);
+	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
 	if (rc != 0 && rc != H_RESOURCE) {
 		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
 		goto init_crq_failed;
 	}
-	if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
+	if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
 		dev_err(&vdev->dev, "couldn't initialize event pool\n");
 		goto init_pool_failed;
 	}
@@ -1730,7 +1730,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
       add_host_failed:
 	release_event_pool(&hostdata->pool, hostdata);
       init_pool_failed:
-	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_requests);
+	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
       init_crq_failed:
 	scsi_host_put(host);
       scsi_host_alloc_failed:
@@ -1742,7 +1742,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
 	struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
 	release_event_pool(&hostdata->pool, hostdata);
 	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
-					max_requests);
+					max_events);
 
 	srp_remove_host(hostdata->host);
 	scsi_remove_host(hostdata->host);
@@ -1779,6 +1779,10 @@ int __init ibmvscsi_module_init(void)
 {
 	int ret;
 
+	/* Ensure we have two requests to do error recovery */
+	driver_template.can_queue = max_requests;
+	max_events = max_requests + 2;
+
 	if (firmware_has_feature(FW_FEATURE_ISERIES))
 		ibmvscsi_ops = &iseriesvscsi_ops;
 	else if (firmware_has_feature(FW_FEATURE_VIO))
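
The ibmvscsi change above stops baking "max_requests - 2" into the host template at probe time and instead derives max_events = max_requests + 2 once, in ibmvscsi_module_init(), after module parameters have been parsed, so two event structs are always held back for error recovery. A minimal sketch of that init-time sizing pattern; the demo_* names are hypothetical, not the driver's own:

	#include <linux/module.h>

	static int demo_max_requests = 100;
	module_param(demo_max_requests, int, 0444);

	static int demo_max_events;	/* requests plus error-recovery slack */

	static int __init demo_init(void)
	{
		/* Reserve two extra events so aborts/resets can still be
		 * issued when the queue is full of ordinary requests. */
		demo_max_events = demo_max_requests + 2;
		return 0;
	}
	module_init(demo_init);
	MODULE_LICENSE("GPL");

Doing the arithmetic at module init keeps the user-visible queue depth and the internal pool size consistent for any value the user loads the module with.
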
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0edfb1fa63ce..841f460edbc4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2184,7 +2184,7 @@ static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
 	    sizeof(struct ipr_dump_entry_header);
 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
-	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
+	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
 	driver_dump->hdr.num_entries++;
 }
 
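
This hunk, and several below, replace direct reads of the fixed-size dev.bus_id array with the dev_name() accessor from the driver core, which keeps callers working when the core changes how device names are stored. A minimal sketch of the accessor style (the demo_report() helper is hypothetical):

	#include <linux/device.h>
	#include <linux/kernel.h>

	/* Log through the accessor instead of poking at dev->bus_id;
	 * dev_name() stays valid however the core stores the name. */
	static void demo_report(struct device *dev, int err)
	{
		printk(KERN_ERR "demo: device %s failed, error %d\n",
		       dev_name(dev), err);
	}
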
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 59459141b437..8f872f816fe4 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1272,7 +1272,7 @@ struct ipr_dump_entry_header {
 
 struct ipr_dump_location_entry {
 	struct ipr_dump_entry_header hdr;
-	u8 location[BUS_ID_SIZE];
+	u8 location[20];
 }__attribute__((packed));
 
 struct ipr_dump_trace_entry {
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index 3126824da36d..4a4e6954ec79 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -103,8 +103,7 @@ lasi700_probe(struct parisc_device *dev)
 
 	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
 	if (!hostdata) {
-		printk(KERN_ERR "%s: Failed to allocate host data\n",
-		       dev->dev.bus_id);
+		dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
 		return -ENOMEM;
 	}
 
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 709a6f75ca9d..facc5bfcf7db 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -169,7 +169,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
 		if (res) {
 			printk("sas: driver on pcidev %s cannot handle "
 			       "device %llx, error:%d\n",
-			       sas_ha->dev->bus_id,
+			       dev_name(sas_ha->dev),
 			       SAS_ADDR(dev->sas_addr), res);
 		}
 	}
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
index bf34a236f946..c17c25030f1c 100644
--- a/drivers/scsi/libsas/sas_dump.c
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -56,7 +56,7 @@ void sas_dprint_phye(int phyid, enum phy_event pe)
 
 void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he)
 {
-	SAS_DPRINTK("ha %s: %s event\n", sas_ha->dev->bus_id,
+	SAS_DPRINTK("ha %s: %s event\n", dev_name(sas_ha->dev),
 		    sas_hae_str[he]);
 }
 
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 16f93123271e..d110a366c48a 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -199,8 +199,8 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		break;
 
 	case SMP_DISCOVER:
-		req->data_len =- 16;
-		if (req->data_len < 0) {
+		req->data_len -= 16;
+		if ((int)req->data_len < 0) {
 			req->data_len = 0;
 			error = -EINVAL;
 			goto out;
@@ -215,8 +215,8 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		break;
 
 	case SMP_REPORT_PHY_SATA:
-		req->data_len =- 16;
-		if (req->data_len < 0) {
+		req->data_len -= 16;
+		if ((int)req->data_len < 0) {
 			req->data_len = 0;
 			error = -EINVAL;
 			goto out;
@@ -238,8 +238,8 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 		break;
 
 	case SMP_PHY_CONTROL:
-		req->data_len =- 44;
-		if (req->data_len < 0) {
+		req->data_len -= 44;
+		if ((int)req->data_len < 0) {
 			req->data_len = 0;
 			error = -EINVAL;
 			goto out;
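
All three hunks above fix the same pair of bugs: "=-" assigns a negative constant where "-=" (subtract) was intended, and req->data_len is unsigned, so a plain "< 0" test after subtraction can never fire without a cast. A standalone sketch of both hazards, with made-up sizes:

	#include <stdio.h>

	int main(void)
	{
		unsigned int data_len = 8;

		/* Typo hazard: "x =- 16" parses as "x = -16", not "x -= 16". */

		data_len -= 16;			/* wraps: unsigned underflow */
		if ((int)data_len < 0)		/* cast makes the check meaningful */
			puts("underflow detected, rejecting request");
		return 0;
	}

The cast relies on the usual two's-complement conversion of an out-of-range unsigned value, which kernel code assumes throughout.
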
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 139935a121b4..e6ac59c023f1 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -113,7 +113,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
 	sas_port_add_phy(port->port, phy->phy);
 
 	SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n",
-		    phy->phy->dev.bus_id,port->port->dev.bus_id,
+		    dev_name(&phy->phy->dev), dev_name(&port->port->dev),
 		    port->phy_mask,
 		    SAS_ADDR(port->attached_sas_addr));
 
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 4c77038c8f1c..6c867311cef1 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1795,12 +1795,13 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
 int
 lpfc_online(struct lpfc_hba *phba)
 {
-	struct lpfc_vport *vport = phba->pport;
+	struct lpfc_vport *vport;
 	struct lpfc_vport **vports;
 	int i;
 
 	if (!phba)
 		return 0;
+	vport = phba->pport;
 
 	if (!(vport->fc_flag & FC_OFFLINE_MODE))
 		return 0;
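
The lpfc hunk fixes a dereference-before-check: initializing vport from phba->pport at its declaration reads through phba before the "!phba" guard runs. A sketch of the corrected ordering, with hypothetical types:

	/* An initializer at the declaration executes before any NULL
	 * check written below it -- assign only after the guard. */
	struct demo_port { int flags; };
	struct demo_hba { struct demo_port *pport; };

	int demo_online(struct demo_hba *hba)
	{
		struct demo_port *port;		/* not initialized from hba yet */

		if (!hba)
			return 0;
		port = hba->pport;		/* safe: hba is non-NULL here */

		return port->flags;
	}
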
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
index 1dd70d7a4947..23e5a876bb10 100644
--- a/drivers/scsi/mvsas.c
+++ b/drivers/scsi/mvsas.c
@@ -2959,7 +2959,7 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi)
 
 	/* enable auto port detection */
 	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
-	msleep(100);
+	msleep(1100);
 	/* init and reset phys */
 	for (i = 0; i < mvi->chip->n_phy; i++) {
 		u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 165ff884f48e..67cde0138061 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -114,7 +114,7 @@ static int aha152x_probe(struct pcmcia_device *link)
 	link->io.NumPorts1 = 0x20;
 	link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
 	link->io.IOAddrLines = 10;
-	link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+	link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
 	link->irq.IRQInfo1 = IRQ_LEVEL_ID;
 	link->conf.Attributes = CONF_ENABLE_IRQ;
 	link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index cd53627cc761..c7acef50d5da 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -303,7 +303,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
 	else if (start == (ha->flt_region_boot * 4) ||
 	    start == (ha->flt_region_fw * 4))
 		valid = 1;
-	else if (IS_QLA25XX(ha) &&
+	else if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) &&
 	    start == (ha->flt_region_vpd_nvram * 4))
 		valid = 1;
 	if (!valid) {
@@ -815,6 +815,21 @@ qla2x00_total_isp_aborts_show(struct device *dev,
 	    ha->qla_stats.total_isp_aborts);
 }
 
+static ssize_t
+qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA81XX(ha))
+		return snprintf(buf, PAGE_SIZE, "\n");
+
+	return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x (%x)\n",
+	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
+	    ha->mpi_version[3], ha->mpi_capabilities);
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -839,6 +854,7 @@ static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
 	NULL);
 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
 	NULL);
+static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
 
 struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_driver_version,
@@ -858,6 +874,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_optrom_fcode_version,
 	&dev_attr_optrom_fw_version,
 	&dev_attr_total_isp_aborts,
+	&dev_attr_mpi_version,
 	NULL,
 };
 
@@ -892,6 +909,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
 	case PORT_SPEED_8GB:
 		speed = FC_PORTSPEED_8GBIT;
 		break;
+	case PORT_SPEED_10GB:
+		speed = FC_PORTSPEED_10GBIT;
+		break;
 	}
 	fc_host_speed(shost) = speed;
 }
@@ -1382,7 +1402,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
 	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
 	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
 
-	if (IS_QLA25XX(ha))
+	if (IS_QLA81XX(ha))
+		speed = FC_PORTSPEED_10GBIT;
+	else if (IS_QLA25XX(ha))
 		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
 		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
 	else if (IS_QLA24XX_TYPE(ha))
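
The mpi_version hunks follow the usual three-step sysfs recipe: write a show callback, bind it with DEVICE_ATTR(), and append the attribute to the NULL-terminated host attribute array the SCSI transport walks. A minimal sketch of the same recipe, with hypothetical demo_ names:

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/mm.h>
	#include <linux/stat.h>

	/* 1. A read callback that formats the value into the page buffer. */
	static ssize_t demo_version_show(struct device *dev,
	    struct device_attribute *attr, char *buf)
	{
		return snprintf(buf, PAGE_SIZE, "%02x.%02x\n", 1, 2);
	}

	/* 2. A read-only attribute bound to the callback. */
	static DEVICE_ATTR(demo_version, S_IRUGO, demo_version_show, NULL);

	/* 3. A NULL-terminated list handed to the transport template. */
	static struct device_attribute *demo_host_attrs[] = {
		&dev_attr_demo_version,
		NULL,
	};

The NULL terminator is what lets the transport iterate the array without a separate count.
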
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 1cf77772623b..34760f8d4f17 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -310,6 +310,76 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
 	*buf++ = htons(RD_REG_WORD(dmp_reg++));
 }
 
+static inline void *
+qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
+{
+	if (!ha->eft)
+		return ptr;
+
+	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
+	return ptr + ntohl(ha->fw_dump->eft_size);
+}
+
+static inline void *
+qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+	uint32_t cnt;
+	uint32_t *iter_reg;
+	struct qla2xxx_fce_chain *fcec = ptr;
+
+	if (!ha->fce)
+		return ptr;
+
+	*last_chain = &fcec->type;
+	fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
+	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
+	    fce_calc_size(ha->fce_bufs));
+	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
+	fcec->addr_l = htonl(LSD(ha->fce_dma));
+	fcec->addr_h = htonl(MSD(ha->fce_dma));
+
+	iter_reg = fcec->eregs;
+	for (cnt = 0; cnt < 8; cnt++)
+		*iter_reg++ = htonl(ha->fce_mb[cnt]);
+
+	memcpy(iter_reg, ha->fce, ntohl(fcec->size));
+
+	return iter_reg;
+}
+
+static inline void *
+qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+	uint32_t cnt, que_idx;
+	uint8_t req_cnt, rsp_cnt, que_cnt;
+	struct qla2xxx_mq_chain *mq = ptr;
+	struct device_reg_25xxmq __iomem *reg;
+
+	if (!ha->mqenable)
+		return ptr;
+
+	mq = ptr;
+	*last_chain = &mq->type;
+	mq->type = __constant_htonl(DUMP_CHAIN_MQ);
+	mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
+
+	req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
+	rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
+	que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
+	mq->count = htonl(que_cnt);
+	for (cnt = 0; cnt < que_cnt; cnt++) {
+		reg = (struct device_reg_25xxmq *) ((void *)
+		    ha->mqiobase + cnt * QLA_QUE_PAGE);
+		que_idx = cnt * 4;
+		mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
+		mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
+		mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in));
+		mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out));
+	}
+
+	return ptr + sizeof(struct qla2xxx_mq_chain);
+}
+
 /**
  * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
  * @ha: HA context
@@ -913,8 +983,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 		goto qla24xx_fw_dump_failed_0;
 
 	nxt = qla2xxx_copy_queues(ha, nxt);
-	if (ha->eft)
-		memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
+
+	qla24xx_copy_eft(ha, nxt);
 
 qla24xx_fw_dump_failed_0:
 	if (rval != QLA_SUCCESS) {
@@ -942,19 +1012,14 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	uint32_t risc_address;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
-	struct device_reg_25xxmq __iomem *reg25;
 	uint32_t __iomem *dmp_reg;
 	uint32_t *iter_reg;
 	uint16_t __iomem *mbx_reg;
 	unsigned long flags;
 	struct qla25xx_fw_dump *fw;
 	uint32_t ext_mem_cnt;
-	void *nxt;
-	struct qla2xxx_fce_chain *fcec;
-	struct qla2xxx_mq_chain *mq = NULL;
-	uint32_t qreg_size;
-	uint8_t req_cnt, rsp_cnt, que_cnt;
-	uint32_t que_idx;
+	void *nxt, *nxt_chain;
+	uint32_t *last_chain = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
 	risc_address = ext_mem_cnt = 0;
@@ -1001,28 +1066,6 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
 	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
 
-	/* Multi queue registers */
-	if (ha->mqenable) {
-		qreg_size = sizeof(struct qla2xxx_mq_chain);
-		mq = kzalloc(qreg_size, GFP_KERNEL);
-		if (!mq)
-			goto qla25xx_fw_dump_failed_0;
-		req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
-		rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
-		que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
-		mq->count = htonl(que_cnt);
-		mq->chain_size = htonl(qreg_size);
-		mq->type = __constant_htonl(DUMP_CHAIN_MQ);
-		for (cnt = 0; cnt < que_cnt; cnt++) {
-			reg25 = (struct device_reg_25xxmq *) ((void *)
-			    ha->mqiobase + cnt * QLA_QUE_PAGE);
-			que_idx = cnt * 4;
-			mq->qregs[que_idx] = htonl(reg25->req_q_in);
-			mq->qregs[que_idx+1] = htonl(reg25->req_q_out);
-			mq->qregs[que_idx+2] = htonl(reg25->rsp_q_in);
-			mq->qregs[que_idx+3] = htonl(reg25->rsp_q_out);
-		}
-	}
 	WRT_REG_DWORD(&reg->iobase_window, 0x00);
 	RD_REG_DWORD(&reg->iobase_window);
 
@@ -1240,6 +1283,10 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
 	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
 
+	/* Multi queue registers */
+	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+	    &last_chain);
+
 	rval = qla24xx_soft_reset(ha);
 	if (rval != QLA_SUCCESS)
 		goto qla25xx_fw_dump_failed_0;
@@ -1249,39 +1296,341 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	if (rval != QLA_SUCCESS)
 		goto qla25xx_fw_dump_failed_0;
 
-	/* Fibre Channel Trace Buffer. */
 	nxt = qla2xxx_copy_queues(ha, nxt);
-	if (ha->eft)
-		memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
 
-	/* Fibre Channel Event Buffer. */
-	if (!ha->fce)
-		goto qla25xx_fw_dump_failed_0;
+	nxt = qla24xx_copy_eft(ha, nxt);
+
+	/* Chain entries -- started with MQ. */
+	qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+	if (last_chain) {
+		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+	}
 
-	ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+qla25xx_fw_dump_failed_0:
+	if (rval != QLA_SUCCESS) {
+		qla_printk(KERN_WARNING, ha,
+		    "Failed to dump firmware (%x)!!!\n", rval);
+		ha->fw_dumped = 0;
 
-	if (ha->mqenable) {
-		nxt = nxt + ntohl(ha->fw_dump->eft_size);
-		memcpy(nxt, mq, qreg_size);
-		kfree(mq);
-		fcec = nxt + qreg_size;
 	} else {
-		fcec = nxt + ntohl(ha->fw_dump->eft_size);
+		qla_printk(KERN_INFO, ha,
+		    "Firmware dump saved to temp buffer (%ld/%p).\n",
+		    base_vha->host_no, ha->fw_dump);
+		ha->fw_dumped = 1;
 	}
-	fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST);
-	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
-	    fce_calc_size(ha->fce_bufs));
-	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
-	fcec->addr_l = htonl(LSD(ha->fce_dma));
-	fcec->addr_h = htonl(MSD(ha->fce_dma));
 
-	iter_reg = fcec->eregs;
-	for (cnt = 0; cnt < 8; cnt++)
-		*iter_reg++ = htonl(ha->fce_mb[cnt]);
+qla25xx_fw_dump_failed:
+	if (!hardware_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
 
-	memcpy(iter_reg, ha->fce, ntohl(fcec->size));
+void
+qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	int rval;
+	uint32_t cnt;
+	uint32_t risc_address;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	uint32_t __iomem *dmp_reg;
+	uint32_t *iter_reg;
+	uint16_t __iomem *mbx_reg;
+	unsigned long flags;
+	struct qla81xx_fw_dump *fw;
+	uint32_t ext_mem_cnt;
+	void *nxt, *nxt_chain;
+	uint32_t *last_chain = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
-qla25xx_fw_dump_failed_0:
+	risc_address = ext_mem_cnt = 0;
+	flags = 0;
+
+	if (!hardware_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	if (!ha->fw_dump) {
+		qla_printk(KERN_WARNING, ha,
+		    "No buffer available for dump!!!\n");
+		goto qla81xx_fw_dump_failed;
+	}
+
+	if (ha->fw_dumped) {
+		qla_printk(KERN_WARNING, ha,
+		    "Firmware has been previously dumped (%p) -- ignoring "
+		    "request...\n", ha->fw_dump);
+		goto qla81xx_fw_dump_failed;
+	}
+	fw = &ha->fw_dump->isp.isp81;
+	qla2xxx_prep_dump(ha, ha->fw_dump);
+
+	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+	/* Pause RISC. */
+	rval = qla24xx_pause_risc(reg);
+	if (rval != QLA_SUCCESS)
+		goto qla81xx_fw_dump_failed_0;
+
+	/* Host/Risc registers. */
+	iter_reg = fw->host_risc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
+
+	/* PCIe registers. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_window, 0x01);
+	dmp_reg = &reg->iobase_c4;
+	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
+	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
+	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	WRT_REG_DWORD(&reg->iobase_window, 0x00);
+	RD_REG_DWORD(&reg->iobase_window);
+
+	/* Host interface registers. */
+	dmp_reg = &reg->flash_addr;
+	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
+		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+	/* Disable interrupts. */
+	WRT_REG_DWORD(&reg->ictrl, 0);
+	RD_REG_DWORD(&reg->ictrl);
+
+	/* Shadow registers. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
+	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
+	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
+	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
+	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	/* RISC I/O register. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
+	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	/* Mailbox registers. */
+	mbx_reg = &reg->mailbox0;
+	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
+		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+
+	/* Transfer sequence registers. */
+	iter_reg = fw->xseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+	iter_reg = fw->xseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+	/* Receive sequence registers. */
+	iter_reg = fw->rseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+	iter_reg = fw->rseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+
+	/* Auxiliary sequence registers. */
+	iter_reg = fw->aseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
+	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
+
+	iter_reg = fw->aseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
+	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
+
+	/* Command DMA registers. */
+	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
+
+	/* Queues. */
+	iter_reg = fw->req0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+	iter_reg = fw->resp0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+	iter_reg = fw->req1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+	/* Transmit DMA registers. */
+	iter_reg = fw->xmt0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+	iter_reg = fw->xmt1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+	iter_reg = fw->xmt2_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+	iter_reg = fw->xmt3_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+	iter_reg = fw->xmt4_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+	/* Receive DMA registers. */
+	iter_reg = fw->rcvt0_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+	iter_reg = fw->rcvt1_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+	/* RISC registers. */
+	iter_reg = fw->risc_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+	/* Local memory controller registers. */
+	iter_reg = fw->lmc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
+
+	/* Fibre Protocol Module registers. */
+	iter_reg = fw->fpm_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
+
+	/* Frame Buffer registers. */
+	iter_reg = fw->fb_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
+
+	/* Multi queue registers */
+	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+	    &last_chain);
+
+	rval = qla24xx_soft_reset(ha);
+	if (rval != QLA_SUCCESS)
+		goto qla81xx_fw_dump_failed_0;
+
+	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+	    &nxt);
+	if (rval != QLA_SUCCESS)
+		goto qla81xx_fw_dump_failed_0;
+
+	nxt = qla2xxx_copy_queues(ha, nxt);
+
+	nxt = qla24xx_copy_eft(ha, nxt);
+
+	/* Chain entries -- started with MQ. */
+	qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+	if (last_chain) {
+		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+	}
+
+qla81xx_fw_dump_failed_0:
 	if (rval != QLA_SUCCESS) {
 		qla_printk(KERN_WARNING, ha,
 		    "Failed to dump firmware (%x)!!!\n", rval);
@@ -1294,10 +1643,11 @@ qla25xx_fw_dump_failed_0:
 		ha->fw_dumped = 1;
 	}
 
-qla25xx_fw_dump_failed:
+qla81xx_fw_dump_failed:
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
+
 /****************************************************************************/
 /*                         Driver Debug Functions.                          */
 /****************************************************************************/
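
The qla_dbg.c rework stores optional dump sections as self-describing chain entries appended at ha->chain_offset: each helper writes a type word and size, hands back a pointer to the type word it wrote, and whichever entry ends up last gets a LAST flag OR-ed into its type. A compact sketch of that encoding, with hypothetical constants and names:

	#include <stdint.h>
	#include <stddef.h>

	#define DEMO_CHAIN_VARIANT	0x80000000u	/* dump has chained entries */
	#define DEMO_CHAIN_LAST		0x80000000u	/* set on the final entry */

	struct demo_chain {
		uint32_t type;		/* entry kind, e.g. MQ or FCE */
		uint32_t chain_size;	/* bytes, including this header */
	};

	/* Append one entry and remember where its type word lives, so the
	 * caller can later mark whichever entry turned out to be last. */
	static void *demo_append(void *ptr, uint32_t type, uint32_t size,
	    uint32_t **last_chain)
	{
		struct demo_chain *c = ptr;

		c->type = type;
		c->chain_size = size;
		*last_chain = &c->type;
		return (char *)ptr + size;
	}

After the final demo_append() call, "if (last_chain) *last_chain |= DEMO_CHAIN_LAST;" terminates the chain so a dump reader knows where to stop walking.
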
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index c1794a70a45f..f660dd70b72e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -251,6 +251,45 @@ struct qla25xx_fw_dump {
 	uint32_t ext_mem[1];
 };
 
+struct qla81xx_fw_dump {
+	uint32_t host_status;
+	uint32_t host_risc_reg[32];
+	uint32_t pcie_regs[4];
+	uint32_t host_reg[32];
+	uint32_t shadow_reg[11];
+	uint32_t risc_io_reg;
+	uint16_t mailbox_reg[32];
+	uint32_t xseq_gp_reg[128];
+	uint32_t xseq_0_reg[48];
+	uint32_t xseq_1_reg[16];
+	uint32_t rseq_gp_reg[128];
+	uint32_t rseq_0_reg[32];
+	uint32_t rseq_1_reg[16];
+	uint32_t rseq_2_reg[16];
+	uint32_t aseq_gp_reg[128];
+	uint32_t aseq_0_reg[32];
+	uint32_t aseq_1_reg[16];
+	uint32_t aseq_2_reg[16];
+	uint32_t cmd_dma_reg[16];
+	uint32_t req0_dma_reg[15];
+	uint32_t resp0_dma_reg[15];
+	uint32_t req1_dma_reg[15];
+	uint32_t xmt0_dma_reg[32];
+	uint32_t xmt1_dma_reg[32];
+	uint32_t xmt2_dma_reg[32];
+	uint32_t xmt3_dma_reg[32];
+	uint32_t xmt4_dma_reg[32];
+	uint32_t xmt_data_dma_reg[16];
+	uint32_t rcvt0_data_dma_reg[32];
+	uint32_t rcvt1_data_dma_reg[32];
+	uint32_t risc_gp_reg[128];
+	uint32_t lmc_reg[128];
+	uint32_t fpm_hdw_reg[224];
+	uint32_t fb_hdw_reg[208];
+	uint32_t code_ram[0x2000];
+	uint32_t ext_mem[1];
+};
+
 #define EFT_NUM_BUFFERS		4
 #define EFT_BYTES_PER_BUFFER	0x4000
 #define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
@@ -313,5 +352,6 @@ struct qla2xxx_fw_dump {
 		struct qla2300_fw_dump isp23;
 		struct qla24xx_fw_dump isp24;
 		struct qla25xx_fw_dump isp25;
+		struct qla81xx_fw_dump isp81;
 	} isp;
 };
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a29c95204975..023ee77fb027 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -187,7 +187,6 @@ struct req_que;
  * SCSI Request Block
  */
 typedef struct srb {
-	struct scsi_qla_host *vha;	/* HA the SP is queued on */
 	struct req_que *que;
 	struct fc_port *fcport;
 
@@ -2136,7 +2135,6 @@ struct qla_msix_entry {
 /* Work events.  */
 enum qla_work_type {
 	QLA_EVT_AEN,
-	QLA_EVT_HWE_LOG,
 };
 
 
@@ -2151,10 +2149,6 @@ struct qla_work_evt {
 		enum fc_host_event_code code;
 		u32 data;
 	} aen;
-	struct {
-		uint16_t code;
-		uint16_t d1, d2, d3;
-	} hwe;
 	} u;
 };
 
@@ -2309,6 +2303,7 @@ struct qla_hw_data {
 #define PORT_SPEED_2GB	0x01
 #define PORT_SPEED_4GB	0x03
 #define PORT_SPEED_8GB	0x04
+#define PORT_SPEED_10GB	0x13
 	uint16_t	link_data_rate;		/* F/W operating speed */
 
 	uint8_t		current_topology;
@@ -2328,6 +2323,7 @@ struct qla_hw_data {
 
 #define PCI_DEVICE_ID_QLOGIC_ISP2532	0x2532
 #define PCI_DEVICE_ID_QLOGIC_ISP8432	0x8432
+#define PCI_DEVICE_ID_QLOGIC_ISP8001	0x8001
 	uint32_t	device_type;
 #define DT_ISP2100	BIT_0
 #define DT_ISP2200	BIT_1
@@ -2342,7 +2338,8 @@ struct qla_hw_data {
 #define DT_ISP5432	BIT_10
 #define DT_ISP2532	BIT_11
 #define DT_ISP8432	BIT_12
-#define DT_ISP_LAST	(DT_ISP8432 << 1)
+#define DT_ISP8001	BIT_13
+#define DT_ISP_LAST	(DT_ISP8001 << 1)
 
 #define DT_IIDMA	BIT_26
 #define DT_FWI2		BIT_27
@@ -2364,6 +2361,7 @@ struct qla_hw_data {
 #define IS_QLA5432(ha)	(DT_MASK(ha) & DT_ISP5432)
 #define IS_QLA2532(ha)	(DT_MASK(ha) & DT_ISP2532)
 #define IS_QLA8432(ha)	(DT_MASK(ha) & DT_ISP8432)
+#define IS_QLA8001(ha)	(DT_MASK(ha) & DT_ISP8001)
 
 #define IS_QLA23XX(ha)	(IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
 			 IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2373,8 +2371,11 @@ struct qla_hw_data {
 #define IS_QLA84XX(ha)	(IS_QLA8432(ha))
 #define IS_QLA24XX_TYPE(ha)	(IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
 			 IS_QLA84XX(ha))
+#define IS_QLA81XX(ha)	(IS_QLA8001(ha))
 #define IS_QLA2XXX_MIDTYPE(ha)	(IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
-			 IS_QLA25XX(ha))
+			 IS_QLA25XX(ha) || IS_QLA81XX(ha))
+#define IS_NOPOLLING_TYPE(ha)	((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
+			 (ha)->flags.msix_enabled)
 
 #define IS_IIDMA_CAPABLE(ha)	((ha)->device_type & DT_IIDMA)
 #define IS_FWI2_CAPABLE(ha)	((ha)->device_type & DT_FWI2)
@@ -2472,6 +2473,9 @@ struct qla_hw_data {
 	uint8_t		fw_seriallink_options[4];
 	uint16_t	fw_seriallink_options24[4];
 
+	uint8_t		mpi_version[4];
+	uint32_t	mpi_capabilities;
+
 	/* Firmware dump information. */
 	struct qla2xxx_fw_dump *fw_dump;
 	uint32_t	fw_dump_len;
@@ -2480,6 +2484,7 @@ struct qla_hw_data {
 	dma_addr_t	eft_dma;
 	void		*eft;
 
+	uint32_t	chain_offset;
 	struct dentry *dfs_dir;
 	struct dentry *dfs_fce;
 	dma_addr_t	fce_dma;
@@ -2489,10 +2494,6 @@ struct qla_hw_data {
 	uint64_t fce_wr, fce_rd;
 	struct mutex fce_mutex;
 
-	uint32_t	hw_event_start;
-	uint32_t	hw_event_ptr;
-	uint32_t	hw_event_pause_errors;
-
 	uint32_t	pci_attr;
 	uint16_t	chip_revision;
 
@@ -2522,6 +2523,12 @@ struct qla_hw_data {
 	uint8_t		fcode_revision[16];
 	uint32_t	fw_revision[4];
 
+	/* Offsets for flash/nvram access (set to ~0 if not used). */
+	uint32_t	flash_conf_off;
+	uint32_t	flash_data_off;
+	uint32_t	nvram_conf_off;
+	uint32_t	nvram_data_off;
+
 	uint32_t	fdt_wrt_disable;
 	uint32_t	fdt_erase_cmd;
 	uint32_t	fdt_block_size;
@@ -2533,7 +2540,6 @@ struct qla_hw_data {
 	uint32_t	flt_region_boot;
 	uint32_t	flt_region_fw;
 	uint32_t	flt_region_vpd_nvram;
-	uint32_t	flt_region_hw_event;
 	uint32_t	flt_region_npiv_conf;
 
 	/* Needed for BEACON */
@@ -2737,6 +2743,7 @@ typedef struct scsi_qla_host {
#define OPTROM_SIZE_2322	0x100000
 #define OPTROM_SIZE_24XX	0x100000
 #define OPTROM_SIZE_25XX	0x200000
+#define OPTROM_SIZE_81XX	0x400000
 
 #include "qla_gbl.h"
 #include "qla_dbg.h"
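
The DT_ISP/IS_* hunks illustrate the driver's board-classification scheme: one bit per ISP type in device_type, a DT_ISP_LAST sentinel derived from the highest bit, and family macros that grow by OR-ing in new members. A compact sketch of that scheme with demo names, not the driver's:

	#include <stdint.h>

	#define DEMO_ISP2532	(1u << 11)
	#define DEMO_ISP8432	(1u << 12)
	#define DEMO_ISP8001	(1u << 13)		/* new member */
	#define DEMO_ISP_LAST	(DEMO_ISP8001 << 1)	/* one past the top bit */

	#define DEMO_MASK(dt)		((dt) & (DEMO_ISP_LAST - 1))
	#define DEMO_IS_8001(dt)	(DEMO_MASK(dt) & DEMO_ISP8001)
	/* Family macro: extended by OR-ing in the new member's test. */
	#define DEMO_IS_MIDTYPE(dt)	(DEMO_MASK(dt) & \
					 (DEMO_ISP2532 | DEMO_ISP8432 | DEMO_ISP8001))

Deriving the LAST sentinel from the newest bit means probe loops that scan up to it need no edits when a board type is added.
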
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 0e366a1b44b3..c66036da7d2b 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -113,7 +113,8 @@ int
 qla2x00_dfs_setup(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
-	if (!IS_QLA25XX(ha))
+
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
 		goto out;
 	if (!ha->fce)
 		goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index ee1f1e794c2d..7abb045a0410 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1215,9 +1215,10 @@ struct qla_fdt_layout {
 
 struct qla_flt_location {
 	uint8_t sig[4];
-	uint32_t start_lo;
-	uint32_t start_hi;
-	uint16_t unused;
+	uint16_t start_lo;
+	uint16_t start_hi;
+	uint8_t version;
+	uint8_t unused[5];
 	uint16_t checksum;
 };
 
@@ -1390,4 +1391,291 @@ struct access_chip_rsp_84xx {
 
 	uint32_t reserved[12];
 };
+
+/* 81XX Support **************************************************************/
+
+#define MBA_DCBX_START		0x8016
+#define MBA_DCBX_COMPLETE	0x8030
+#define MBA_FCF_CONF_ERR	0x8031
+#define MBA_DCBX_PARAM_UPDATE	0x8032
+#define MBA_IDC_COMPLETE	0x8100
+#define MBA_IDC_NOTIFY		0x8101
+#define MBA_IDC_TIME_EXT	0x8102
+
+struct nvram_81xx {
+	/* NVRAM header. */
+	uint8_t id[4];
+	uint16_t nvram_version;
+	uint16_t reserved_0;
+
+	/* Firmware Initialization Control Block. */
+	uint16_t version;
+	uint16_t reserved_1;
+	uint16_t frame_payload_size;
+	uint16_t execution_throttle;
+	uint16_t exchange_count;
+	uint16_t reserved_2;
+
+	uint8_t port_name[WWN_SIZE];
+	uint8_t node_name[WWN_SIZE];
+
+	uint16_t login_retry_count;
+	uint16_t reserved_3;
+	uint16_t interrupt_delay_timer;
+	uint16_t login_timeout;
+
+	uint32_t firmware_options_1;
+	uint32_t firmware_options_2;
+	uint32_t firmware_options_3;
+
+	uint16_t reserved_4[4];
+
+	/* Offset 64. */
+	uint8_t enode_mac[6];
+	uint16_t reserved_5[5];
+
+	/* Offset 80. */
+	uint16_t reserved_6[24];
+
+	/* Offset 128. */
+	uint16_t reserved_7[64];
+
+	/*
+	 * BIT 0 = Enable spinup delay
+	 * BIT 1 = Disable BIOS
+	 * BIT 2 = Enable Memory Map BIOS
+	 * BIT 3 = Enable Selectable Boot
+	 * BIT 4 = Disable RISC code load
+	 * BIT 5 = Disable Serdes
+	 * BIT 6 = Opt boot mode
+	 * BIT 7 = Interrupt enable
+	 *
+	 * BIT 8 = EV Control enable
+	 * BIT 9 = Enable lip reset
+	 * BIT 10 = Enable lip full login
+	 * BIT 11 = Enable target reset
+	 * BIT 12 = Stop firmware
+	 * BIT 13 = Enable nodename option
+	 * BIT 14 = Default WWPN valid
+	 * BIT 15 = Enable alternate WWN
+	 *
+	 * BIT 16 = CLP LUN string
+	 * BIT 17 = CLP Target string
+	 * BIT 18 = CLP BIOS enable string
+	 * BIT 19 = CLP Serdes string
+	 * BIT 20 = CLP WWPN string
+	 * BIT 21 = CLP WWNN string
+	 * BIT 22 =
+	 * BIT 23 =
+	 * BIT 24 = Keep WWPN
+	 * BIT 25 = Temp WWPN
+	 * BIT 26-31 =
+	 */
+	uint32_t host_p;
+
+	uint8_t alternate_port_name[WWN_SIZE];
+	uint8_t alternate_node_name[WWN_SIZE];
+
+	uint8_t boot_port_name[WWN_SIZE];
+	uint16_t boot_lun_number;
+	uint16_t reserved_8;
+
+	uint8_t alt1_boot_port_name[WWN_SIZE];
+	uint16_t alt1_boot_lun_number;
+	uint16_t reserved_9;
+
+	uint8_t alt2_boot_port_name[WWN_SIZE];
+	uint16_t alt2_boot_lun_number;
+	uint16_t reserved_10;
+
+	uint8_t alt3_boot_port_name[WWN_SIZE];
+	uint16_t alt3_boot_lun_number;
+	uint16_t reserved_11;
+
+	/*
+	 * BIT 0 = Selective Login
+	 * BIT 1 = Alt-Boot Enable
+	 * BIT 2 = Reserved
+	 * BIT 3 = Boot Order List
+	 * BIT 4 = Reserved
+	 * BIT 5 = Selective LUN
+	 * BIT 6 = Reserved
+	 * BIT 7-31 =
+	 */
+	uint32_t efi_parameters;
+
+	uint8_t reset_delay;
+	uint8_t reserved_12;
+	uint16_t reserved_13;
+
+	uint16_t boot_id_number;
+	uint16_t reserved_14;
+
+	uint16_t max_luns_per_target;
+	uint16_t reserved_15;
+
+	uint16_t port_down_retry_count;
+	uint16_t link_down_timeout;
+
+	/* FCode parameters. */
+	uint16_t fcode_parameter;
+
+	uint16_t reserved_16[3];
+
+	/* Offset 352. */
+	uint8_t reserved_17[4];
+	uint16_t reserved_18[5];
+	uint8_t reserved_19[2];
+	uint16_t reserved_20[8];
+
+	/* Offset 384. */
+	uint8_t reserved_21[16];
+	uint16_t reserved_22[8];
+
+	/* Offset 416. */
+	uint16_t reserved_23[32];
+
+	/* Offset 480. */
+	uint8_t model_name[16];
+
+	/* Offset 496. */
+	uint16_t feature_mask_l;
+	uint16_t feature_mask_h;
+	uint16_t reserved_24[2];
+
+	uint16_t subsystem_vendor_id;
+	uint16_t subsystem_device_id;
+
+	uint32_t checksum;
+};
+
+/*
+ * ISP Initialization Control Block.
+ * Little endian except where noted.
+ */
+#define ICB_VERSION 1
+struct init_cb_81xx {
+	uint16_t version;
+	uint16_t reserved_1;
+
+	uint16_t frame_payload_size;
+	uint16_t execution_throttle;
+	uint16_t exchange_count;
+
+	uint16_t reserved_2;
+
+	uint8_t port_name[WWN_SIZE];		/* Big endian. */
+	uint8_t node_name[WWN_SIZE];		/* Big endian. */
+
+	uint16_t response_q_inpointer;
+	uint16_t request_q_outpointer;
+
+	uint16_t login_retry_count;
+
+	uint16_t prio_request_q_outpointer;
+
+	uint16_t response_q_length;
+	uint16_t request_q_length;
+
+	uint16_t reserved_3;
+
+	uint16_t prio_request_q_length;
+
+	uint32_t request_q_address[2];
+	uint32_t response_q_address[2];
+	uint32_t prio_request_q_address[2];
+
+	uint8_t reserved_4[8];
+
+	uint16_t atio_q_inpointer;
+	uint16_t atio_q_length;
+	uint32_t atio_q_address[2];
+
+	uint16_t interrupt_delay_timer;		/* 100us increments. */
+	uint16_t login_timeout;
+
+	/*
+	 * BIT 0-3 = Reserved
+	 * BIT 4 = Enable Target Mode
+	 * BIT 5 = Disable Initiator Mode
+	 * BIT 6 = Reserved
+	 * BIT 7 = Reserved
+	 *
+	 * BIT 8-13 = Reserved
+	 * BIT 14 = Node Name Option
+	 * BIT 15-31 = Reserved
+	 */
+	uint32_t firmware_options_1;
+
+	/*
+	 * BIT 0 = Operation Mode bit 0
+	 * BIT 1 = Operation Mode bit 1
+	 * BIT 2 = Operation Mode bit 2
+	 * BIT 3 = Operation Mode bit 3
+	 * BIT 4-7 = Reserved
+	 *
+	 * BIT 8 = Enable Class 2
+	 * BIT 9 = Enable ACK0
+	 * BIT 10 = Reserved
+	 * BIT 11 = Enable FC-SP Security
+	 * BIT 12 = FC Tape Enable
+	 * BIT 13 = Reserved
+	 * BIT 14 = Enable Target PRLI Control
+	 * BIT 15-31 = Reserved
+	 */
+	uint32_t firmware_options_2;
+
+	/*
+	 * BIT 0-3 = Reserved
+	 * BIT 4 = FCP RSP Payload bit 0
+	 * BIT 5 = FCP RSP Payload bit 1
+	 * BIT 6 = Enable Receive Out-of-Order data frame handling
+	 * BIT 7 = Reserved
+	 *
+	 * BIT 8 = Reserved
+	 * BIT 9 = Enable Out-of-Order FCP_XFER_RDY relative offset handling
+	 * BIT 10-16 = Reserved
+	 * BIT 17 = Enable multiple FCFs
+	 * BIT 18-20 = MAC addressing mode
+	 * BIT 21-25 = Ethernet data rate
+	 * BIT 26 = Enable ethernet header rx IOCB for ATIO q
+	 * BIT 27 = Enable ethernet header rx IOCB for response q
+	 * BIT 28 = SPMA selection bit 0
+	 * BIT 28 = SPMA selection bit 1
+	 * BIT 30-31 = Reserved
+	 */
+	uint32_t firmware_options_3;
+
+	uint8_t reserved_5[8];
+
+	uint8_t enode_mac[6];
+
+	uint8_t reserved_6[10];
+};
+
+struct mid_init_cb_81xx {
+	struct init_cb_81xx init_cb;
+
+	uint16_t count;
+	uint16_t options;
+
+	struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
+};
+
+#define FARX_ACCESS_FLASH_CONF_81XX	0x7FFD0000
+#define FARX_ACCESS_FLASH_DATA_81XX	0x7F800000
+
+/* 81XX Flash locations -- occupies second 2MB region. */
+#define FA_BOOT_CODE_ADDR_81	0x80000
+#define FA_RISC_CODE_ADDR_81	0xA0000
+#define FA_FW_AREA_ADDR_81	0xC0000
+#define FA_VPD_NVRAM_ADDR_81	0xD0000
+#define FA_FEATURE_ADDR_81	0xD4000
+#define FA_FLASH_DESCR_ADDR_81	0xD8000
+#define FA_FLASH_LAYOUT_ADDR_81	0xD8400
+#define FA_HW_EVENT0_ADDR_81	0xDC000
+#define FA_HW_EVENT1_ADDR_81	0xDC400
+#define FA_NPIV_CONF0_ADDR_81	0xD1000
+#define FA_NPIV_CONF1_ADDR_81	0xD2000
+
 #endif
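
Structures such as nvram_81xx mirror a fixed on-flash layout, which is why the declarations carry running "/* Offset NN. */" annotations. Those offsets can be pinned at compile time so any accidental padding shift is caught immediately; a minimal sketch against a hypothetical layout:

	#include <stdint.h>
	#include <stddef.h>
	#include <assert.h>

	/* Hypothetical flash layout with a documented offset. */
	struct demo_nvram {
		uint8_t  id[4];
		uint16_t version;
		uint16_t reserved_0[29];
		/* Offset 64. */
		uint8_t  enode_mac[6];
	};

	/* Fails to compile if the documented offset ever shifts. */
	static_assert(offsetof(struct demo_nvram, enode_mac) == 64,
	    "enode_mac must sit at flash offset 64");

Kernel code of this era expressed the same invariant with BUILD_BUG_ON(); the C11 static_assert above is the userspace equivalent.
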
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0011e31205db..ba4913353752 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -28,8 +28,10 @@ extern void qla2x00_reset_adapter(struct scsi_qla_host *);
 extern void qla24xx_reset_adapter(struct scsi_qla_host *);
 extern int qla2x00_nvram_config(struct scsi_qla_host *);
 extern int qla24xx_nvram_config(struct scsi_qla_host *);
+extern int qla81xx_nvram_config(struct scsi_qla_host *);
 extern void qla2x00_update_fw_options(struct scsi_qla_host *);
 extern void qla24xx_update_fw_options(scsi_qla_host_t *);
+extern void qla81xx_update_fw_options(scsi_qla_host_t *);
 extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
 extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
 
@@ -69,8 +71,6 @@ extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
 extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
     fc_host_event_code, u32);
-extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
-    uint16_t, uint16_t);
 
 extern void qla2x00_abort_fcport_cmds(fc_port_t *);
 extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
@@ -143,7 +143,7 @@ qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
 
 extern void
 qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *,
-    uint16_t *, uint16_t *, uint16_t *, uint32_t *);
+    uint16_t *, uint16_t *, uint16_t *, uint32_t *, uint8_t *, uint32_t *);
 
 extern int
 qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *);
@@ -317,9 +317,6 @@ extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
 extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
 extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
 
-extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
-    uint16_t, uint16_t);
-
 extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
 extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
 
@@ -332,6 +329,7 @@ extern void qla2100_fw_dump(scsi_qla_host_t *, int);
 extern void qla2300_fw_dump(scsi_qla_host_t *, int);
 extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla2x00_dump_regs(scsi_qla_host_t *);
 extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
 
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 0a6f72973996..557f58d5bf88 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1535,7 +1535,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1535 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1535 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1536 eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); 1536 eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1537 eiter->len = __constant_cpu_to_be16(4 + 4); 1537 eiter->len = __constant_cpu_to_be16(4 + 4);
1538 if (IS_QLA25XX(ha)) 1538 if (IS_QLA81XX(ha))
1539 eiter->a.sup_speed = __constant_cpu_to_be32(
1540 FDMI_PORT_SPEED_10GB);
1541 else if (IS_QLA25XX(ha))
1539 eiter->a.sup_speed = __constant_cpu_to_be32( 1542 eiter->a.sup_speed = __constant_cpu_to_be32(
1540 FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| 1543 FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
1541 FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB); 1544 FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
@@ -1575,6 +1578,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1575 eiter->a.cur_speed = 1578 eiter->a.cur_speed =
1576 __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB); 1579 __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
1577 break; 1580 break;
1581 case PORT_SPEED_10GB:
1582 eiter->a.cur_speed =
1583 __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
1584 break;
1578 default: 1585 default:
1579 eiter->a.cur_speed = 1586 eiter->a.cur_speed =
1580 __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN); 1587 __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
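Both RPA hunks follow the same shape: the supported-speed attribute is a bitmask fixed per adapter family, while the current-speed attribute is a single value selected by the negotiated rate, now including 10 Gb/s. A minimal userspace sketch of that mapping; the FDMI_* and PORT_SPEED_* values here are illustrative stand-ins, not the driver's definitions:

#include <stdio.h>

/* Illustrative stand-ins; the driver's FDMI_PORT_SPEED_* values differ. */
#define FDMI_PORT_SPEED_1GB     0x0001
#define FDMI_PORT_SPEED_2GB     0x0002
#define FDMI_PORT_SPEED_4GB     0x0008
#define FDMI_PORT_SPEED_8GB     0x0010
#define FDMI_PORT_SPEED_10GB    0x0004
#define FDMI_PORT_SPEED_UNKNOWN 0x8000

enum port_speed { PORT_SPEED_1GB, PORT_SPEED_2GB, PORT_SPEED_4GB,
		  PORT_SPEED_8GB, PORT_SPEED_10GB };

static unsigned int fdmi_cur_speed(enum port_speed s)
{
	switch (s) {
	case PORT_SPEED_1GB:  return FDMI_PORT_SPEED_1GB;
	case PORT_SPEED_2GB:  return FDMI_PORT_SPEED_2GB;
	case PORT_SPEED_4GB:  return FDMI_PORT_SPEED_4GB;
	case PORT_SPEED_8GB:  return FDMI_PORT_SPEED_8GB;
	case PORT_SPEED_10GB: return FDMI_PORT_SPEED_10GB; /* new for 81xx */
	default:              return FDMI_PORT_SPEED_UNKNOWN;
	}
}

int main(void)
{
	printf("10Gb maps to 0x%04x\n", fdmi_cur_speed(PORT_SPEED_10GB));
	return 0;
}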
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 52ed56ecf195..2d4f32b4df5c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -552,10 +552,6 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
552 d2 = RD_REG_DWORD(&reg->ctrl_status); 552 d2 = RD_REG_DWORD(&reg->ctrl_status);
553 barrier(); 553 barrier();
554 } 554 }
555 if (cnt == 0 || hw_evt)
556 qla2xxx_hw_event_log(vha, HW_EVENT_RESET_ERR,
557 RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2),
558 RD_REG_WORD(&reg->mailbox3));
559 555
560 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET); 556 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
561 RD_REG_DWORD(&reg->hccr); 557 RD_REG_DWORD(&reg->hccr);
@@ -574,6 +570,9 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
574 } 570 }
575 571
576 spin_unlock_irqrestore(&ha->hardware_lock, flags); 572 spin_unlock_irqrestore(&ha->hardware_lock, flags);
573
574 if (IS_NOPOLLING_TYPE(ha))
575 ha->isp_ops->enable_intrs(ha);
577} 576}
578 577
579/** 578/**
@@ -779,16 +778,19 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
779 mem_size = (ha->fw_memory_size - 0x11000 + 1) * 778 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
780 sizeof(uint16_t); 779 sizeof(uint16_t);
781 } else if (IS_FWI2_CAPABLE(ha)) { 780 } else if (IS_FWI2_CAPABLE(ha)) {
782 fixed_size = IS_QLA25XX(ha) ? 781 if (IS_QLA81XX(ha))
783 offsetof(struct qla25xx_fw_dump, ext_mem) : 782 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
784 offsetof(struct qla24xx_fw_dump, ext_mem); 783 else if (IS_QLA25XX(ha))
784 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
785 else
786 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
785 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 787 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
786 sizeof(uint32_t); 788 sizeof(uint32_t);
787 if (ha->mqenable) 789 if (ha->mqenable)
788 mq_size = sizeof(struct qla2xxx_mq_chain); 790 mq_size = sizeof(struct qla2xxx_mq_chain);
789 791
790 /* Allocate memory for Fibre Channel Event Buffer. */ 792 /* Allocate memory for Fibre Channel Event Buffer. */
791 if (!IS_QLA25XX(ha)) 793 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
792 goto try_eft; 794 goto try_eft;
793 795
794 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 796 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
@@ -851,7 +853,9 @@ cont_alloc:
851 853
852 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 854 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
853 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + 855 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
854 mq_size + eft_size + fce_size; 856 eft_size;
857 ha->chain_offset = dump_size;
858 dump_size += mq_size + fce_size;
855 859
856 ha->fw_dump = vmalloc(dump_size); 860 ha->fw_dump = vmalloc(dump_size);
857 if (!ha->fw_dump) { 861 if (!ha->fw_dump) {
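The sizing above now counts everything of fixed shape first (header, fixed registers, external memory, queues, EFT), records that running total in ha->chain_offset, and only then adds the chained multi-queue and FCE entries. A back-of-the-envelope sketch of the bookkeeping, with made-up sizes:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative sizes; the driver derives these from the chip. */
	size_t header = 64, fixed = 4096, mem = 8192;
	size_t req_q = 512, rsp_q = 512, eft = 1024;
	size_t mq = 128, fce = 2048;

	size_t dump_size = header + fixed + mem + req_q + rsp_q + eft;
	size_t chain_offset = dump_size;  /* chained entries start here */
	dump_size += mq + fce;

	printf("total=%zu, chains at %zu\n", dump_size, chain_offset);
	return 0;
}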
@@ -987,7 +991,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
987 &ha->fw_major_version, 991 &ha->fw_major_version,
988 &ha->fw_minor_version, 992 &ha->fw_minor_version,
989 &ha->fw_subminor_version, 993 &ha->fw_subminor_version,
990 &ha->fw_attributes, &ha->fw_memory_size); 994 &ha->fw_attributes, &ha->fw_memory_size,
995 ha->mpi_version, &ha->mpi_capabilities);
991 ha->flags.npiv_supported = 0; 996 ha->flags.npiv_supported = 0;
992 if (IS_QLA2XXX_MIDTYPE(ha) && 997 if (IS_QLA2XXX_MIDTYPE(ha) &&
993 (ha->fw_attributes & BIT_2)) { 998 (ha->fw_attributes & BIT_2)) {
@@ -1665,10 +1670,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
1665 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 1670 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
1666 "invalid -- WWPN) defaults.\n"); 1671 "invalid -- WWPN) defaults.\n");
1667 1672
1668 if (chksum)
1669 qla2xxx_hw_event_log(vha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
1670 MSW(chksum), LSW(chksum));
1671
1672 /* 1673 /*
1673 * Set default initialization control block. 1674 * Set default initialization control block.
1674 */ 1675 */
@@ -4255,3 +4256,269 @@ qla84xx_init_chip(scsi_qla_host_t *vha)
4255 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED: 4256 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
4256 QLA_SUCCESS; 4257 QLA_SUCCESS;
4257} 4258}
4259
4260/* 81XX Support **************************************************************/
4261
4262int
4263qla81xx_nvram_config(scsi_qla_host_t *vha)
4264{
4265 int rval;
4266 struct init_cb_81xx *icb;
4267 struct nvram_81xx *nv;
4268 uint32_t *dptr;
4269 uint8_t *dptr1, *dptr2;
4270 uint32_t chksum;
4271 uint16_t cnt;
4272 struct qla_hw_data *ha = vha->hw;
4273
4274 rval = QLA_SUCCESS;
4275 icb = (struct init_cb_81xx *)ha->init_cb;
4276 nv = ha->nvram;
4277
4278 /* Determine NVRAM starting address. */
4279 ha->nvram_size = sizeof(struct nvram_81xx);
4280 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
4281 ha->vpd_size = FA_NVRAM_VPD_SIZE;
4282 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
4283 if (PCI_FUNC(ha->pdev->devfn) & 1) {
4284 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
4285 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
4286 }
4287
4288 /* Get VPD data into cache */
4289 ha->vpd = ha->nvram + VPD_OFFSET;
4290 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
4291 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
4292
4293 /* Get NVRAM data into cache and calculate checksum. */
4294 dptr = (uint32_t *)nv;
4295 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
4296 ha->nvram_size);
4297 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4298 chksum += le32_to_cpu(*dptr++);
4299
4300 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no));
4301 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4302
 4303	/* Bad NVRAM data, set default parameters. */
4304 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4305 || nv->id[3] != ' ' ||
4306 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4307 /* Reset NVRAM data. */
4308 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
4309 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
4310 le16_to_cpu(nv->nvram_version));
4311 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
4312 "invalid -- WWPN) defaults.\n");
4313
4314 /*
4315 * Set default initialization control block.
4316 */
4317 memset(nv, 0, ha->nvram_size);
4318 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4319 nv->version = __constant_cpu_to_le16(ICB_VERSION);
4320 nv->frame_payload_size = __constant_cpu_to_le16(2048);
4321 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4322 nv->exchange_count = __constant_cpu_to_le16(0);
4323 nv->port_name[0] = 0x21;
4324 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
4325 nv->port_name[2] = 0x00;
4326 nv->port_name[3] = 0xe0;
4327 nv->port_name[4] = 0x8b;
4328 nv->port_name[5] = 0x1c;
4329 nv->port_name[6] = 0x55;
4330 nv->port_name[7] = 0x86;
4331 nv->node_name[0] = 0x20;
4332 nv->node_name[1] = 0x00;
4333 nv->node_name[2] = 0x00;
4334 nv->node_name[3] = 0xe0;
4335 nv->node_name[4] = 0x8b;
4336 nv->node_name[5] = 0x1c;
4337 nv->node_name[6] = 0x55;
4338 nv->node_name[7] = 0x86;
4339 nv->login_retry_count = __constant_cpu_to_le16(8);
4340 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4341 nv->login_timeout = __constant_cpu_to_le16(0);
4342 nv->firmware_options_1 =
4343 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4344 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4345 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4346 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4347 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4348 nv->efi_parameters = __constant_cpu_to_le32(0);
4349 nv->reset_delay = 5;
4350 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4351 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4352 nv->link_down_timeout = __constant_cpu_to_le16(30);
4353 nv->enode_mac[0] = 0x01;
4354 nv->enode_mac[1] = 0x02;
4355 nv->enode_mac[2] = 0x03;
4356 nv->enode_mac[3] = 0x04;
4357 nv->enode_mac[4] = 0x05;
4358 nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
4359
4360 rval = 1;
4361 }
4362
4363 /* Reset Initialization control block */
4364 memset(icb, 0, sizeof(struct init_cb_81xx));
4365
4366 /* Copy 1st segment. */
4367 dptr1 = (uint8_t *)icb;
4368 dptr2 = (uint8_t *)&nv->version;
4369 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
4370 while (cnt--)
4371 *dptr1++ = *dptr2++;
4372
4373 icb->login_retry_count = nv->login_retry_count;
4374
4375 /* Copy 2nd segment. */
4376 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
4377 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
4378 cnt = (uint8_t *)&icb->reserved_5 -
4379 (uint8_t *)&icb->interrupt_delay_timer;
4380 while (cnt--)
4381 *dptr1++ = *dptr2++;
4382
4383 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
4384 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
4385 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
4386 icb->enode_mac[0] = 0x01;
4387 icb->enode_mac[1] = 0x02;
4388 icb->enode_mac[2] = 0x03;
4389 icb->enode_mac[3] = 0x04;
4390 icb->enode_mac[4] = 0x05;
4391 icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
4392 }
4393
4394 /*
4395 * Setup driver NVRAM options.
4396 */
4397 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4398 "QLE81XX");
4399
4400 /* Use alternate WWN? */
4401 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4402 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4403 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4404 }
4405
4406 /* Prepare nodename */
4407 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
4408 /*
4409 * Firmware will apply the following mask if the nodename was
4410 * not provided.
4411 */
4412 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4413 icb->node_name[0] &= 0xF0;
4414 }
4415
4416 /* Set host adapter parameters. */
4417 ha->flags.disable_risc_code_load = 0;
4418 ha->flags.enable_lip_reset = 0;
4419 ha->flags.enable_lip_full_login =
4420 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
4421 ha->flags.enable_target_reset =
4422 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
4423 ha->flags.enable_led_scheme = 0;
4424 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
4425
4426 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
4427 (BIT_6 | BIT_5 | BIT_4)) >> 4;
4428
4429 /* save HBA serial number */
4430 ha->serial0 = icb->port_name[5];
4431 ha->serial1 = icb->port_name[6];
4432 ha->serial2 = icb->port_name[7];
4433 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4434 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
4435
4436 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4437
4438 ha->retry_count = le16_to_cpu(nv->login_retry_count);
4439
4440 /* Set minimum login_timeout to 4 seconds. */
4441 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
4442 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
4443 if (le16_to_cpu(nv->login_timeout) < 4)
4444 nv->login_timeout = __constant_cpu_to_le16(4);
4445 ha->login_timeout = le16_to_cpu(nv->login_timeout);
4446 icb->login_timeout = nv->login_timeout;
4447
4448 /* Set minimum RATOV to 100 tenths of a second. */
4449 ha->r_a_tov = 100;
4450
4451 ha->loop_reset_delay = nv->reset_delay;
4452
4453 /* Link Down Timeout = 0:
4454 *
4455 * When Port Down timer expires we will start returning
 4456	 * I/Os to OS with "DID_NO_CONNECT".
4457 *
4458 * Link Down Timeout != 0:
4459 *
4460 * The driver waits for the link to come up after link down
4461 * before returning I/Os to OS with "DID_NO_CONNECT".
4462 */
4463 if (le16_to_cpu(nv->link_down_timeout) == 0) {
4464 ha->loop_down_abort_time =
4465 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4466 } else {
4467 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
4468 ha->loop_down_abort_time =
4469 (LOOP_DOWN_TIME - ha->link_down_timeout);
4470 }
4471
4472 /* Need enough time to try and get the port back. */
4473 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
4474 if (qlport_down_retry)
4475 ha->port_down_retry_count = qlport_down_retry;
4476
4477 /* Set login_retry_count */
4478 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
4479 if (ha->port_down_retry_count ==
4480 le16_to_cpu(nv->port_down_retry_count) &&
4481 ha->port_down_retry_count > 3)
4482 ha->login_retry_count = ha->port_down_retry_count;
4483 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4484 ha->login_retry_count = ha->port_down_retry_count;
4485 if (ql2xloginretrycount)
4486 ha->login_retry_count = ql2xloginretrycount;
4487
4488 /* Enable ZIO. */
4489 if (!vha->flags.init_done) {
4490 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
4491 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4492 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
4493 le16_to_cpu(icb->interrupt_delay_timer): 2;
4494 }
4495 icb->firmware_options_2 &= __constant_cpu_to_le32(
4496 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
4497 vha->flags.process_response_queue = 0;
4498 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4499 ha->zio_mode = QLA_ZIO_MODE_6;
4500
4501 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
4502 "(%d us).\n", vha->host_no, ha->zio_mode,
4503 ha->zio_timer * 100));
4504 qla_printk(KERN_INFO, ha,
4505 "ZIO mode %d enabled; timer delay (%d us).\n",
4506 ha->zio_mode, ha->zio_timer * 100);
4507
4508 icb->firmware_options_2 |= cpu_to_le32(
4509 (uint32_t)ha->zio_mode);
4510 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
4511 vha->flags.process_response_queue = 1;
4512 }
4513
4514 if (rval) {
4515 DEBUG2_3(printk(KERN_WARNING
4516 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
4517 }
4518 return (rval);
4519}
4520
4521void
4522qla81xx_update_fw_options(scsi_qla_host_t *ha)
4523{
4524}
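qla81xx_nvram_config() validates the image by summing it as little-endian 32-bit words; valid images are written so the words sum to zero modulo 2^32. A standalone sketch of that check:

#include <stdint.h>
#include <stdio.h>

/* Valid NVRAM images are laid out so their LE32 words sum to 0. */
static int nvram_ok(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 4 <= len; i += 4)
		sum += (uint32_t)buf[i] |
		       (uint32_t)buf[i + 1] << 8 |
		       (uint32_t)buf[i + 2] << 16 |
		       (uint32_t)buf[i + 3] << 24;
	return sum == 0;
}

int main(void)
{
	uint8_t img[8] = { 0 };

	img[0] = 0x34; img[1] = 0x12;            /* word 0 = 0x00001234 */
	img[4] = 0xcc; img[5] = 0xed;            /* word 1 = 0xffffedcc */
	img[6] = 0xff; img[7] = 0xff;            /* ... sums to 0 mod 2^32 */
	printf("checksum ok: %d\n", nvram_ok(img, sizeof(img)));
	return 0;
}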
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 5bedc9d05942..2258152b1f41 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -173,7 +173,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
173 return; 173 return;
174 } 174 }
175 175
176 vha = sp->vha; 176 vha = sp->fcport->vha;
177 req = sp->que; 177 req = sp->que;
178 178
179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
@@ -234,7 +234,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
234 return; 234 return;
235 } 235 }
236 236
237 vha = sp->vha; 237 vha = sp->fcport->vha;
238 req = sp->que; 238 req = sp->que;
239 239
240 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 240 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
@@ -294,7 +294,7 @@ qla2x00_start_scsi(srb_t *sp)
294 294
295 /* Setup device pointers. */ 295 /* Setup device pointers. */
296 ret = 0; 296 ret = 0;
297 vha = sp->vha; 297 vha = sp->fcport->vha;
298 ha = vha->hw; 298 ha = vha->hw;
299 reg = &ha->iobase->isp; 299 reg = &ha->iobase->isp;
300 cmd = sp->cmd; 300 cmd = sp->cmd;
@@ -353,7 +353,6 @@ qla2x00_start_scsi(srb_t *sp)
353 /* Build command packet */ 353 /* Build command packet */
354 req->current_outstanding_cmd = handle; 354 req->current_outstanding_cmd = handle;
355 req->outstanding_cmds[handle] = sp; 355 req->outstanding_cmds[handle] = sp;
356 sp->vha = vha;
357 sp->que = req; 356 sp->que = req;
358 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 357 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
359 req->cnt -= req_cnt; 358 req->cnt -= req_cnt;
@@ -656,7 +655,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
656 return; 655 return;
657 } 656 }
658 657
659 vha = sp->vha; 658 vha = sp->fcport->vha;
660 req = sp->que; 659 req = sp->que;
661 660
662 /* Set transfer direction */ 661 /* Set transfer direction */
@@ -723,7 +722,7 @@ qla24xx_start_scsi(srb_t *sp)
723 struct req_que *req = NULL; 722 struct req_que *req = NULL;
724 struct rsp_que *rsp = NULL; 723 struct rsp_que *rsp = NULL;
725 struct scsi_cmnd *cmd = sp->cmd; 724 struct scsi_cmnd *cmd = sp->cmd;
726 struct scsi_qla_host *vha = sp->vha; 725 struct scsi_qla_host *vha = sp->fcport->vha;
727 struct qla_hw_data *ha = vha->hw; 726 struct qla_hw_data *ha = vha->hw;
728 uint16_t que_id; 727 uint16_t que_id;
729 728
@@ -791,7 +790,6 @@ qla24xx_start_scsi(srb_t *sp)
791 /* Build command packet. */ 790 /* Build command packet. */
792 req->current_outstanding_cmd = handle; 791 req->current_outstanding_cmd = handle;
793 req->outstanding_cmds[handle] = sp; 792 req->outstanding_cmds[handle] = sp;
794 sp->vha = vha;
795 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 793 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
796 req->cnt -= req_cnt; 794 req->cnt -= req_cnt;
797 795
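The recurring change in qla_iocb.c is the removal of the cached sp->vha pointer: the host is now always derived through the port the command targets, so there is one fewer field to keep coherent. A reduced sketch of the ownership chain, with simplified stand-in types:

#include <stdio.h>

struct scsi_qla_host { long host_no; };
struct fc_port       { struct scsi_qla_host *vha; };
struct srb           { struct fc_port *fcport; };

/* The host is always reachable via the port, so it need not be
 * cached on the srb itself. */
static struct scsi_qla_host *srb_host(struct srb *sp)
{
	return sp->fcport->vha;
}

int main(void)
{
	struct scsi_qla_host vha = { .host_no = 7 };
	struct fc_port port = { .vha = &vha };
	struct srb sp = { .fcport = &port };

	printf("scsi(%ld)\n", srb_host(&sp)->host_no);
	return 0;
}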
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d5fb79a88001..789fc576f222 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -275,7 +275,7 @@ void
275qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 275qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
276{ 276{
277#define LS_UNKNOWN 2 277#define LS_UNKNOWN 2
278 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 278 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
279 char *link_speed; 279 char *link_speed;
280 uint16_t handle_cnt; 280 uint16_t handle_cnt;
281 uint16_t cnt; 281 uint16_t cnt;
@@ -288,6 +288,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
288 288
289 /* Setup to process RIO completion. */ 289 /* Setup to process RIO completion. */
290 handle_cnt = 0; 290 handle_cnt = 0;
291 if (IS_QLA81XX(ha))
292 goto skip_rio;
291 switch (mb[0]) { 293 switch (mb[0]) {
292 case MBA_SCSI_COMPLETION: 294 case MBA_SCSI_COMPLETION:
293 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 295 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
@@ -339,7 +341,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
339 default: 341 default:
340 break; 342 break;
341 } 343 }
342 344skip_rio:
343 switch (mb[0]) { 345 switch (mb[0]) {
344 case MBA_SCSI_COMPLETION: /* Fast Post */ 346 case MBA_SCSI_COMPLETION: /* Fast Post */
345 if (!vha->flags.online) 347 if (!vha->flags.online)
@@ -362,7 +364,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
362 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", 364 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
363 mb[1], mb[2], mb[3]); 365 mb[1], mb[2], mb[3]);
364 366
365 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
366 ha->isp_ops->fw_dump(vha, 1); 367 ha->isp_ops->fw_dump(vha, 1);
367 368
368 if (IS_FWI2_CAPABLE(ha)) { 369 if (IS_FWI2_CAPABLE(ha)) {
@@ -387,7 +388,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
387 vha->host_no)); 388 vha->host_no));
388 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); 389 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
389 390
390 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
391 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 391 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
392 break; 392 break;
393 393
@@ -396,7 +396,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
396 vha->host_no)); 396 vha->host_no));
397 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); 397 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
398 398
399 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
400 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 399 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
401 break; 400 break;
402 401
@@ -436,6 +435,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
436 link_speed = link_speeds[LS_UNKNOWN]; 435 link_speed = link_speeds[LS_UNKNOWN];
437 if (mb[1] < 5) 436 if (mb[1] < 5)
438 link_speed = link_speeds[mb[1]]; 437 link_speed = link_speeds[mb[1]];
438 else if (mb[1] == 0x13)
439 link_speed = link_speeds[5];
439 ha->link_data_rate = mb[1]; 440 ha->link_data_rate = mb[1];
440 } 441 }
441 442
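The table change above grows the speed-string array and special-cases rate code 0x13, which the firmware reports for the new 10 Gb/s link. A sketch of the lookup:

#include <stdio.h>

#define LS_UNKNOWN 2
static const char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };

/* mb[1] carries the rate code; 0x13 is the 10 Gb/s code on ISP81xx. */
static const char *speed_str(unsigned int code)
{
	if (code < 5)
		return link_speeds[code];
	if (code == 0x13)
		return link_speeds[5];
	return link_speeds[LS_UNKNOWN];
}

int main(void)
{
	printf("0x13 -> %s Gb/s\n", speed_str(0x13));
	return 0;
}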
@@ -495,12 +496,17 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
495 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 496 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
496 break; 497 break;
497 498
499 /* case MBA_DCBX_COMPLETE: */
498 case MBA_POINT_TO_POINT: /* Point-to-Point */ 500 case MBA_POINT_TO_POINT: /* Point-to-Point */
499 if (IS_QLA2100(ha)) 501 if (IS_QLA2100(ha))
500 break; 502 break;
501 503
502 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n", 504 if (IS_QLA81XX(ha))
503 vha->host_no)); 505 DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
506 "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
507 else
508 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
509 "received.\n", vha->host_no));
504 510
505 /* 511 /*
506 * Until there's a transition from loop down to loop up, treat 512 * Until there's a transition from loop down to loop up, treat
@@ -641,10 +647,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
641 647
642 /* case MBA_RIO_RESPONSE: */ 648 /* case MBA_RIO_RESPONSE: */
643 case MBA_ZIO_RESPONSE: 649 case MBA_ZIO_RESPONSE:
644 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n", 650 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
645 vha->host_no));
646 DEBUG(printk(KERN_INFO
647 "scsi(%ld): [R|Z]IO update completion.\n",
648 vha->host_no)); 651 vha->host_no));
649 652
650 if (IS_FWI2_CAPABLE(ha)) 653 if (IS_FWI2_CAPABLE(ha))
@@ -698,6 +701,35 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
698 } 701 }
699 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 702 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
700 break; 703 break;
704 case MBA_DCBX_START:
705 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
706 vha->host_no, mb[1], mb[2], mb[3]));
707 break;
708 case MBA_DCBX_PARAM_UPDATE:
709 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
710 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
711 break;
712 case MBA_FCF_CONF_ERR:
713 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
714 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
715 break;
716 case MBA_IDC_COMPLETE:
 717		DEBUG2(printk("scsi(%ld): Inter-Driver Communication "
718 "Complete -- %04x %04x %04x\n", vha->host_no, mb[1], mb[2],
719 mb[3]));
720 break;
721 case MBA_IDC_NOTIFY:
 722		DEBUG2(printk("scsi(%ld): Inter-Driver Communication "
723 "Request Notification -- %04x %04x %04x\n", vha->host_no,
724 mb[1], mb[2], mb[3]));
725 /**** Mailbox registers 4 - 7 valid!!! */
726 break;
727 case MBA_IDC_TIME_EXT:
 728		DEBUG2(printk("scsi(%ld): Inter-Driver Communication "
729 "Time Extension -- %04x %04x %04x\n", vha->host_no, mb[1],
730 mb[2], mb[3]));
731 /**** Mailbox registers 4 - 7 valid!!! */
732 break;
701 } 733 }
702 734
703 if (!vha->vp_idx && ha->num_vhosts) 735 if (!vha->vp_idx && ha->num_vhosts)
@@ -1510,7 +1542,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1510 struct qla_hw_data *ha = vha->hw; 1542 struct qla_hw_data *ha = vha->hw;
1511 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1543 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1512 1544
1513 if (!IS_QLA25XX(ha)) 1545 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
1514 return; 1546 return;
1515 1547
1516 rval = QLA_SUCCESS; 1548 rval = QLA_SUCCESS;
@@ -1590,12 +1622,6 @@ qla24xx_intr_handler(int irq, void *dev_id)
1590 if (pci_channel_offline(ha->pdev)) 1622 if (pci_channel_offline(ha->pdev))
1591 break; 1623 break;
1592 1624
1593 if (ha->hw_event_pause_errors == 0)
1594 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1595 0, MSW(stat), LSW(stat));
1596 else if (ha->hw_event_pause_errors < 0xffffffff)
1597 ha->hw_event_pause_errors++;
1598
1599 hccr = RD_REG_DWORD(&reg->hccr); 1625 hccr = RD_REG_DWORD(&reg->hccr);
1600 1626
1601 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1627 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
@@ -1740,12 +1766,6 @@ qla24xx_msix_default(int irq, void *dev_id)
1740 if (pci_channel_offline(ha->pdev)) 1766 if (pci_channel_offline(ha->pdev))
1741 break; 1767 break;
1742 1768
1743 if (ha->hw_event_pause_errors == 0)
1744 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1745 0, MSW(stat), LSW(stat));
1746 else if (ha->hw_event_pause_errors < 0xffffffff)
1747 ha->hw_event_pause_errors++;
1748
1749 hccr = RD_REG_DWORD(&reg->hccr); 1769 hccr = RD_REG_DWORD(&reg->hccr);
1750 1770
1751 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1771 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
@@ -1944,7 +1964,8 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1944 device_reg_t __iomem *reg = ha->iobase; 1964 device_reg_t __iomem *reg = ha->iobase;
1945 1965
1946 /* If possible, enable MSI-X. */ 1966 /* If possible, enable MSI-X. */
1947 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) 1967 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
1968 !IS_QLA8432(ha) && !IS_QLA8001(ha))
1948 goto skip_msix; 1969 goto skip_msix;
1949 1970
1950 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || 1971 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
@@ -1979,7 +2000,8 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1979 "MSI-X: Falling back-to INTa mode -- %d.\n", ret); 2000 "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
1980skip_msix: 2001skip_msix:
1981 2002
1982 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) 2003 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2004 !IS_QLA8001(ha))
1983 goto skip_msi; 2005 goto skip_msi;
1984 2006
1985 ret = pci_enable_msi(ha->pdev); 2007 ret = pci_enable_msi(ha->pdev);
@@ -2000,6 +2022,12 @@ skip_msi:
2000 ha->flags.inta_enabled = 1; 2022 ha->flags.inta_enabled = 1;
2001clear_risc_ints: 2023clear_risc_ints:
2002 2024
2025 /*
2026 * FIXME: Noted that 8014s were being dropped during NK testing.
2027 * Timing deltas during MSI-X/INTa transitions?
2028 */
2029 if (IS_QLA81XX(ha))
2030 goto fail;
2003 spin_lock_irq(&ha->hardware_lock); 2031 spin_lock_irq(&ha->hardware_lock);
2004 if (IS_FWI2_CAPABLE(ha)) { 2032 if (IS_FWI2_CAPABLE(ha)) {
2005 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT); 2033 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
@@ -2044,7 +2072,7 @@ qla2x00_get_rsp_host(struct rsp_que *rsp)
2044 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) { 2072 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2045 sp = req->outstanding_cmds[pkt->handle]; 2073 sp = req->outstanding_cmds[pkt->handle];
2046 if (sp) 2074 if (sp)
2047 vha = sp->vha; 2075 vha = sp->fcport->vha;
2048 } 2076 }
2049 } 2077 }
2050 if (!vha) 2078 if (!vha)
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index a99976f5fabd..db4df45234a5 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -123,8 +123,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
123 123
124 /* Wait for mbx cmd completion until timeout */ 124 /* Wait for mbx cmd completion until timeout */
125 125
126 if (!abort_active && io_lock_on) { 126 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
127
128 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 127 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
129 128
130 if (IS_FWI2_CAPABLE(ha)) 129 if (IS_FWI2_CAPABLE(ha))
@@ -218,7 +217,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
218 /* Clean up */ 217 /* Clean up */
219 ha->mcp = NULL; 218 ha->mcp = NULL;
220 219
221 if (abort_active || !io_lock_on) { 220 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
222 DEBUG11(printk("%s(%ld): checking for additional resp " 221 DEBUG11(printk("%s(%ld): checking for additional resp "
223 "interrupt.\n", __func__, base_vha->host_no)); 222 "interrupt.\n", __func__, base_vha->host_no));
224 223
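The two condition changes are symmetric: the command waits on the interrupt-driven path when no abort holds the mailbox and the lock is interrupt-safe, or unconditionally on hardware that cannot be polled; the poll for a stray completion interrupt runs only in the remaining cases. A truth-table sketch with illustrative predicate names:

#include <stdio.h>

static int wait_via_intr(int abort_active, int io_lock_on, int no_polling)
{
	return (!abort_active && io_lock_on) || no_polling;
}

int main(void)
{
	int a, l, n;

	for (a = 0; a < 2; a++)
		for (l = 0; l < 2; l++)
			for (n = 0; n < 2; n++)
				printf("abort=%d lock=%d nopoll=%d -> intr=%d\n",
				    a, l, n, wait_via_intr(a, l, n));
	return 0;
}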
@@ -412,7 +411,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
412 */ 411 */
413void 412void
414qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, 413qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
415 uint16_t *subminor, uint16_t *attributes, uint32_t *memory) 414 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
415 uint32_t *mpi_caps)
416{ 416{
417 int rval; 417 int rval;
418 mbx_cmd_t mc; 418 mbx_cmd_t mc;
@@ -423,6 +423,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
423 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 423 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
424 mcp->out_mb = MBX_0; 424 mcp->out_mb = MBX_0;
425 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 425 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
426 if (IS_QLA81XX(vha->hw))
427 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
426 mcp->flags = 0; 428 mcp->flags = 0;
427 mcp->tov = MBX_TOV_SECONDS; 429 mcp->tov = MBX_TOV_SECONDS;
428 rval = qla2x00_mailbox_command(vha, mcp); 430 rval = qla2x00_mailbox_command(vha, mcp);
@@ -436,6 +438,13 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
436 *memory = 0x1FFFF; /* Defaults to 128KB. */ 438 *memory = 0x1FFFF; /* Defaults to 128KB. */
437 else 439 else
438 *memory = (mcp->mb[5] << 16) | mcp->mb[4]; 440 *memory = (mcp->mb[5] << 16) | mcp->mb[4];
441 if (IS_QLA81XX(vha->hw)) {
442 mpi[0] = mcp->mb[10] >> 8;
443 mpi[1] = mcp->mb[10] & 0xff;
444 mpi[2] = mcp->mb[11] >> 8;
445 mpi[3] = mcp->mb[11] & 0xff;
446 *mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13];
447 }
439 448
440 if (rval != QLA_SUCCESS) { 449 if (rval != QLA_SUCCESS) {
441 /*EMPTY*/ 450 /*EMPTY*/
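For ISP81xx the firmware-version mailbox reply additionally returns the four MPI version bytes in mb[10]/mb[11] and a 32-bit capability word split across mb[12]/mb[13], exactly as unpacked above. The same unpacking as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* mb[10]/mb[11] carry the MPI version bytes, high byte first;
 * mb[12] is the high half of the capability word, mb[13] the low. */
static void unpack_mpi(const uint16_t mb[14], uint8_t mpi[4],
    uint32_t *mpi_caps)
{
	mpi[0] = mb[10] >> 8;
	mpi[1] = mb[10] & 0xff;
	mpi[2] = mb[11] >> 8;
	mpi[3] = mb[11] & 0xff;
	*mpi_caps = ((uint32_t)mb[12] << 16) | mb[13];
}

int main(void)
{
	uint16_t mb[14] = { 0 };
	uint8_t mpi[4];
	uint32_t caps;

	mb[10] = 0x0102; mb[11] = 0x0304;
	mb[12] = 0xbeef; mb[13] = 0xcafe;
	unpack_mpi(mb, mpi, &caps);
	printf("MPI %d.%d.%d.%d caps=%08x\n",
	    mpi[0], mpi[1], mpi[2], mpi[3], (unsigned)caps);
	return 0;
}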
@@ -568,7 +577,6 @@ int
568qla2x00_mbx_reg_test(scsi_qla_host_t *vha) 577qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
569{ 578{
570 int rval; 579 int rval;
571 struct qla_hw_data *ha = vha->hw;
572 mbx_cmd_t mc; 580 mbx_cmd_t mc;
573 mbx_cmd_t *mcp = &mc; 581 mbx_cmd_t *mcp = &mc;
574 582
@@ -595,14 +603,6 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
595 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A || 603 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
596 mcp->mb[7] != 0x2525) 604 mcp->mb[7] != 0x2525)
597 rval = QLA_FUNCTION_FAILED; 605 rval = QLA_FUNCTION_FAILED;
598 if (rval == QLA_FUNCTION_FAILED) {
599 struct device_reg_24xx __iomem *reg =
600 &ha->iobase->isp24;
601
602 qla2xxx_hw_event_log(vha, HW_EVENT_ISP_ERR, 0,
603 LSW(RD_REG_DWORD(&reg->hccr)),
604 LSW(RD_REG_DWORD(&reg->istatus)));
605 }
606 } 606 }
607 607
608 if (rval != QLA_SUCCESS) { 608 if (rval != QLA_SUCCESS) {
@@ -1363,7 +1363,13 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1363 1363
1364 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1364 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1365 1365
1366 if (IS_FWI2_CAPABLE(vha->hw)) { 1366 if (IS_QLA81XX(vha->hw)) {
1367 /* Logout across all FCFs. */
1368 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1369 mcp->mb[1] = BIT_1;
1370 mcp->mb[2] = 0;
1371 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1372 } else if (IS_FWI2_CAPABLE(vha->hw)) {
1367 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1373 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1368 mcp->mb[1] = BIT_6; 1374 mcp->mb[1] = BIT_6;
1369 mcp->mb[2] = 0; 1375 mcp->mb[2] = 0;
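The LIP path above diverges by family: ISP81xx sets BIT_1 in mb[1] to log out across all FCFs, while other FWI2 parts keep the BIT_6 full-login LIP. A sketch of the selection; the BIT_* positions are the usual (1 << n):

#include <stdint.h>
#include <stdio.h>

#define BIT_1 (1u << 1)
#define BIT_6 (1u << 6)

/* 81xx parts request a logout across all FCFs (BIT_1); the other
 * FWI2-capable parts request a full-login LIP (BIT_6). */
static uint16_t lip_reset_mb1(int is_81xx)
{
	return is_81xx ? BIT_1 : BIT_6;
}

int main(void)
{
	printf("mb[1]=0x%x (81xx), 0x%x (others)\n",
	    lip_reset_mb1(1), lip_reset_mb1(0));
	return 0;
}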
@@ -1853,6 +1859,9 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
1853 mbx_cmd_t mc; 1859 mbx_cmd_t mc;
1854 mbx_cmd_t *mcp = &mc; 1860 mbx_cmd_t *mcp = &mc;
1855 1861
1862 if (IS_QLA81XX(vha->hw))
1863 return QLA_SUCCESS;
1864
1856 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1865 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
1857 vha->host_no)); 1866 vha->host_no));
1858 1867
@@ -2512,7 +2521,7 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2512 mbx_cmd_t mc; 2521 mbx_cmd_t mc;
2513 mbx_cmd_t *mcp = &mc; 2522 mbx_cmd_t *mcp = &mc;
2514 2523
2515 if (!IS_QLA25XX(vha->hw)) 2524 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
2516 return QLA_FUNCTION_FAILED; 2525 return QLA_FUNCTION_FAILED;
2517 2526
2518 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2527 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3155,7 +3164,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp,
3155 mcp->mb[7] = LSW(MSD(rsp->dma)); 3164 mcp->mb[7] = LSW(MSD(rsp->dma));
3156 mcp->mb[5] = rsp->length; 3165 mcp->mb[5] = rsp->length;
3157 mcp->mb[11] = rsp->vp_idx; 3166 mcp->mb[11] = rsp->vp_idx;
3158 mcp->mb[14] = rsp->msix->vector; 3167 mcp->mb[14] = rsp->msix->entry;
3159 mcp->mb[13] = rsp->rid; 3168 mcp->mb[13] = rsp->rid;
3160 3169
3161 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) + 3170 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 386ffeae5b5a..886323130fcc 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -614,8 +614,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
614 req->vp_idx = vp_idx; 614 req->vp_idx = vp_idx;
615 req->qos = qos; 615 req->qos = qos;
616 616
617 if (ha->rsp_q_map[rsp_que]) 617 if (ha->rsp_q_map[rsp_que]) {
618 req->rsp = ha->rsp_q_map[rsp_que]; 618 req->rsp = ha->rsp_q_map[rsp_que];
619 req->rsp->req = req;
620 }
619 /* Use alternate PCI bus number */ 621 /* Use alternate PCI bus number */
620 if (MSB(req->rid)) 622 if (MSB(req->rid))
621 options |= BIT_4; 623 options |= BIT_4;
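The fix above pairs the queues in both directions, so a response queue can find its request queue and vice versa. A minimal sketch of the back-pointer pairing:

#include <stdio.h>

struct rsp_que;
struct req_que { struct rsp_que *rsp; };
struct rsp_que { struct req_que *req; };

int main(void)
{
	struct req_que req = { 0 };
	struct rsp_que rsp = { 0 };

	/* Link the queues in both directions, as the hunk above does. */
	req.rsp = &rsp;
	rsp.req = &req;

	printf("paired: %d\n", req.rsp->req == &req);
	return 0;
}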
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8ea927788b3f..4a71f522f925 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -404,26 +404,9 @@ static char *
404qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str) 404qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
405{ 405{
406 struct qla_hw_data *ha = vha->hw; 406 struct qla_hw_data *ha = vha->hw;
407 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
408 ha->fw_minor_version,
409 ha->fw_subminor_version);
410 407
411 if (ha->fw_attributes & BIT_0) 408 sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
412 strcat(str, "[Class 2] "); 409 ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
413 if (ha->fw_attributes & BIT_1)
414 strcat(str, "[IP] ");
415 if (ha->fw_attributes & BIT_2)
416 strcat(str, "[Multi-ID] ");
417 if (ha->fw_attributes & BIT_3)
418 strcat(str, "[SB-2] ");
419 if (ha->fw_attributes & BIT_4)
420 strcat(str, "[T10 CRC] ");
421 if (ha->fw_attributes & BIT_5)
422 strcat(str, "[VI] ");
423 if (ha->fw_attributes & BIT_10)
424 strcat(str, "[84XX] ");
425 if (ha->fw_attributes & BIT_13)
426 strcat(str, "[Experimental]");
427 return str; 410 return str;
428} 411}
429 412
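The simplification above drops the strcat'd attribute-name list and prints the raw attribute word in hex after the version triple. A sketch of the resulting format, with made-up values:

#include <stdio.h>

int main(void)
{
	char str[32];
	unsigned int major = 4, minor = 4, sub = 0, attrs = 0x4007;

	/* Attribute bits formerly expanded to "[IP] [Multi-ID] ..."
	 * are now emitted raw; consumers can still decode them. */
	snprintf(str, sizeof(str), "%d.%02d.%02d (%x)",
	    major, minor, sub, attrs);
	puts(str);
	return 0;
}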
@@ -438,7 +421,6 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
438 if (!sp) 421 if (!sp)
439 return sp; 422 return sp;
440 423
441 sp->vha = vha;
442 sp->fcport = fcport; 424 sp->fcport = fcport;
443 sp->cmd = cmd; 425 sp->cmd = cmd;
444 sp->que = ha->req_q_map[0]; 426 sp->que = ha->req_q_map[0];
@@ -1182,7 +1164,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1182 continue; 1164 continue;
1183 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1165 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1184 sp = req->outstanding_cmds[cnt]; 1166 sp = req->outstanding_cmds[cnt];
1185 if (sp && sp->vha == vha) { 1167 if (sp && sp->fcport->vha == vha) {
1186 req->outstanding_cmds[cnt] = NULL; 1168 req->outstanding_cmds[cnt] = NULL;
1187 sp->cmd->result = res; 1169 sp->cmd->result = res;
1188 qla2x00_sp_compl(ha, sp); 1170 qla2x00_sp_compl(ha, sp);
@@ -1329,6 +1311,8 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
1329 unsigned long flags = 0; 1311 unsigned long flags = 0;
1330 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1312 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1331 1313
1314 if (IS_NOPOLLING_TYPE(ha))
1315 return;
1332 spin_lock_irqsave(&ha->hardware_lock, flags); 1316 spin_lock_irqsave(&ha->hardware_lock, flags);
1333 ha->interrupts_on = 0; 1317 ha->interrupts_on = 0;
1334 WRT_REG_DWORD(&reg->ictrl, 0); 1318 WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1488,6 +1472,44 @@ static struct isp_operations qla25xx_isp_ops = {
1488 .rd_req_reg = qla24xx_rd_req_reg, 1472 .rd_req_reg = qla24xx_rd_req_reg,
1489}; 1473};
1490 1474
1475static struct isp_operations qla81xx_isp_ops = {
1476 .pci_config = qla25xx_pci_config,
1477 .reset_chip = qla24xx_reset_chip,
1478 .chip_diag = qla24xx_chip_diag,
1479 .config_rings = qla24xx_config_rings,
1480 .reset_adapter = qla24xx_reset_adapter,
1481 .nvram_config = qla81xx_nvram_config,
1482 .update_fw_options = qla81xx_update_fw_options,
1483 .load_risc = qla24xx_load_risc,
1484 .pci_info_str = qla24xx_pci_info_str,
1485 .fw_version_str = qla24xx_fw_version_str,
1486 .intr_handler = qla24xx_intr_handler,
1487 .enable_intrs = qla24xx_enable_intrs,
1488 .disable_intrs = qla24xx_disable_intrs,
1489 .abort_command = qla24xx_abort_command,
1490 .target_reset = qla24xx_abort_target,
1491 .lun_reset = qla24xx_lun_reset,
1492 .fabric_login = qla24xx_login_fabric,
1493 .fabric_logout = qla24xx_fabric_logout,
1494 .calc_req_entries = NULL,
1495 .build_iocbs = NULL,
1496 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1497 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1498 .read_nvram = qla25xx_read_nvram_data,
1499 .write_nvram = qla25xx_write_nvram_data,
1500 .fw_dump = qla81xx_fw_dump,
1501 .beacon_on = qla24xx_beacon_on,
1502 .beacon_off = qla24xx_beacon_off,
1503 .beacon_blink = qla24xx_beacon_blink,
1504 .read_optrom = qla25xx_read_optrom_data,
1505 .write_optrom = qla24xx_write_optrom_data,
1506 .get_flash_version = qla24xx_get_flash_version,
1507 .start_scsi = qla24xx_start_scsi,
1508 .wrt_req_reg = qla24xx_wrt_req_reg,
1509 .wrt_rsp_reg = qla24xx_wrt_rsp_reg,
1510 .rd_req_reg = qla24xx_rd_req_reg,
1511};
1512
1491static inline void 1513static inline void
1492qla2x00_set_isp_flags(struct qla_hw_data *ha) 1514qla2x00_set_isp_flags(struct qla_hw_data *ha)
1493{ 1515{
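qla81xx_isp_ops is the driver's vtable for the new part: nearly every method is reused from the 24xx/25xx families, with only nvram_config, update_fw_options and fw_dump overridden. A reduced sketch of the function-pointer-table pattern:

#include <stdio.h>

struct isp_operations {
	void (*nvram_config)(void);
	void (*fw_dump)(void);
};

static void nvram_24xx(void) { puts("24xx nvram_config"); }
static void dump_24xx(void)  { puts("24xx fw_dump"); }
static void nvram_81xx(void) { puts("81xx nvram_config"); }
static void dump_81xx(void)  { puts("81xx fw_dump"); }

/* One table per family; common methods could be shared entries. */
static struct isp_operations ops_24xx = { nvram_24xx, dump_24xx };
static struct isp_operations ops_81xx = { nvram_81xx, dump_81xx };

int main(void)
{
	int is_81xx = 1;
	struct isp_operations *ops = is_81xx ? &ops_81xx : &ops_24xx;

	ops->nvram_config();
	ops->fw_dump();
	return 0;
}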
@@ -1567,6 +1589,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1567 ha->device_type |= DT_IIDMA; 1589 ha->device_type |= DT_IIDMA;
1568 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1590 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1569 break; 1591 break;
1592 case PCI_DEVICE_ID_QLOGIC_ISP8001:
1593 ha->device_type |= DT_ISP8001;
1594 ha->device_type |= DT_ZIO_SUPPORTED;
1595 ha->device_type |= DT_FWI2;
1596 ha->device_type |= DT_IIDMA;
1597 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1598 break;
1570 } 1599 }
1571} 1600}
1572 1601
@@ -1629,7 +1658,7 @@ skip_pio:
1629 1658
1630 /* Determine queue resources */ 1659 /* Determine queue resources */
1631 ha->max_queues = 1; 1660 ha->max_queues = 1;
1632 if (ql2xmaxqueues <= 1 || !IS_QLA25XX(ha)) 1661 if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1633 goto mqiobase_exit; 1662 goto mqiobase_exit;
1634 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1663 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1635 pci_resource_len(ha->pdev, 3)); 1664 pci_resource_len(ha->pdev, 3));
@@ -1706,7 +1735,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1706 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || 1735 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
1707 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 1736 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
1708 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || 1737 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
1709 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532) { 1738 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
1739 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001) {
1710 bars = pci_select_bars(pdev, IORESOURCE_MEM); 1740 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1711 sht = &qla24xx_driver_template; 1741 sht = &qla24xx_driver_template;
1712 mem_only = 1; 1742 mem_only = 1;
@@ -1760,6 +1790,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1760 rsp_length = RESPONSE_ENTRY_CNT_2100; 1790 rsp_length = RESPONSE_ENTRY_CNT_2100;
1761 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 1791 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1762 ha->gid_list_info_size = 4; 1792 ha->gid_list_info_size = 4;
1793 ha->flash_conf_off = ~0;
1794 ha->flash_data_off = ~0;
1795 ha->nvram_conf_off = ~0;
1796 ha->nvram_data_off = ~0;
1763 ha->isp_ops = &qla2100_isp_ops; 1797 ha->isp_ops = &qla2100_isp_ops;
1764 } else if (IS_QLA2200(ha)) { 1798 } else if (IS_QLA2200(ha)) {
1765 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1799 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1767,6 +1801,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1767 rsp_length = RESPONSE_ENTRY_CNT_2100; 1801 rsp_length = RESPONSE_ENTRY_CNT_2100;
1768 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 1802 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1769 ha->gid_list_info_size = 4; 1803 ha->gid_list_info_size = 4;
1804 ha->flash_conf_off = ~0;
1805 ha->flash_data_off = ~0;
1806 ha->nvram_conf_off = ~0;
1807 ha->nvram_data_off = ~0;
1770 ha->isp_ops = &qla2100_isp_ops; 1808 ha->isp_ops = &qla2100_isp_ops;
1771 } else if (IS_QLA23XX(ha)) { 1809 } else if (IS_QLA23XX(ha)) {
1772 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1810 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1776,6 +1814,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1776 ha->gid_list_info_size = 6; 1814 ha->gid_list_info_size = 6;
1777 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1815 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1778 ha->optrom_size = OPTROM_SIZE_2322; 1816 ha->optrom_size = OPTROM_SIZE_2322;
1817 ha->flash_conf_off = ~0;
1818 ha->flash_data_off = ~0;
1819 ha->nvram_conf_off = ~0;
1820 ha->nvram_data_off = ~0;
1779 ha->isp_ops = &qla2300_isp_ops; 1821 ha->isp_ops = &qla2300_isp_ops;
1780 } else if (IS_QLA24XX_TYPE(ha)) { 1822 } else if (IS_QLA24XX_TYPE(ha)) {
1781 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1823 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1787,6 +1829,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1787 ha->optrom_size = OPTROM_SIZE_24XX; 1829 ha->optrom_size = OPTROM_SIZE_24XX;
1788 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; 1830 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
1789 ha->isp_ops = &qla24xx_isp_ops; 1831 ha->isp_ops = &qla24xx_isp_ops;
1832 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
1833 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
1834 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
1835 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
1790 } else if (IS_QLA25XX(ha)) { 1836 } else if (IS_QLA25XX(ha)) {
1791 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1837 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1792 req_length = REQUEST_ENTRY_CNT_24XX; 1838 req_length = REQUEST_ENTRY_CNT_24XX;
@@ -1797,6 +1843,23 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1797 ha->optrom_size = OPTROM_SIZE_25XX; 1843 ha->optrom_size = OPTROM_SIZE_25XX;
1798 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 1844 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1799 ha->isp_ops = &qla25xx_isp_ops; 1845 ha->isp_ops = &qla25xx_isp_ops;
1846 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
1847 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
1848 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
1849 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
1850 } else if (IS_QLA81XX(ha)) {
1851 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1852 req_length = REQUEST_ENTRY_CNT_24XX;
1853 rsp_length = RESPONSE_ENTRY_CNT_2300;
1854 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1855 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
1856 ha->gid_list_info_size = 8;
1857 ha->optrom_size = OPTROM_SIZE_81XX;
1858 ha->isp_ops = &qla81xx_isp_ops;
1859 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
1860 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
1861 ha->nvram_conf_off = ~0;
1862 ha->nvram_data_off = ~0;
1800 } 1863 }
1801 1864
1802 mutex_init(&ha->vport_lock); 1865 mutex_init(&ha->vport_lock);
@@ -2458,23 +2521,6 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2458 return qla2x00_post_work(vha, e, 1); 2521 return qla2x00_post_work(vha, e, 1);
2459} 2522}
2460 2523
2461int
2462qla2x00_post_hwe_work(struct scsi_qla_host *vha, uint16_t code, uint16_t d1,
2463 uint16_t d2, uint16_t d3)
2464{
2465 struct qla_work_evt *e;
2466
2467 e = qla2x00_alloc_work(vha, QLA_EVT_HWE_LOG, 1);
2468 if (!e)
2469 return QLA_FUNCTION_FAILED;
2470
2471 e->u.hwe.code = code;
2472 e->u.hwe.d1 = d1;
2473 e->u.hwe.d2 = d2;
2474 e->u.hwe.d3 = d3;
2475 return qla2x00_post_work(vha, e, 1);
2476}
2477
2478static void 2524static void
2479qla2x00_do_work(struct scsi_qla_host *vha) 2525qla2x00_do_work(struct scsi_qla_host *vha)
2480{ 2526{
@@ -2492,10 +2538,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
2492 fc_host_post_event(vha->host, fc_get_event_number(), 2538 fc_host_post_event(vha->host, fc_get_event_number(),
2493 e->u.aen.code, e->u.aen.data); 2539 e->u.aen.code, e->u.aen.data);
2494 break; 2540 break;
2495 case QLA_EVT_HWE_LOG:
2496 qla2xxx_hw_event_log(vha, e->u.hwe.code, e->u.hwe.d1,
2497 e->u.hwe.d2, e->u.hwe.d3);
2498 break;
2499 } 2541 }
2500 if (e->flags & QLA_EVT_FLAG_FREE) 2542 if (e->flags & QLA_EVT_FLAG_FREE)
2501 kfree(e); 2543 kfree(e);
@@ -2914,13 +2956,14 @@ qla2x00_timer(scsi_qla_host_t *vha)
2914 2956
2915/* Firmware interface routines. */ 2957/* Firmware interface routines. */
2916 2958
2917#define FW_BLOBS 6 2959#define FW_BLOBS 7
2918#define FW_ISP21XX 0 2960#define FW_ISP21XX 0
2919#define FW_ISP22XX 1 2961#define FW_ISP22XX 1
2920#define FW_ISP2300 2 2962#define FW_ISP2300 2
2921#define FW_ISP2322 3 2963#define FW_ISP2322 3
2922#define FW_ISP24XX 4 2964#define FW_ISP24XX 4
2923#define FW_ISP25XX 5 2965#define FW_ISP25XX 5
2966#define FW_ISP81XX 6
2924 2967
2925#define FW_FILE_ISP21XX "ql2100_fw.bin" 2968#define FW_FILE_ISP21XX "ql2100_fw.bin"
2926#define FW_FILE_ISP22XX "ql2200_fw.bin" 2969#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -2928,6 +2971,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
2928#define FW_FILE_ISP2322 "ql2322_fw.bin" 2971#define FW_FILE_ISP2322 "ql2322_fw.bin"
2929#define FW_FILE_ISP24XX "ql2400_fw.bin" 2972#define FW_FILE_ISP24XX "ql2400_fw.bin"
2930#define FW_FILE_ISP25XX "ql2500_fw.bin" 2973#define FW_FILE_ISP25XX "ql2500_fw.bin"
2974#define FW_FILE_ISP81XX "ql8100_fw.bin"
2931 2975
2932static DEFINE_MUTEX(qla_fw_lock); 2976static DEFINE_MUTEX(qla_fw_lock);
2933 2977
@@ -2938,6 +2982,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
2938 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 2982 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
2939 { .name = FW_FILE_ISP24XX, }, 2983 { .name = FW_FILE_ISP24XX, },
2940 { .name = FW_FILE_ISP25XX, }, 2984 { .name = FW_FILE_ISP25XX, },
2985 { .name = FW_FILE_ISP81XX, },
2941}; 2986};
2942 2987
2943struct fw_blob * 2988struct fw_blob *
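Firmware selection stays table-driven: a new slot is appended and the request path picks the entry by family. A trimmed sketch using the file names from the hunks above; the indices match the FW_ISP* defines:

#include <stdio.h>

#define FW_ISP24XX 4
#define FW_ISP25XX 5
#define FW_ISP81XX 6

/* Trimmed view of the blob table; the driver's holds seven entries. */
static const char *fw_name[7] = {
	[FW_ISP24XX] = "ql2400_fw.bin",
	[FW_ISP25XX] = "ql2500_fw.bin",
	[FW_ISP81XX] = "ql8100_fw.bin",
};

int main(void)
{
	printf("ISP81xx requests %s\n", fw_name[FW_ISP81XX]);
	return 0;
}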
@@ -2959,6 +3004,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
2959 blob = &qla_fw_blobs[FW_ISP24XX]; 3004 blob = &qla_fw_blobs[FW_ISP24XX];
2960 } else if (IS_QLA25XX(ha)) { 3005 } else if (IS_QLA25XX(ha)) {
2961 blob = &qla_fw_blobs[FW_ISP25XX]; 3006 blob = &qla_fw_blobs[FW_ISP25XX];
3007 } else if (IS_QLA81XX(ha)) {
3008 blob = &qla_fw_blobs[FW_ISP81XX];
2962 } 3009 }
2963 3010
2964 mutex_lock(&qla_fw_lock); 3011 mutex_lock(&qla_fw_lock);
@@ -3112,6 +3159,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
3112 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, 3159 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
3113 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 3160 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
3114 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, 3161 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
3162 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
3115 { 0 }, 3163 { 0 },
3116}; 3164};
3117MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 3165MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
@@ -3200,3 +3248,4 @@ MODULE_FIRMWARE(FW_FILE_ISP2300);
3200MODULE_FIRMWARE(FW_FILE_ISP2322); 3248MODULE_FIRMWARE(FW_FILE_ISP2322);
3201MODULE_FIRMWARE(FW_FILE_ISP24XX); 3249MODULE_FIRMWARE(FW_FILE_ISP24XX);
3202MODULE_FIRMWARE(FW_FILE_ISP25XX); 3250MODULE_FIRMWARE(FW_FILE_ISP25XX);
3251MODULE_FIRMWARE(FW_FILE_ISP81XX);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index c538ee1b1a31..303f8ee11f25 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -425,27 +425,27 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
425#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4) 425#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
426 426
427static inline uint32_t 427static inline uint32_t
428flash_conf_to_access_addr(uint32_t faddr) 428flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
429{ 429{
430 return FARX_ACCESS_FLASH_CONF | faddr; 430 return ha->flash_conf_off | faddr;
431} 431}
432 432
433static inline uint32_t 433static inline uint32_t
434flash_data_to_access_addr(uint32_t faddr) 434flash_data_addr(struct qla_hw_data *ha, uint32_t faddr)
435{ 435{
436 return FARX_ACCESS_FLASH_DATA | faddr; 436 return ha->flash_data_off | faddr;
437} 437}
438 438
439static inline uint32_t 439static inline uint32_t
440nvram_conf_to_access_addr(uint32_t naddr) 440nvram_conf_addr(struct qla_hw_data *ha, uint32_t naddr)
441{ 441{
442 return FARX_ACCESS_NVRAM_CONF | naddr; 442 return ha->nvram_conf_off | naddr;
443} 443}
444 444
445static inline uint32_t 445static inline uint32_t
446nvram_data_to_access_addr(uint32_t naddr) 446nvram_data_addr(struct qla_hw_data *ha, uint32_t naddr)
447{ 447{
448 return FARX_ACCESS_NVRAM_DATA | naddr; 448 return ha->nvram_data_off | naddr;
449} 449}
450 450
451static uint32_t 451static uint32_t
@@ -481,10 +481,12 @@ qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
481 uint32_t dwords) 481 uint32_t dwords)
482{ 482{
483 uint32_t i; 483 uint32_t i;
484 struct qla_hw_data *ha = vha->hw;
485
484 /* Dword reads to flash. */ 486 /* Dword reads to flash. */
485 for (i = 0; i < dwords; i++, faddr++) 487 for (i = 0; i < dwords; i++, faddr++)
486 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw, 488 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
487 flash_data_to_access_addr(faddr))); 489 flash_data_addr(ha, faddr)));
488 490
489 return dwptr; 491 return dwptr;
490} 492}
@@ -518,7 +520,7 @@ qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
518{ 520{
519 uint32_t ids; 521 uint32_t ids;
520 522
521 ids = qla24xx_read_flash_dword(ha, flash_data_to_access_addr(0xd03ab)); 523 ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x03ab));
522 *man_id = LSB(ids); 524 *man_id = LSB(ids);
523 *flash_id = MSB(ids); 525 *flash_id = MSB(ids);
524 526
@@ -530,8 +532,7 @@ qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
530 * Example: ATMEL 0x00 01 45 1F 532 * Example: ATMEL 0x00 01 45 1F
531 * Extract MFG and Dev ID from last two bytes. 533 * Extract MFG and Dev ID from last two bytes.
532 */ 534 */
533 ids = qla24xx_read_flash_dword(ha, 535 ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x009f));
534 flash_data_to_access_addr(0xd009f));
535 *man_id = LSB(ids); 536 *man_id = LSB(ids);
536 *flash_id = MSB(ids); 537 *flash_id = MSB(ids);
537 } 538 }
@@ -555,9 +556,13 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
555 556
556 /* Begin with sane defaults. */ 557 /* Begin with sane defaults. */
557 loc = locations[0]; 558 loc = locations[0];
558 *start = IS_QLA24XX_TYPE(ha) ? FA_FLASH_LAYOUT_ADDR_24: 559 *start = 0;
559 FA_FLASH_LAYOUT_ADDR; 560 if (IS_QLA24XX_TYPE(ha))
560 561 *start = FA_FLASH_LAYOUT_ADDR_24;
562 else if (IS_QLA25XX(ha))
563 *start = FA_FLASH_LAYOUT_ADDR;
564 else if (IS_QLA81XX(ha))
565 *start = FA_FLASH_LAYOUT_ADDR_81;
561 /* Begin with first PCI expansion ROM header. */ 566 /* Begin with first PCI expansion ROM header. */
562 buf = (uint8_t *)req->ring; 567 buf = (uint8_t *)req->ring;
563 dcode = (uint32_t *)req->ring; 568 dcode = (uint32_t *)req->ring;
@@ -618,6 +623,22 @@ static void
618qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) 623qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
619{ 624{
620 const char *loc, *locations[] = { "DEF", "FLT" }; 625 const char *loc, *locations[] = { "DEF", "FLT" };
626 const uint32_t def_fw[] =
627 { FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR_81 };
628 const uint32_t def_boot[] =
629 { FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR_81 };
630 const uint32_t def_vpd_nvram[] =
631 { FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR_81 };
632 const uint32_t def_fdt[] =
633 { FA_FLASH_DESCR_ADDR_24, FA_FLASH_DESCR_ADDR,
634 FA_FLASH_DESCR_ADDR_81 };
635 const uint32_t def_npiv_conf0[] =
636 { FA_NPIV_CONF0_ADDR_24, FA_NPIV_CONF0_ADDR,
637 FA_NPIV_CONF0_ADDR_81 };
638 const uint32_t def_npiv_conf1[] =
639 { FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR,
640 FA_NPIV_CONF1_ADDR_81 };
641 uint32_t def;
621 uint16_t *wptr; 642 uint16_t *wptr;
622 uint16_t cnt, chksum; 643 uint16_t cnt, chksum;
623 uint32_t start; 644 uint32_t start;
@@ -676,20 +697,12 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
676 case FLT_REG_FDT: 697 case FLT_REG_FDT:
677 ha->flt_region_fdt = start; 698 ha->flt_region_fdt = start;
678 break; 699 break;
679 case FLT_REG_HW_EVENT_0:
680 if (!PCI_FUNC(ha->pdev->devfn))
681 ha->flt_region_hw_event = start;
682 break;
683 case FLT_REG_HW_EVENT_1:
684 if (PCI_FUNC(ha->pdev->devfn))
685 ha->flt_region_hw_event = start;
686 break;
687 case FLT_REG_NPIV_CONF_0: 700 case FLT_REG_NPIV_CONF_0:
688 if (!PCI_FUNC(ha->pdev->devfn)) 701 if (!(PCI_FUNC(ha->pdev->devfn) & 1))
689 ha->flt_region_npiv_conf = start; 702 ha->flt_region_npiv_conf = start;
690 break; 703 break;
691 case FLT_REG_NPIV_CONF_1: 704 case FLT_REG_NPIV_CONF_1:
692 if (PCI_FUNC(ha->pdev->devfn)) 705 if (PCI_FUNC(ha->pdev->devfn) & 1)
693 ha->flt_region_npiv_conf = start; 706 ha->flt_region_npiv_conf = start;
694 break; 707 break;
695 } 708 }
@@ -699,22 +712,24 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
699no_flash_data: 712no_flash_data:
700 /* Use hardcoded defaults. */ 713 /* Use hardcoded defaults. */
701 loc = locations[0]; 714 loc = locations[0];
702 ha->flt_region_fw = FA_RISC_CODE_ADDR; 715 def = 0;
703 ha->flt_region_boot = FA_BOOT_CODE_ADDR; 716 if (IS_QLA24XX_TYPE(ha))
704 ha->flt_region_vpd_nvram = FA_VPD_NVRAM_ADDR; 717 def = 0;
705 ha->flt_region_fdt = IS_QLA24XX_TYPE(ha) ? FA_FLASH_DESCR_ADDR_24: 718 else if (IS_QLA25XX(ha))
706 FA_FLASH_DESCR_ADDR; 719 def = 1;
707 ha->flt_region_hw_event = !PCI_FUNC(ha->pdev->devfn) ? 720 else if (IS_QLA81XX(ha))
708 FA_HW_EVENT0_ADDR: FA_HW_EVENT1_ADDR; 721 def = 2;
709 ha->flt_region_npiv_conf = !PCI_FUNC(ha->pdev->devfn) ? 722 ha->flt_region_fw = def_fw[def];
710 (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF0_ADDR_24: FA_NPIV_CONF0_ADDR): 723 ha->flt_region_boot = def_boot[def];
711 (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF1_ADDR_24: FA_NPIV_CONF1_ADDR); 724 ha->flt_region_vpd_nvram = def_vpd_nvram[def];
725 ha->flt_region_fdt = def_fdt[def];
726 ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
727 def_npiv_conf0[def]: def_npiv_conf1[def];
712done: 728done:
713 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 729 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
714 "vpd_nvram=0x%x fdt=0x%x flt=0x%x hwe=0x%x npiv=0x%x.\n", loc, 730 "vpd_nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x.\n", loc,
715 ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram, 731 ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram,
716 ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_hw_event, 732 ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf));
717 ha->flt_region_npiv_conf));
718} 733}
719 734
720static void 735static void
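The hardcoded-default path is now table-driven as well: def indexes 0 for 24xx, 1 for 25xx and 2 for 81xx into parallel arrays of region addresses. A sketch of the selection; the addresses here are made up, not the FA_* values:

#include <stdint.h>
#include <stdio.h>

/* def index per the hunk above: 0 = 24xx, 1 = 25xx, 2 = 81xx.
 * Addresses are illustrative only. */
static const uint32_t def_fw[]   = { 0x20000, 0x20000, 0x80000 };
static const uint32_t def_boot[] = { 0x00000, 0x00000, 0x08000 };

int main(void)
{
	int def = 2;	/* pretend IS_QLA81XX() matched */

	printf("fw=0x%x boot=0x%x\n",
	    (unsigned)def_fw[def], (unsigned)def_boot[def]);
	return 0;
}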
@@ -757,14 +772,14 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
757 mid = le16_to_cpu(fdt->man_id); 772 mid = le16_to_cpu(fdt->man_id);
758 fid = le16_to_cpu(fdt->id); 773 fid = le16_to_cpu(fdt->id);
759 ha->fdt_wrt_disable = fdt->wrt_disable_bits; 774 ha->fdt_wrt_disable = fdt->wrt_disable_bits;
760 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd); 775 ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0300 | fdt->erase_cmd);
761 ha->fdt_block_size = le32_to_cpu(fdt->block_size); 776 ha->fdt_block_size = le32_to_cpu(fdt->block_size);
762 if (fdt->unprotect_sec_cmd) { 777 if (fdt->unprotect_sec_cmd) {
763 ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0300 | 778 ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 |
764 fdt->unprotect_sec_cmd); 779 fdt->unprotect_sec_cmd);
765 ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ? 780 ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
766 flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd): 781 flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd):
767 flash_conf_to_access_addr(0x0336); 782 flash_conf_addr(ha, 0x0336);
768 } 783 }
769 goto done; 784 goto done;
770no_flash_data: 785no_flash_data:
@@ -773,7 +788,7 @@ no_flash_data:
773 mid = man_id; 788 mid = man_id;
774 fid = flash_id; 789 fid = flash_id;
775 ha->fdt_wrt_disable = 0x9c; 790 ha->fdt_wrt_disable = 0x9c;
776 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8); 791 ha->fdt_erase_cmd = flash_conf_addr(ha, 0x03d8);
777 switch (man_id) { 792 switch (man_id) {
778 case 0xbf: /* STT flash. */ 793 case 0xbf: /* STT flash. */
779 if (flash_id == 0x8e) 794 if (flash_id == 0x8e)
@@ -782,16 +797,16 @@ no_flash_data:
782 ha->fdt_block_size = FLASH_BLK_SIZE_32K; 797 ha->fdt_block_size = FLASH_BLK_SIZE_32K;
783 798
784 if (flash_id == 0x80) 799 if (flash_id == 0x80)
785 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0352); 800 ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0352);
786 break; 801 break;
787 case 0x13: /* ST M25P80. */ 802 case 0x13: /* ST M25P80. */
788 ha->fdt_block_size = FLASH_BLK_SIZE_64K; 803 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
789 break; 804 break;
790 case 0x1f: /* Atmel 26DF081A. */ 805 case 0x1f: /* Atmel 26DF081A. */
791 ha->fdt_block_size = FLASH_BLK_SIZE_4K; 806 ha->fdt_block_size = FLASH_BLK_SIZE_4K;
792 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0320); 807 ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0320);
793 ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0339); 808 ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0339);
794 ha->fdt_protect_sec_cmd = flash_conf_to_access_addr(0x0336); 809 ha->fdt_protect_sec_cmd = flash_conf_addr(ha, 0x0336);
795 break; 810 break;
796 default: 811 default:
797 /* Default to 64 kb sector size. */ 812 /* Default to 64 kb sector size. */
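
Throughout this file flash_conf_to_access_addr(), which baked in a fixed register window, gives way to flash_conf_addr(ha, ...) so the window can vary per ISP (ISP81xx maps the flash configuration registers at a different offset). A plausible shape for the new helper, assuming a per-HBA flash_conf_off field set at probe time (the field name is an assumption here):

	static inline uint32_t
	flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
	{
		/* OR the per-ISP window base onto the register offset. */
		return ha->flash_conf_off | faddr;
	}

The nvram_conf_addr(), nvram_data_addr() and flash_data_addr() calls appearing later in the diff follow the same scheme.
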
@@ -813,7 +828,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
813 uint32_t flt_addr; 828 uint32_t flt_addr;
814 struct qla_hw_data *ha = vha->hw; 829 struct qla_hw_data *ha = vha->hw;
815 830
816 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 831 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
817 return QLA_SUCCESS; 832 return QLA_SUCCESS;
818 833
819 ret = qla2xxx_find_flt_start(vha, &flt_addr); 834 ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -838,7 +853,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
838 struct qla_npiv_entry *entry; 853 struct qla_npiv_entry *entry;
839 struct qla_hw_data *ha = vha->hw; 854 struct qla_hw_data *ha = vha->hw;
840 855
841 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 856 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
842 return; 857 return;
843 858
844 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, 859 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -930,9 +945,9 @@ qla24xx_unprotect_flash(struct qla_hw_data *ha)
930 return; 945 return;
931 946
932 /* Disable flash write-protection. */ 947 /* Disable flash write-protection. */
933 qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0); 948 qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
934 /* Some flash parts need an additional zero-write to clear bits.*/ 949 /* Some flash parts need an additional zero-write to clear bits.*/
935 qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0); 950 qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
936} 951}
937 952
938static void 953static void
@@ -945,11 +960,10 @@ qla24xx_protect_flash(struct qla_hw_data *ha)
945 goto skip_wrt_protect; 960 goto skip_wrt_protect;
946 961
947 /* Enable flash write-protection and wait for completion. */ 962 /* Enable flash write-protection and wait for completion. */
948 qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 963 qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101),
949 ha->fdt_wrt_disable); 964 ha->fdt_wrt_disable);
950 for (cnt = 300; cnt && 965 for (cnt = 300; cnt &&
951 qla24xx_read_flash_dword(ha, 966 qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x005)) & BIT_0;
952 flash_conf_to_access_addr(0x005)) & BIT_0;
953 cnt--) { 967 cnt--) {
954 udelay(10); 968 udelay(10);
955 } 969 }
@@ -977,7 +991,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
977 ret = QLA_SUCCESS; 991 ret = QLA_SUCCESS;
978 992
979 /* Prepare burst-capable write on supported ISPs. */ 993 /* Prepare burst-capable write on supported ISPs. */
980 if (IS_QLA25XX(ha) && !(faddr & 0xfff) && 994 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && !(faddr & 0xfff) &&
981 dwords > OPTROM_BURST_DWORDS) { 995 dwords > OPTROM_BURST_DWORDS) {
982 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 996 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
983 &optrom_dma, GFP_KERNEL); 997 &optrom_dma, GFP_KERNEL);
@@ -989,7 +1003,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
989 } 1003 }
990 1004
991 rest_addr = (ha->fdt_block_size >> 2) - 1; 1005 rest_addr = (ha->fdt_block_size >> 2) - 1;
992 sec_mask = 0x80000 - (ha->fdt_block_size >> 2); 1006 sec_mask = (ha->optrom_size >> 2) - (ha->fdt_block_size >> 2);
993 1007
994 qla24xx_unprotect_flash(ha); 1008 qla24xx_unprotect_flash(ha);
995 1009
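
The sec_mask change above stops hardcoding a 2 MB part. Working the arithmetic for a 2 MB option ROM (ha->optrom_size = 0x200000 bytes) and 64 KB flash blocks (ha->fdt_block_size = 0x10000), with >> 2 converting bytes to 32-bit words:

	rest_addr = (0x10000 >> 2) - 1               = 0x3fff
	sec_mask  = (0x200000 >> 2) - (0x10000 >> 2) = 0x7c000

which reproduces the old 0x80000 - (block >> 2) result for 2 MB parts, while the larger ISP81xx flash sizes now scale correctly instead of being forced into the fixed 0x80000-word assumption.
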
@@ -1024,13 +1038,13 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1024 *s = cpu_to_le32(*d); 1038 *s = cpu_to_le32(*d);
1025 1039
1026 ret = qla2x00_load_ram(vha, optrom_dma, 1040 ret = qla2x00_load_ram(vha, optrom_dma,
1027 flash_data_to_access_addr(faddr), 1041 flash_data_addr(ha, faddr),
1028 OPTROM_BURST_DWORDS); 1042 OPTROM_BURST_DWORDS);
1029 if (ret != QLA_SUCCESS) { 1043 if (ret != QLA_SUCCESS) {
1030 qla_printk(KERN_WARNING, ha, 1044 qla_printk(KERN_WARNING, ha,
1031 "Unable to burst-write optrom segment " 1045 "Unable to burst-write optrom segment "
1032 "(%x/%x/%llx).\n", ret, 1046 "(%x/%x/%llx).\n", ret,
1033 flash_data_to_access_addr(faddr), 1047 flash_data_addr(ha, faddr),
1034 (unsigned long long)optrom_dma); 1048 (unsigned long long)optrom_dma);
1035 qla_printk(KERN_WARNING, ha, 1049 qla_printk(KERN_WARNING, ha,
1036 "Reverting to slow-write.\n"); 1050 "Reverting to slow-write.\n");
@@ -1047,7 +1061,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1047 } 1061 }
1048 1062
1049 ret = qla24xx_write_flash_dword(ha, 1063 ret = qla24xx_write_flash_dword(ha,
1050 flash_data_to_access_addr(faddr), cpu_to_le32(*dwptr)); 1064 flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
1051 if (ret != QLA_SUCCESS) { 1065 if (ret != QLA_SUCCESS) {
1052 DEBUG9(printk("%s(%ld) Unable to program flash " 1066 DEBUG9(printk("%s(%ld) Unable to program flash "
1053 "address=%x data=%x.\n", __func__, 1067 "address=%x data=%x.\n", __func__,
@@ -1098,12 +1112,13 @@ qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1098{ 1112{
1099 uint32_t i; 1113 uint32_t i;
1100 uint32_t *dwptr; 1114 uint32_t *dwptr;
1115 struct qla_hw_data *ha = vha->hw;
1101 1116
1102 /* Dword reads to flash. */ 1117 /* Dword reads to flash. */
1103 dwptr = (uint32_t *)buf; 1118 dwptr = (uint32_t *)buf;
1104 for (i = 0; i < bytes >> 2; i++, naddr++) 1119 for (i = 0; i < bytes >> 2; i++, naddr++)
1105 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw, 1120 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
1106 nvram_data_to_access_addr(naddr))); 1121 nvram_data_addr(ha, naddr)));
1107 1122
1108 return buf; 1123 return buf;
1109} 1124}
@@ -1160,17 +1175,14 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1160 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */ 1175 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
1161 1176
1162 /* Disable NVRAM write-protection. */ 1177 /* Disable NVRAM write-protection. */
1163 qla24xx_write_flash_dword(ha, nvram_conf_to_access_addr(0x101), 1178 qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
1164 0); 1179 qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
1165 qla24xx_write_flash_dword(ha, nvram_conf_to_access_addr(0x101),
1166 0);
1167 1180
1168 /* Dword writes to flash. */ 1181 /* Dword writes to flash. */
1169 dwptr = (uint32_t *)buf; 1182 dwptr = (uint32_t *)buf;
1170 for (i = 0; i < bytes >> 2; i++, naddr++, dwptr++) { 1183 for (i = 0; i < bytes >> 2; i++, naddr++, dwptr++) {
1171 ret = qla24xx_write_flash_dword(ha, 1184 ret = qla24xx_write_flash_dword(ha,
1172 nvram_data_to_access_addr(naddr), 1185 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
1173 cpu_to_le32(*dwptr));
1174 if (ret != QLA_SUCCESS) { 1186 if (ret != QLA_SUCCESS) {
1175 DEBUG9(qla_printk("Unable to program nvram address=%x " 1187 DEBUG9(qla_printk("Unable to program nvram address=%x "
1176 "data=%x.\n", naddr, *dwptr)); 1188 "data=%x.\n", naddr, *dwptr));
@@ -1179,8 +1191,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1179 } 1191 }
1180 1192
1181 /* Enable NVRAM write-protection. */ 1193 /* Enable NVRAM write-protection. */
1182 qla24xx_write_flash_dword(ha, nvram_conf_to_access_addr(0x101), 1194 qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c);
1183 0x8c);
1184 1195
1185 /* Disable flash write. */ 1196 /* Disable flash write. */
1186 WRT_REG_DWORD(&reg->ctrl_status, 1197 WRT_REG_DWORD(&reg->ctrl_status,
@@ -1202,8 +1213,7 @@ qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1202 dwptr = (uint32_t *)buf; 1213 dwptr = (uint32_t *)buf;
1203 for (i = 0; i < bytes >> 2; i++, naddr++) 1214 for (i = 0; i < bytes >> 2; i++, naddr++)
1204 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 1215 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
1205 flash_data_to_access_addr(ha->flt_region_vpd_nvram | 1216 flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr)));
1206 naddr)));
1207 1217
1208 return buf; 1218 return buf;
1209} 1219}
@@ -2246,12 +2256,12 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2246 burst = left; 2256 burst = left;
2247 2257
2248 rval = qla2x00_dump_ram(vha, optrom_dma, 2258 rval = qla2x00_dump_ram(vha, optrom_dma,
2249 flash_data_to_access_addr(faddr), burst); 2259 flash_data_addr(ha, faddr), burst);
2250 if (rval) { 2260 if (rval) {
2251 qla_printk(KERN_WARNING, ha, 2261 qla_printk(KERN_WARNING, ha,
2252 "Unable to burst-read optrom segment " 2262 "Unable to burst-read optrom segment "
2253 "(%x/%x/%llx).\n", rval, 2263 "(%x/%x/%llx).\n", rval,
2254 flash_data_to_access_addr(faddr), 2264 flash_data_addr(ha, faddr),
2255 (unsigned long long)optrom_dma); 2265 (unsigned long long)optrom_dma);
2256 qla_printk(KERN_WARNING, ha, 2266 qla_printk(KERN_WARNING, ha,
2257 "Reverting to slow-read.\n"); 2267 "Reverting to slow-read.\n");
@@ -2648,108 +2658,3 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
2648 2658
2649 return 0; 2659 return 0;
2650} 2660}
2651
2652static int
2653qla2xxx_hw_event_store(scsi_qla_host_t *vha, uint32_t *fdata)
2654{
2655 uint32_t d[2], faddr;
2656 struct qla_hw_data *ha = vha->hw;
2657
2658 /* Locate first empty entry. */
2659 for (;;) {
2660 if (ha->hw_event_ptr >=
2661 ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
2662 DEBUG2(qla_printk(KERN_WARNING, ha,
2663 "HW event -- Log Full!\n"));
2664 return QLA_MEMORY_ALLOC_FAILED;
2665 }
2666
2667 qla24xx_read_flash_data(vha, d, ha->hw_event_ptr, 2);
2668 faddr = flash_data_to_access_addr(ha->hw_event_ptr);
2669 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
2670 if (d[0] == __constant_cpu_to_le32(0xffffffff) &&
2671 d[1] == __constant_cpu_to_le32(0xffffffff)) {
2672 qla24xx_unprotect_flash(ha);
2673
2674 qla24xx_write_flash_dword(ha, faddr++,
2675 cpu_to_le32(jiffies));
2676 qla24xx_write_flash_dword(ha, faddr++, 0);
2677 qla24xx_write_flash_dword(ha, faddr++, *fdata++);
2678 qla24xx_write_flash_dword(ha, faddr++, *fdata);
2679
2680 qla24xx_protect_flash(ha);
2681 break;
2682 }
2683 }
2684 return QLA_SUCCESS;
2685}
2686
2687int
2688qla2xxx_hw_event_log(scsi_qla_host_t *vha, uint16_t code, uint16_t d1,
2689 uint16_t d2, uint16_t d3)
2690{
2691#define QMARK(a, b, c, d) \
2692 cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d))
2693 struct qla_hw_data *ha = vha->hw;
2694 int rval;
2695 uint32_t marker[2], fdata[4];
2696
2697 if (ha->flt_region_hw_event == 0)
2698 return QLA_FUNCTION_FAILED;
2699
2700 DEBUG2(qla_printk(KERN_WARNING, ha,
2701 "HW event -- code=%x, d1=%x, d2=%x, d3=%x.\n", code, d1, d2, d3));
2702
2703 /* If marker not already found, locate or write. */
2704 if (!ha->flags.hw_event_marker_found) {
2705 /* Create marker. */
2706 marker[0] = QMARK('L', ha->fw_major_version,
2707 ha->fw_minor_version, ha->fw_subminor_version);
2708 marker[1] = QMARK(QLA_DRIVER_MAJOR_VER, QLA_DRIVER_MINOR_VER,
2709 QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER);
2710
2711 /* Locate marker. */
2712 ha->hw_event_ptr = ha->flt_region_hw_event;
2713 for (;;) {
2714 qla24xx_read_flash_data(vha, fdata, ha->hw_event_ptr,
2715 4);
2716 if (fdata[0] == __constant_cpu_to_le32(0xffffffff) &&
2717 fdata[1] == __constant_cpu_to_le32(0xffffffff))
2718 break;
2719 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
2720 if (ha->hw_event_ptr >=
2721 ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
2722 DEBUG2(qla_printk(KERN_WARNING, ha,
2723 "HW event -- Log Full!\n"));
2724 return QLA_MEMORY_ALLOC_FAILED;
2725 }
2726 if (fdata[2] == marker[0] && fdata[3] == marker[1]) {
2727 ha->flags.hw_event_marker_found = 1;
2728 break;
2729 }
2730 }
2731 /* No marker, write it. */
2732 if (!ha->flags.hw_event_marker_found) {
2733 rval = qla2xxx_hw_event_store(vha, marker);
2734 if (rval != QLA_SUCCESS) {
2735 DEBUG2(qla_printk(KERN_WARNING, ha,
2736 "HW event -- Failed marker write=%x.!\n",
2737 rval));
2738 return rval;
2739 }
2740 ha->flags.hw_event_marker_found = 1;
2741 }
2742 }
2743
2744 /* Store error. */
2745 fdata[0] = cpu_to_le32(code << 16 | d1);
2746 fdata[1] = cpu_to_le32(d2 << 16 | d3);
2747 rval = qla2xxx_hw_event_store(vha, fdata);
2748 if (rval != QLA_SUCCESS) {
2749 DEBUG2(qla_printk(KERN_WARNING, ha,
2750 "HW event -- Failed error write=%x.!\n",
2751 rval));
2752 }
2753
2754 return rval;
2755}
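
With the FLT_REG_HW_EVENT_0/1 cases and the FA_HW_EVENT*_ADDR fallbacks removed from qla2xxx_get_flt_info() earlier in this diff, the flash-resident hardware event log has no backing region left, so qla2xxx_hw_event_store() and qla2xxx_hw_event_log() are deleted outright rather than kept as dead code. For reference, the removed QMARK() macro packed four low bytes into one little-endian dword; for example:

	QMARK('L', 4, 2, 1) == cpu_to_le32(0x4c040201)	/* 'L' == 0x4c */
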
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index be22f3a09f8d..808bab6ef06b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.03-k1" 10#define QLA2XXX_VERSION "8.03.00-k1"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 3
14#define QLA_DRIVER_PATCH_VER 3 14#define QLA_DRIVER_PATCH_VER 0
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 913a931176ef..8e5c169b03fb 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -237,8 +237,7 @@ int raid_component_add(struct raid_template *r,struct device *raid_dev,
237 rc->dev.parent = get_device(component_dev); 237 rc->dev.parent = get_device(component_dev);
238 rc->num = rd->component_count++; 238 rc->num = rd->component_count++;
239 239
240 snprintf(rc->dev.bus_id, sizeof(rc->dev.bus_id), 240 dev_set_name(&rc->dev, "component-%d", rc->num);
241 "component-%d", rc->num);
242 list_add_tail(&rc->node, &rd->component_list); 241 list_add_tail(&rc->node, &rd->component_list);
243 rc->dev.class = &raid_class.class; 242 rc->dev.class = &raid_class.class;
244 err = device_add(&rc->dev); 243 err = device_add(&rc->dev);
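
This raid_class hunk is the first of many below (scsi_debug, scsi_scan, scsi_sysfs, the FC/iSCSI/SAS/SRP transport classes, sd, ses, sim710) migrating from the fixed 20-byte dev->bus_id array to driver-core-managed device names. A minimal sketch of the idiom (the wrapper function is illustrative):

	#include <linux/device.h>

	static int name_raid_component(struct device *dev, int num)
	{
		/* The driver core allocates the string; read it back with
		 * dev_name(dev) instead of poking dev->bus_id directly. */
		return dev_set_name(dev, "component-%d", num);
	}

dev_set_name() can fail with -ENOMEM; the converted call sites here mostly ignore that, matching the old sprintf()-into-array behaviour.
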
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index f8b79d401d58..42e72a2c1f98 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -651,10 +651,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
651 unsigned long timeout; 651 unsigned long timeout;
652 int rtn = 0; 652 int rtn = 0;
653 653
654 /*
655 * We will use a queued command if possible, otherwise we will
656 * emulate the queuing and calling of completion function ourselves.
657 */
658 atomic_inc(&cmd->device->iorequest_cnt); 654 atomic_inc(&cmd->device->iorequest_cnt);
659 655
660 /* check if the device is still usable */ 656 /* check if the device is still usable */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 27c633f55794..6eebd0bbe8a8 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2508,7 +2508,7 @@ static void pseudo_0_release(struct device *dev)
2508} 2508}
2509 2509
2510static struct device pseudo_primary = { 2510static struct device pseudo_primary = {
2511 .bus_id = "pseudo_0", 2511 .init_name = "pseudo_0",
2512 .release = pseudo_0_release, 2512 .release = pseudo_0_release,
2513}; 2513};
2514 2514
@@ -2680,7 +2680,7 @@ static int sdebug_add_adapter(void)
2680 sdbg_host->dev.bus = &pseudo_lld_bus; 2680 sdbg_host->dev.bus = &pseudo_lld_bus;
2681 sdbg_host->dev.parent = &pseudo_primary; 2681 sdbg_host->dev.parent = &pseudo_primary;
2682 sdbg_host->dev.release = &sdebug_release_adapter; 2682 sdbg_host->dev.release = &sdebug_release_adapter;
2683 sprintf(sdbg_host->dev.bus_id, "adapter%d", scsi_debug_add_host); 2683 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
2684 2684
2685 error = device_register(&sdbg_host->dev); 2685 error = device_register(&sdbg_host->dev);
2686 2686
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index d86ebea9350a..ad6a1370761e 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -124,34 +124,22 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
124enum blk_eh_timer_return scsi_times_out(struct request *req) 124enum blk_eh_timer_return scsi_times_out(struct request *req)
125{ 125{
126 struct scsi_cmnd *scmd = req->special; 126 struct scsi_cmnd *scmd = req->special;
127 enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
128 enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED; 127 enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
129 128
130 scsi_log_completion(scmd, TIMEOUT_ERROR); 129 scsi_log_completion(scmd, TIMEOUT_ERROR);
131 130
132 if (scmd->device->host->transportt->eh_timed_out) 131 if (scmd->device->host->transportt->eh_timed_out)
133 eh_timed_out = scmd->device->host->transportt->eh_timed_out; 132 rtn = scmd->device->host->transportt->eh_timed_out(scmd);
134 else if (scmd->device->host->hostt->eh_timed_out) 133 else if (scmd->device->host->hostt->eh_timed_out)
135 eh_timed_out = scmd->device->host->hostt->eh_timed_out; 134 rtn = scmd->device->host->hostt->eh_timed_out(scmd);
136 else
137 eh_timed_out = NULL;
138 135
139 if (eh_timed_out) { 136 if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
140 rtn = eh_timed_out(scmd); 137 !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
141 switch (rtn) {
142 case BLK_EH_NOT_HANDLED:
143 break;
144 default:
145 return rtn;
146 }
147 }
148
149 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
150 scmd->result |= DID_TIME_OUT << 16; 138 scmd->result |= DID_TIME_OUT << 16;
151 return BLK_EH_HANDLED; 139 rtn = BLK_EH_HANDLED;
152 } 140 }
153 141
154 return BLK_EH_NOT_HANDLED; 142 return rtn;
155} 143}
156 144
157/** 145/**
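
The scsi_times_out() rewrite above removes the eh_timed_out function-pointer staging without changing behaviour: any handler return other than BLK_EH_NOT_HANDLED reaches the final return untouched, and only a timeout that is both unhandled and unqueueable to the error handler is completed as DID_TIME_OUT. Condensed, the new flow is (transportt/hostt abbreviating the host's template pointers):

	enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;

	if (transportt->eh_timed_out)
		rtn = transportt->eh_timed_out(scmd);
	else if (hostt->eh_timed_out)
		rtn = hostt->eh_timed_out(scmd);

	if (rtn == BLK_EH_NOT_HANDLED &&
	    !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
		scmd->result |= DID_TIME_OUT << 16;	/* fake the completion */
		rtn = BLK_EH_HANDLED;
	}
	return rtn;
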
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 2ae4f8fc5831..b98f763931c5 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -167,10 +167,17 @@ EXPORT_SYMBOL(scsi_set_medium_removal);
167static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg) 167static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
168{ 168{
169 struct device *dev = scsi_get_device(sdev->host); 169 struct device *dev = scsi_get_device(sdev->host);
170 const char *name;
170 171
171 if (!dev) 172 if (!dev)
172 return -ENXIO; 173 return -ENXIO;
173 return copy_to_user(arg, dev->bus_id, sizeof(dev->bus_id))? -EFAULT: 0; 174
175 name = dev_name(dev);
176
177 /* compatibility with old ioctl which only returned
178 * 20 characters */
179 return copy_to_user(arg, name, min(strlen(name), (size_t)20))
180 ? -EFAULT: 0;
174} 181}
175 182
176 183
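
The old code copied the whole fixed-size bus_id array; now that names are heap-allocated, the copy is clamped to the 20 characters legacy SCSI_IOCTL_GET_PCI callers allocated. One subtlety worth noting: the new form copies only the name bytes themselves, so the result is NUL-terminated only if userspace zeroed its buffer first, whereas the old 20-byte array copy carried the NUL padding along. A userspace sketch of the safe calling convention:

	/* illustrative only; SCSI_IOCTL_GET_PCI == 0x5387 */
	char buf[21] = { 0 };	/* pre-zeroed: the kernel copies no NUL */
	ioctl(fd, SCSI_IOCTL_GET_PCI, buf);
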
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f2f51e0333eb..940dc32ff0dc 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -91,26 +91,19 @@ static void scsi_unprep_request(struct request *req)
91 scsi_put_command(cmd); 91 scsi_put_command(cmd);
92} 92}
93 93
94/* 94/**
95 * Function: scsi_queue_insert() 95 * __scsi_queue_insert - private queue insertion
96 * 96 * @cmd: The SCSI command being requeued
97 * Purpose: Insert a command in the midlevel queue. 97 * @reason: The reason for the requeue
98 * 98 * @unbusy: Whether the queue should be unbusied
99 * Arguments: cmd - command that we are adding to queue. 99 *
100 * reason - why we are inserting command to queue. 100 * This is a private queue insertion. The public interface
101 * 101 * scsi_queue_insert() always assumes the queue should be unbusied
102 * Lock status: Assumed that lock is not held upon entry. 102 * because it's always called before the completion. This function is
103 * 103 * for a requeue after completion, which should only occur in this
104 * Returns: Nothing. 104 * file.
105 *
106 * Notes: We do this for one of two cases. Either the host is busy
107 * and it cannot accept any more commands for the time being,
108 * or the device returned QUEUE_FULL and can accept no more
109 * commands.
110 * Notes: This could be called either from an interrupt context or a
111 * normal process context.
112 */ 105 */
113int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) 106static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
114{ 107{
115 struct Scsi_Host *host = cmd->device->host; 108 struct Scsi_Host *host = cmd->device->host;
116 struct scsi_device *device = cmd->device; 109 struct scsi_device *device = cmd->device;
@@ -150,7 +143,8 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
150 * Decrement the counters, since these commands are no longer 143 * Decrement the counters, since these commands are no longer
151 * active on the host/device. 144 * active on the host/device.
152 */ 145 */
153 scsi_device_unbusy(device); 146 if (unbusy)
147 scsi_device_unbusy(device);
154 148
155 /* 149 /*
156 * Requeue this command. It will go before all other commands 150 * Requeue this command. It will go before all other commands
@@ -172,6 +166,29 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
172 return 0; 166 return 0;
173} 167}
174 168
169/*
170 * Function: scsi_queue_insert()
171 *
172 * Purpose: Insert a command in the midlevel queue.
173 *
174 * Arguments: cmd - command that we are adding to queue.
175 * reason - why we are inserting command to queue.
176 *
177 * Lock status: Assumed that lock is not held upon entry.
178 *
179 * Returns: Nothing.
180 *
181 * Notes: We do this for one of two cases. Either the host is busy
182 * and it cannot accept any more commands for the time being,
183 * or the device returned QUEUE_FULL and can accept no more
184 * commands.
185 * Notes: This could be called either from an interrupt context or a
186 * normal process context.
187 */
188int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
189{
190 return __scsi_queue_insert(cmd, reason, 1);
191}
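
The unbusy flag is the point of the refactor: scsi_queue_insert() keeps its historical contract of running before completion, where the device busy count must still be dropped, while completion-path callers can requeue without decrementing twice. The retry cases at the end of scsi_io_completion() below switch accordingly:

	__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);	/* after completion    */
	scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);		/* pre-completion, ==(..., 1) */
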
175/** 192/**
176 * scsi_execute - insert request and wait for the result 193 * scsi_execute - insert request and wait for the result
177 * @sdev: scsi device 194 * @sdev: scsi device
@@ -684,6 +701,8 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
684 scsi_run_queue(sdev->request_queue); 701 scsi_run_queue(sdev->request_queue);
685} 702}
686 703
704static void __scsi_release_buffers(struct scsi_cmnd *, int);
705
687/* 706/*
688 * Function: scsi_end_request() 707 * Function: scsi_end_request()
689 * 708 *
@@ -732,6 +751,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
732 * leftovers in the front of the 751 * leftovers in the front of the
733 * queue, and goose the queue again. 752 * queue, and goose the queue again.
734 */ 753 */
754 scsi_release_buffers(cmd);
735 scsi_requeue_command(q, cmd); 755 scsi_requeue_command(q, cmd);
736 cmd = NULL; 756 cmd = NULL;
737 } 757 }
@@ -743,6 +763,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
743 * This will goose the queue request function at the end, so we don't 763 * This will goose the queue request function at the end, so we don't
744 * need to worry about launching another command. 764 * need to worry about launching another command.
745 */ 765 */
766 __scsi_release_buffers(cmd, 0);
746 scsi_next_command(cmd); 767 scsi_next_command(cmd);
747 return NULL; 768 return NULL;
748} 769}
@@ -798,6 +819,26 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
798 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); 819 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
799} 820}
800 821
822static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
823{
824
825 if (cmd->sdb.table.nents)
826 scsi_free_sgtable(&cmd->sdb);
827
828 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
829
830 if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
831 struct scsi_data_buffer *bidi_sdb =
832 cmd->request->next_rq->special;
833 scsi_free_sgtable(bidi_sdb);
834 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
835 cmd->request->next_rq->special = NULL;
836 }
837
838 if (scsi_prot_sg_count(cmd))
839 scsi_free_sgtable(cmd->prot_sdb);
840}
841
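
Of the two entry points, scsi_release_buffers() keeps its external behaviour (do_bidi_check = 1), while scsi_end_request()'s good-completion path uses __scsi_release_buffers(cmd, 0): bidi requests cannot reach that path (scsi_io_completion() BUG()s on bidi for non-PC requests), so the check on req->next_rq is safely skipped. In short:

	scsi_release_buffers(cmd);	/* public: also frees a bidi sdb     */
	__scsi_release_buffers(cmd, 0);	/* fast path: bidi already handled   */
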
801/* 842/*
802 * Function: scsi_release_buffers() 843 * Function: scsi_release_buffers()
803 * 844 *
@@ -817,21 +858,7 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
817 */ 858 */
818void scsi_release_buffers(struct scsi_cmnd *cmd) 859void scsi_release_buffers(struct scsi_cmnd *cmd)
819{ 860{
820 if (cmd->sdb.table.nents) 861 __scsi_release_buffers(cmd, 1);
821 scsi_free_sgtable(&cmd->sdb);
822
823 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
824
825 if (scsi_bidi_cmnd(cmd)) {
826 struct scsi_data_buffer *bidi_sdb =
827 cmd->request->next_rq->special;
828 scsi_free_sgtable(bidi_sdb);
829 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
830 cmd->request->next_rq->special = NULL;
831 }
832
833 if (scsi_prot_sg_count(cmd))
834 scsi_free_sgtable(cmd->prot_sdb);
835} 862}
836EXPORT_SYMBOL(scsi_release_buffers); 863EXPORT_SYMBOL(scsi_release_buffers);
837 864
@@ -945,7 +972,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
945 } 972 }
946 973
947 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ 974 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
948 scsi_release_buffers(cmd);
949 975
950 /* 976 /*
951 * Next deal with any sectors which we were able to correctly 977 * Next deal with any sectors which we were able to correctly
@@ -963,6 +989,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
963 return; 989 return;
964 this_count = blk_rq_bytes(req); 990 this_count = blk_rq_bytes(req);
965 991
992 error = -EIO;
993
966 if (host_byte(result) == DID_RESET) { 994 if (host_byte(result) == DID_RESET) {
967 /* Third party bus reset or reset for error recovery 995 /* Third party bus reset or reset for error recovery
968 * reasons. Just retry the command and see what 996 * reasons. Just retry the command and see what
@@ -1004,13 +1032,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1004 /* This will issue a new 6-byte command. */ 1032 /* This will issue a new 6-byte command. */
1005 cmd->device->use_10_for_rw = 0; 1033 cmd->device->use_10_for_rw = 0;
1006 action = ACTION_REPREP; 1034 action = ACTION_REPREP;
1035 } else if (sshdr.asc == 0x10) /* DIX */ {
1036 description = "Host Data Integrity Failure";
1037 action = ACTION_FAIL;
1038 error = -EILSEQ;
1007 } else 1039 } else
1008 action = ACTION_FAIL; 1040 action = ACTION_FAIL;
1009 break; 1041 break;
1010 case ABORTED_COMMAND: 1042 case ABORTED_COMMAND:
1011 if (sshdr.asc == 0x10) { /* DIF */ 1043 if (sshdr.asc == 0x10) { /* DIF */
1044 description = "Target Data Integrity Failure";
1012 action = ACTION_FAIL; 1045 action = ACTION_FAIL;
1013 description = "Data Integrity Failure"; 1046 error = -EILSEQ;
1014 } else 1047 } else
1015 action = ACTION_RETRY; 1048 action = ACTION_RETRY;
1016 break; 1049 break;
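
The decision table implied by the two hunks above, with the sense data already decoded into sshdr:

	ILLEGAL_REQUEST + asc 0x10  ->  "Host Data Integrity Failure" (DIX),
	                                ACTION_FAIL, error = -EILSEQ
	ABORTED_COMMAND + asc 0x10  ->  "Target Data Integrity Failure" (DIF),
	                                ACTION_FAIL, error = -EILSEQ

Returning -EILSEQ instead of the blanket -EIO (set as the default a few hunks earlier) lets upper layers distinguish an integrity-checksum mismatch from ordinary media or transport errors.
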
@@ -1029,6 +1062,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1029 case 0x09: /* self test in progress */ 1062 case 0x09: /* self test in progress */
1030 action = ACTION_DELAYED_RETRY; 1063 action = ACTION_DELAYED_RETRY;
1031 break; 1064 break;
1065 default:
1066 description = "Device not ready";
1067 action = ACTION_FAIL;
1068 break;
1032 } 1069 }
1033 } else { 1070 } else {
1034 description = "Device not ready"; 1071 description = "Device not ready";
@@ -1052,9 +1089,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1052 switch (action) { 1089 switch (action) {
1053 case ACTION_FAIL: 1090 case ACTION_FAIL:
1054 /* Give up and fail the remainder of the request */ 1091 /* Give up and fail the remainder of the request */
1092 scsi_release_buffers(cmd);
1055 if (!(req->cmd_flags & REQ_QUIET)) { 1093 if (!(req->cmd_flags & REQ_QUIET)) {
1056 if (description) 1094 if (description)
1057 scmd_printk(KERN_INFO, cmd, "%s", 1095 scmd_printk(KERN_INFO, cmd, "%s\n",
1058 description); 1096 description);
1059 scsi_print_result(cmd); 1097 scsi_print_result(cmd);
1060 if (driver_byte(result) & DRIVER_SENSE) 1098 if (driver_byte(result) & DRIVER_SENSE)
@@ -1067,15 +1105,16 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1067 /* Unprep the request and put it back at the head of the queue. 1105 /* Unprep the request and put it back at the head of the queue.
1068 * A new command will be prepared and issued. 1106 * A new command will be prepared and issued.
1069 */ 1107 */
1108 scsi_release_buffers(cmd);
1070 scsi_requeue_command(q, cmd); 1109 scsi_requeue_command(q, cmd);
1071 break; 1110 break;
1072 case ACTION_RETRY: 1111 case ACTION_RETRY:
1073 /* Retry the same command immediately */ 1112 /* Retry the same command immediately */
1074 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); 1113 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
1075 break; 1114 break;
1076 case ACTION_DELAYED_RETRY: 1115 case ACTION_DELAYED_RETRY:
1077 /* Retry the same command after a delay */ 1116 /* Retry the same command after a delay */
1078 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 1117 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
1079 break; 1118 break;
1080 } 1119 }
1081} 1120}
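
Taken together, the scsi_io_completion() changes move scsi_release_buffers() out of the common entry path and into the branches that actually finish the command (ACTION_FAIL) or re-prepare it from scratch (ACTION_REPREP); the retry branches keep the command's mapped buffers, since __scsi_queue_insert(..., 0) reissues the already-prepared command as-is.
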
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 17914a346f71..66505bb79410 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -414,8 +414,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
414 device_initialize(dev); 414 device_initialize(dev);
415 starget->reap_ref = 1; 415 starget->reap_ref = 1;
416 dev->parent = get_device(parent); 416 dev->parent = get_device(parent);
417 sprintf(dev->bus_id, "target%d:%d:%d", 417 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
418 shost->host_no, channel, id);
419#ifndef CONFIG_SYSFS_DEPRECATED 418#ifndef CONFIG_SYSFS_DEPRECATED
420 dev->bus = &scsi_bus_type; 419 dev->bus = &scsi_bus_type;
421#endif 420#endif
@@ -1024,7 +1023,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
1024 if (rescan || !scsi_device_created(sdev)) { 1023 if (rescan || !scsi_device_created(sdev)) {
1025 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 1024 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
1026 "scsi scan: device exists on %s\n", 1025 "scsi scan: device exists on %s\n",
1027 sdev->sdev_gendev.bus_id)); 1026 dev_name(&sdev->sdev_gendev)));
1028 if (sdevp) 1027 if (sdevp)
1029 *sdevp = sdev; 1028 *sdevp = sdev;
1030 else 1029 else
@@ -1163,7 +1162,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
1163 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1162 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1164 1163
1165 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: Sequential scan of" 1164 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: Sequential scan of"
1166 "%s\n", starget->dev.bus_id)); 1165 "%s\n", dev_name(&starget->dev)));
1167 1166
1168 max_dev_lun = min(max_scsi_luns, shost->max_lun); 1167 max_dev_lun = min(max_scsi_luns, shost->max_lun);
1169 /* 1168 /*
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 93c28f30bbd7..da63802cbf9d 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1079,16 +1079,14 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
1079 device_initialize(&sdev->sdev_gendev); 1079 device_initialize(&sdev->sdev_gendev);
1080 sdev->sdev_gendev.bus = &scsi_bus_type; 1080 sdev->sdev_gendev.bus = &scsi_bus_type;
1081 sdev->sdev_gendev.type = &scsi_dev_type; 1081 sdev->sdev_gendev.type = &scsi_dev_type;
1082 sprintf(sdev->sdev_gendev.bus_id,"%d:%d:%d:%d", 1082 dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%d",
1083 sdev->host->host_no, sdev->channel, sdev->id, 1083 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1084 sdev->lun); 1084
1085
1086 device_initialize(&sdev->sdev_dev); 1085 device_initialize(&sdev->sdev_dev);
1087 sdev->sdev_dev.parent = &sdev->sdev_gendev; 1086 sdev->sdev_dev.parent = &sdev->sdev_gendev;
1088 sdev->sdev_dev.class = &sdev_class; 1087 sdev->sdev_dev.class = &sdev_class;
1089 snprintf(sdev->sdev_dev.bus_id, BUS_ID_SIZE, 1088 dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d",
1090 "%d:%d:%d:%d", sdev->host->host_no, 1089 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1091 sdev->channel, sdev->id, sdev->lun);
1092 sdev->scsi_level = starget->scsi_level; 1090 sdev->scsi_level = starget->scsi_level;
1093 transport_setup_device(&sdev->sdev_gendev); 1091 transport_setup_device(&sdev->sdev_gendev);
1094 spin_lock_irqsave(shost->host_lock, flags); 1092 spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 062304de4854..5f77417ed585 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2407,8 +2407,12 @@ fc_rport_final_delete(struct work_struct *work)
2407 /* 2407 /*
2408 * Notify the driver that the rport is now dead. The LLDD will 2408 * Notify the driver that the rport is now dead. The LLDD will
2409 * also guarantee that any communication to the rport is terminated 2409 * also guarantee that any communication to the rport is terminated
2410 *
2411 * Avoid this call if we already called it when we preserved the
2412 * rport for the binding.
2410 */ 2413 */
2411 if (i->f->dev_loss_tmo_callbk) 2414 if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
2415 (i->f->dev_loss_tmo_callbk))
2412 i->f->dev_loss_tmo_callbk(rport); 2416 i->f->dev_loss_tmo_callbk(rport);
2413 2417
2414 transport_remove_device(dev); 2418 transport_remove_device(dev);
@@ -2486,8 +2490,8 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
2486 device_initialize(dev); /* takes self reference */ 2490 device_initialize(dev); /* takes self reference */
2487 dev->parent = get_device(&shost->shost_gendev); /* parent reference */ 2491 dev->parent = get_device(&shost->shost_gendev); /* parent reference */
2488 dev->release = fc_rport_dev_release; 2492 dev->release = fc_rport_dev_release;
2489 sprintf(dev->bus_id, "rport-%d:%d-%d", 2493 dev_set_name(dev, "rport-%d:%d-%d",
2490 shost->host_no, channel, rport->number); 2494 shost->host_no, channel, rport->number);
2491 transport_setup_device(dev); 2495 transport_setup_device(dev);
2492 2496
2493 error = device_add(dev); 2497 error = device_add(dev);
@@ -2647,7 +2651,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2647 spin_lock_irqsave(shost->host_lock, flags); 2651 spin_lock_irqsave(shost->host_lock, flags);
2648 2652
2649 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | 2653 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
2650 FC_RPORT_DEVLOSS_PENDING); 2654 FC_RPORT_DEVLOSS_PENDING |
2655 FC_RPORT_DEVLOSS_CALLBK_DONE);
2651 2656
2652 /* if target, initiate a scan */ 2657 /* if target, initiate a scan */
2653 if (rport->scsi_target_id != -1) { 2658 if (rport->scsi_target_id != -1) {
@@ -2944,6 +2949,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
2944 struct fc_rport *rport = 2949 struct fc_rport *rport =
2945 container_of(work, struct fc_rport, dev_loss_work.work); 2950 container_of(work, struct fc_rport, dev_loss_work.work);
2946 struct Scsi_Host *shost = rport_to_shost(rport); 2951 struct Scsi_Host *shost = rport_to_shost(rport);
2952 struct fc_internal *i = to_fc_internal(shost->transportt);
2947 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 2953 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2948 unsigned long flags; 2954 unsigned long flags;
2949 2955
@@ -3011,6 +3017,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
3011 rport->roles = FC_PORT_ROLE_UNKNOWN; 3017 rport->roles = FC_PORT_ROLE_UNKNOWN;
3012 rport->port_state = FC_PORTSTATE_NOTPRESENT; 3018 rport->port_state = FC_PORTSTATE_NOTPRESENT;
3013 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; 3019 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3020 rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3014 3021
3015 /* 3022 /*
3016 * Pre-emptively kill I/O rather than waiting for the work queue 3023 * Pre-emptively kill I/O rather than waiting for the work queue
@@ -3046,8 +3053,18 @@ fc_timeout_deleted_rport(struct work_struct *work)
3046 * all attached scsi devices. 3053 * all attached scsi devices.
3047 */ 3054 */
3048 fc_queue_work(shost, &rport->stgt_delete_work); 3055 fc_queue_work(shost, &rport->stgt_delete_work);
3056
3057 /*
3058 * Notify the driver that the rport is now dead. The LLDD will
3059 * also guarantee that any communication to the rport is terminated
3060 *
3061 * Note: we set the CALLBK_DONE flag above to correspond
3062 */
3063 if (i->f->dev_loss_tmo_callbk)
3064 i->f->dev_loss_tmo_callbk(rport);
3049} 3065}
3050 3066
3067
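
fc_timeout_deleted_rport() now delivers dev_loss_tmo_callbk itself when it downgrades a port to FC_PORTSTATE_NOTPRESENT, so it sets FC_RPORT_DEVLOSS_CALLBK_DONE; fc_rport_final_delete() skips the callback when the flag is present, and fc_remote_port_add() clears it if the port returns. The lifecycle, roughly:

	/* devloss timer fires:	callbk(rport); flags |= CALLBK_DONE;
	 * port re-appears:	flags &= ~CALLBK_DONE;		(re-armed)
	 * final delete:	if (!(flags & CALLBK_DONE)) callbk(rport);
	 */

This guarantees the LLDD sees exactly one loss notification per disappearance.
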
3051/** 3068/**
3052 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target. 3069 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
3053 * @work: rport to terminate io on. 3070 * @work: rport to terminate io on.
@@ -3164,8 +3181,8 @@ fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3164 device_initialize(dev); /* takes self reference */ 3181 device_initialize(dev); /* takes self reference */
3165 dev->parent = get_device(pdev); /* takes parent reference */ 3182 dev->parent = get_device(pdev); /* takes parent reference */
3166 dev->release = fc_vport_dev_release; 3183 dev->release = fc_vport_dev_release;
3167 sprintf(dev->bus_id, "vport-%d:%d-%d", 3184 dev_set_name(dev, "vport-%d:%d-%d",
3168 shost->host_no, channel, vport->number); 3185 shost->host_no, channel, vport->number);
3169 transport_setup_device(dev); 3186 transport_setup_device(dev);
3170 3187
3171 error = device_add(dev); 3188 error = device_add(dev);
@@ -3188,19 +3205,19 @@ fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3188 */ 3205 */
3189 if (pdev != &shost->shost_gendev) { 3206 if (pdev != &shost->shost_gendev) {
3190 error = sysfs_create_link(&shost->shost_gendev.kobj, 3207 error = sysfs_create_link(&shost->shost_gendev.kobj,
3191 &dev->kobj, dev->bus_id); 3208 &dev->kobj, dev_name(dev));
3192 if (error) 3209 if (error)
3193 printk(KERN_ERR 3210 printk(KERN_ERR
3194 "%s: Cannot create vport symlinks for " 3211 "%s: Cannot create vport symlinks for "
3195 "%s, err=%d\n", 3212 "%s, err=%d\n",
3196 __func__, dev->bus_id, error); 3213 __func__, dev_name(dev), error);
3197 } 3214 }
3198 spin_lock_irqsave(shost->host_lock, flags); 3215 spin_lock_irqsave(shost->host_lock, flags);
3199 vport->flags &= ~FC_VPORT_CREATING; 3216 vport->flags &= ~FC_VPORT_CREATING;
3200 spin_unlock_irqrestore(shost->host_lock, flags); 3217 spin_unlock_irqrestore(shost->host_lock, flags);
3201 3218
3202 dev_printk(KERN_NOTICE, pdev, 3219 dev_printk(KERN_NOTICE, pdev,
3203 "%s created via shost%d channel %d\n", dev->bus_id, 3220 "%s created via shost%d channel %d\n", dev_name(dev),
3204 shost->host_no, channel); 3221 shost->host_no, channel);
3205 3222
3206 *ret_vport = vport; 3223 *ret_vport = vport;
@@ -3297,7 +3314,7 @@ fc_vport_terminate(struct fc_vport *vport)
3297 return stat; 3314 return stat;
3298 3315
3299 if (dev->parent != &shost->shost_gendev) 3316 if (dev->parent != &shost->shost_gendev)
3300 sysfs_remove_link(&shost->shost_gendev.kobj, dev->bus_id); 3317 sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
3301 transport_remove_device(dev); 3318 transport_remove_device(dev);
3302 device_del(dev); 3319 device_del(dev);
3303 transport_destroy_device(dev); 3320 transport_destroy_device(dev);
@@ -3329,7 +3346,7 @@ fc_vport_sched_delete(struct work_struct *work)
3329 dev_printk(KERN_ERR, vport->dev.parent, 3346 dev_printk(KERN_ERR, vport->dev.parent,
3330 "%s: %s could not be deleted created via " 3347 "%s: %s could not be deleted created via "
3331 "shost%d channel %d - error %d\n", __func__, 3348 "shost%d channel %d - error %d\n", __func__,
3332 vport->dev.bus_id, vport->shost->host_no, 3349 dev_name(&vport->dev), vport->shost->host_no,
3333 vport->channel, stat); 3350 vport->channel, stat);
3334} 3351}
3335 3352
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 4a803ebaf508..75c9297694cb 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -187,8 +187,7 @@ iscsi_create_endpoint(int dd_size)
187 187
188 ep->id = id; 188 ep->id = id;
189 ep->dev.class = &iscsi_endpoint_class; 189 ep->dev.class = &iscsi_endpoint_class;
190 snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%llu", 190 dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
191 (unsigned long long) id);
192 err = device_register(&ep->dev); 191 err = device_register(&ep->dev);
193 if (err) 192 if (err)
194 goto free_ep; 193 goto free_ep;
@@ -724,8 +723,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
724 } 723 }
725 session->target_id = id; 724 session->target_id = id;
726 725
727 snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u", 726 dev_set_name(&session->dev, "session%u", session->sid);
728 session->sid);
729 err = device_add(&session->dev); 727 err = device_add(&session->dev);
730 if (err) { 728 if (err) {
731 iscsi_cls_session_printk(KERN_ERR, session, 729 iscsi_cls_session_printk(KERN_ERR, session,
@@ -898,8 +896,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
898 if (!get_device(&session->dev)) 896 if (!get_device(&session->dev))
899 goto free_conn; 897 goto free_conn;
900 898
901 snprintf(conn->dev.bus_id, BUS_ID_SIZE, "connection%d:%u", 899 dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid);
902 session->sid, cid);
903 conn->dev.parent = &session->dev; 900 conn->dev.parent = &session->dev;
904 conn->dev.release = iscsi_conn_release; 901 conn->dev.release = iscsi_conn_release;
905 err = device_register(&conn->dev); 902 err = device_register(&conn->dev);
@@ -1816,7 +1813,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
1816 priv->t.create_work_queue = 1; 1813 priv->t.create_work_queue = 1;
1817 1814
1818 priv->dev.class = &iscsi_transport_class; 1815 priv->dev.class = &iscsi_transport_class;
1819 snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name); 1816 dev_set_name(&priv->dev, "%s", tt->name);
1820 err = device_register(&priv->dev); 1817 err = device_register(&priv->dev);
1821 if (err) 1818 if (err)
1822 goto free_priv; 1819 goto free_priv;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 366609386be1..50988cbf7b2d 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -207,7 +207,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
207 struct request_queue *q; 207 struct request_queue *q;
208 int error; 208 int error;
209 struct device *dev; 209 struct device *dev;
210 char namebuf[BUS_ID_SIZE]; 210 char namebuf[20];
211 const char *name; 211 const char *name;
212 void (*release)(struct device *); 212 void (*release)(struct device *);
213 213
@@ -219,7 +219,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
219 if (rphy) { 219 if (rphy) {
220 q = blk_init_queue(sas_non_host_smp_request, NULL); 220 q = blk_init_queue(sas_non_host_smp_request, NULL);
221 dev = &rphy->dev; 221 dev = &rphy->dev;
222 name = dev->bus_id; 222 name = dev_name(dev);
223 release = NULL; 223 release = NULL;
224 } else { 224 } else {
225 q = blk_init_queue(sas_host_smp_request, NULL); 225 q = blk_init_queue(sas_host_smp_request, NULL);
@@ -629,10 +629,10 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number)
629 INIT_LIST_HEAD(&phy->port_siblings); 629 INIT_LIST_HEAD(&phy->port_siblings);
630 if (scsi_is_sas_expander_device(parent)) { 630 if (scsi_is_sas_expander_device(parent)) {
631 struct sas_rphy *rphy = dev_to_rphy(parent); 631 struct sas_rphy *rphy = dev_to_rphy(parent);
632 sprintf(phy->dev.bus_id, "phy-%d:%d:%d", shost->host_no, 632 dev_set_name(&phy->dev, "phy-%d:%d:%d", shost->host_no,
633 rphy->scsi_target_id, number); 633 rphy->scsi_target_id, number);
634 } else 634 } else
635 sprintf(phy->dev.bus_id, "phy-%d:%d", shost->host_no, number); 635 dev_set_name(&phy->dev, "phy-%d:%d", shost->host_no, number);
636 636
637 transport_setup_device(&phy->dev); 637 transport_setup_device(&phy->dev);
638 638
@@ -770,7 +770,7 @@ static void sas_port_create_link(struct sas_port *port,
770 int res; 770 int res;
771 771
772 res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj, 772 res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj,
773 phy->dev.bus_id); 773 dev_name(&phy->dev));
774 if (res) 774 if (res)
775 goto err; 775 goto err;
776 res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port"); 776 res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port");
@@ -785,7 +785,7 @@ err:
785static void sas_port_delete_link(struct sas_port *port, 785static void sas_port_delete_link(struct sas_port *port,
786 struct sas_phy *phy) 786 struct sas_phy *phy)
787{ 787{
788 sysfs_remove_link(&port->dev.kobj, phy->dev.bus_id); 788 sysfs_remove_link(&port->dev.kobj, dev_name(&phy->dev));
789 sysfs_remove_link(&phy->dev.kobj, "port"); 789 sysfs_remove_link(&phy->dev.kobj, "port");
790} 790}
791 791
@@ -821,11 +821,11 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id)
821 821
822 if (scsi_is_sas_expander_device(parent)) { 822 if (scsi_is_sas_expander_device(parent)) {
823 struct sas_rphy *rphy = dev_to_rphy(parent); 823 struct sas_rphy *rphy = dev_to_rphy(parent);
824 sprintf(port->dev.bus_id, "port-%d:%d:%d", shost->host_no, 824 dev_set_name(&port->dev, "port-%d:%d:%d", shost->host_no,
825 rphy->scsi_target_id, port->port_identifier); 825 rphy->scsi_target_id, port->port_identifier);
826 } else 826 } else
827 sprintf(port->dev.bus_id, "port-%d:%d", shost->host_no, 827 dev_set_name(&port->dev, "port-%d:%d", shost->host_no,
828 port->port_identifier); 828 port->port_identifier);
829 829
830 transport_setup_device(&port->dev); 830 transport_setup_device(&port->dev);
831 831
@@ -935,7 +935,7 @@ void sas_port_delete(struct sas_port *port)
935 if (port->is_backlink) { 935 if (port->is_backlink) {
936 struct device *parent = port->dev.parent; 936 struct device *parent = port->dev.parent;
937 937
938 sysfs_remove_link(&port->dev.kobj, parent->bus_id); 938 sysfs_remove_link(&port->dev.kobj, dev_name(parent));
939 port->is_backlink = 0; 939 port->is_backlink = 0;
940 } 940 }
941 941
@@ -984,7 +984,8 @@ void sas_port_add_phy(struct sas_port *port, struct sas_phy *phy)
984 /* If this trips, you added a phy that was already 984 /* If this trips, you added a phy that was already
985 * part of a different port */ 985 * part of a different port */
986 if (unlikely(tmp != phy)) { 986 if (unlikely(tmp != phy)) {
987 dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n", phy->dev.bus_id); 987 dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n",
988 dev_name(&phy->dev));
988 BUG(); 989 BUG();
989 } 990 }
990 } else { 991 } else {
@@ -1023,7 +1024,7 @@ void sas_port_mark_backlink(struct sas_port *port)
1023 return; 1024 return;
1024 port->is_backlink = 1; 1025 port->is_backlink = 1;
1025 res = sysfs_create_link(&port->dev.kobj, &parent->kobj, 1026 res = sysfs_create_link(&port->dev.kobj, &parent->kobj,
1026 parent->bus_id); 1027 dev_name(parent));
1027 if (res) 1028 if (res)
1028 goto err; 1029 goto err;
1029 return; 1030 return;
@@ -1367,11 +1368,12 @@ struct sas_rphy *sas_end_device_alloc(struct sas_port *parent)
1367 rdev->rphy.dev.release = sas_end_device_release; 1368 rdev->rphy.dev.release = sas_end_device_release;
1368 if (scsi_is_sas_expander_device(parent->dev.parent)) { 1369 if (scsi_is_sas_expander_device(parent->dev.parent)) {
1369 struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent); 1370 struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent);
1370 sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d:%d", 1371 dev_set_name(&rdev->rphy.dev, "end_device-%d:%d:%d",
1371 shost->host_no, rphy->scsi_target_id, parent->port_identifier); 1372 shost->host_no, rphy->scsi_target_id,
1373 parent->port_identifier);
1372 } else 1374 } else
1373 sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d", 1375 dev_set_name(&rdev->rphy.dev, "end_device-%d:%d",
1374 shost->host_no, parent->port_identifier); 1376 shost->host_no, parent->port_identifier);
1375 rdev->rphy.identify.device_type = SAS_END_DEVICE; 1377 rdev->rphy.identify.device_type = SAS_END_DEVICE;
1376 sas_rphy_initialize(&rdev->rphy); 1378 sas_rphy_initialize(&rdev->rphy);
1377 transport_setup_device(&rdev->rphy.dev); 1379 transport_setup_device(&rdev->rphy.dev);
@@ -1411,8 +1413,8 @@ struct sas_rphy *sas_expander_alloc(struct sas_port *parent,
1411 mutex_lock(&sas_host->lock); 1413 mutex_lock(&sas_host->lock);
1412 rdev->rphy.scsi_target_id = sas_host->next_expander_id++; 1414 rdev->rphy.scsi_target_id = sas_host->next_expander_id++;
1413 mutex_unlock(&sas_host->lock); 1415 mutex_unlock(&sas_host->lock);
1414 sprintf(rdev->rphy.dev.bus_id, "expander-%d:%d", 1416 dev_set_name(&rdev->rphy.dev, "expander-%d:%d",
1415 shost->host_no, rdev->rphy.scsi_target_id); 1417 shost->host_no, rdev->rphy.scsi_target_id);
1416 rdev->rphy.identify.device_type = type; 1418 rdev->rphy.identify.device_type = type;
1417 sas_rphy_initialize(&rdev->rphy); 1419 sas_rphy_initialize(&rdev->rphy);
1418 transport_setup_device(&rdev->rphy.dev); 1420 transport_setup_device(&rdev->rphy.dev);
@@ -1445,7 +1447,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
1445 transport_add_device(&rphy->dev); 1447 transport_add_device(&rphy->dev);
1446 transport_configure_device(&rphy->dev); 1448 transport_configure_device(&rphy->dev);
1447 if (sas_bsg_initialize(shost, rphy)) 1449 if (sas_bsg_initialize(shost, rphy))
1448 printk("fail to a bsg device %s\n", rphy->dev.bus_id); 1450 printk("fail to a bsg device %s\n", dev_name(&rphy->dev));
1449 1451
1450 1452
1451 mutex_lock(&sas_host->lock); 1453 mutex_lock(&sas_host->lock);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 8a7af951d98a..21a045e0559f 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -212,7 +212,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
212 rport->roles = ids->roles; 212 rport->roles = ids->roles;
213 213
214 id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); 214 id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
215 sprintf(rport->dev.bus_id, "port-%d:%d", shost->host_no, id); 215 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
216 216
217 transport_setup_device(&rport->dev); 217 transport_setup_device(&rport->dev);
218 218
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e035c1114010..d57566b8be0a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1830,7 +1830,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1830 device_initialize(&sdkp->dev); 1830 device_initialize(&sdkp->dev);
1831 sdkp->dev.parent = &sdp->sdev_gendev; 1831 sdkp->dev.parent = &sdp->sdev_gendev;
1832 sdkp->dev.class = &sd_disk_class; 1832 sdkp->dev.class = &sd_disk_class;
1833 strncpy(sdkp->dev.bus_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE); 1833 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
1834 1834
1835 if (device_add(&sdkp->dev)) 1835 if (device_add(&sdkp->dev))
1836 goto out_free_index; 1836 goto out_free_index;
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 3ebb1f289490..184dff492797 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -142,7 +142,7 @@ static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
142static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors) 142static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
143{ 143{
144 struct sd_dif_tuple *sdt = prot; 144 struct sd_dif_tuple *sdt = prot;
145 char *tag = tag_buf; 145 u8 *tag = tag_buf;
146 unsigned int i, j; 146 unsigned int i, j;
147 147
148 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) { 148 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
@@ -154,7 +154,7 @@ static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors
154static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors) 154static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
155{ 155{
156 struct sd_dif_tuple *sdt = prot; 156 struct sd_dif_tuple *sdt = prot;
157 char *tag = tag_buf; 157 u8 *tag = tag_buf;
158 unsigned int i, j; 158 unsigned int i, j;
159 159
160 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) { 160 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
@@ -256,7 +256,7 @@ static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
256static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors) 256static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
257{ 257{
258 struct sd_dif_tuple *sdt = prot; 258 struct sd_dif_tuple *sdt = prot;
259 char *tag = tag_buf; 259 u8 *tag = tag_buf;
260 unsigned int i, j; 260 unsigned int i, j;
261 261
262 for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) { 262 for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
@@ -269,7 +269,7 @@ static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors
269static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors) 269static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
270{ 270{
271 struct sd_dif_tuple *sdt = prot; 271 struct sd_dif_tuple *sdt = prot;
272 char *tag = tag_buf; 272 u8 *tag = tag_buf;
273 unsigned int i, j; 273 unsigned int i, j;
274 274
275 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) { 275 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
@@ -374,7 +374,10 @@ void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix, unsig
374 else 374 else
375 csum_convert = 0; 375 csum_convert = 0;
376 376
377 BUG_ON(dif && (scmd->cmnd[0] == READ_6 || scmd->cmnd[0] == WRITE_6));
378
377 switch (scmd->cmnd[0]) { 379 switch (scmd->cmnd[0]) {
380 case READ_6:
378 case READ_10: 381 case READ_10:
379 case READ_12: 382 case READ_12:
380 case READ_16: 383 case READ_16:
@@ -390,6 +393,7 @@ void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix, unsig
390 393
391 break; 394 break;
392 395
396 case WRITE_6:
393 case WRITE_10: 397 case WRITE_10:
394 case WRITE_12: 398 case WRITE_12:
395 case WRITE_16: 399 case WRITE_16:
@@ -475,8 +479,9 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
475 479
476error: 480error:
477 kunmap_atomic(sdt, KM_USER0); 481 kunmap_atomic(sdt, KM_USER0);
478 sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u\n", 482 sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
479 __func__, virt, phys, be32_to_cpu(sdt->ref_tag)); 483 __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
484 be16_to_cpu(sdt->app_tag));
480 485
481 return -EIO; 486 return -EIO;
482} 487}
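
The sd_dif.c hunks make two related changes: tag buffers are handled as raw octets (u8 *) rather than signed char, and 6-byte READ/WRITE CDBs are admitted to the DIX setup while a guard forbids combining them with target protection (a 6-byte CDB has no protection fields). A minimal sketch of the resulting tag helper, assuming the era's struct sd_dif_tuple layout, which the hunks do not show:

	static void sd_dif_set_tag_sketch(void *prot, void *tag_buf,
					  unsigned int sectors)
	{
		struct sd_dif_tuple *sdt = prot;
		u8 *tag = tag_buf;	/* raw octets: with signed char,
					 * tag[j] << 8 would sign-extend
					 * for byte values >= 0x80 */
		unsigned int i, j;

		for (i = 0, j = 0; i < sectors; i++, j += 2, sdt++)
			sdt->app_tag = tag[j] << 8 | tag[j + 1];
	}
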
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 7f0df29f3a64..e946e05db7f7 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -526,7 +526,7 @@ static int ses_intf_add(struct device *cdev,
526 if (!scomp) 526 if (!scomp)
527 goto err_free; 527 goto err_free;
528 528
529 edev = enclosure_register(cdev->parent, sdev->sdev_gendev.bus_id, 529 edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
530 components, &ses_enclosure_callbacks); 530 components, &ses_enclosure_callbacks);
531 if (IS_ERR(edev)) { 531 if (IS_ERR(edev)) {
532 err = PTR_ERR(edev); 532 err = PTR_ERR(edev);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5103855242ae..8f0bd3f7a59f 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1669,6 +1669,8 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
1669 md->pages = req_schp->pages; 1669 md->pages = req_schp->pages;
1670 md->page_order = req_schp->page_order; 1670 md->page_order = req_schp->page_order;
1671 md->nr_entries = req_schp->k_use_sg; 1671 md->nr_entries = req_schp->k_use_sg;
1672 md->offset = 0;
1673 md->null_mapped = hp->dxferp ? 0 : 1;
1672 } 1674 }
1673 1675
1674 if (iov_count) 1676 if (iov_count)
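
The two lines added to sg_start_req() complete the request-mapping descriptor for the no-transfer case: when the caller supplied no data pointer (hp->dxferp is NULL), null_mapped tells blk_rq_map_user() to build the bio over the reserved pages without touching user memory. The completed block, shown in isolation:

	md->pages = req_schp->pages;
	md->page_order = req_schp->page_order;
	md->nr_entries = req_schp->k_use_sg;
	md->offset = 0;				/* map from page start     */
	md->null_mapped = hp->dxferp ? 0 : 1;	/* no user buffer to copy  */
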
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 31fe6051c799..0807b260268b 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -297,7 +297,7 @@ out:
297 return err; 297 return err;
298} 298}
299 299
300static void __exit sgiwd93_remove(struct platform_device *pdev) 300static int __exit sgiwd93_remove(struct platform_device *pdev)
301{ 301{
302 struct Scsi_Host *host = platform_get_drvdata(pdev); 302 struct Scsi_Host *host = platform_get_drvdata(pdev);
303 struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata; 303 struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata;
@@ -307,6 +307,7 @@ static void __exit sgiwd93_remove(struct platform_device *pdev)
307 free_irq(pd->irq, host); 307 free_irq(pd->irq, host);
308 dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma); 308 dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
309 scsi_host_put(host); 309 scsi_host_put(host);
310 return 0;
310} 311}
311 312
312static struct platform_driver sgiwd93_driver = { 313static struct platform_driver sgiwd93_driver = {
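
The sgiwd93 change is purely a prototype repair: struct platform_driver declares remove() as returning int, so the void handler never matched the callback type. The corrected shape, with the teardown body elided:

	static int __exit sgiwd93_remove(struct platform_device *pdev)
	{
		/* free_irq(), dma_free_noncoherent(), scsi_host_put() ... */
		return 0;	/* platform core treats nonzero as failure */
	}
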
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index d63d229e2323..6dc8b846c112 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -102,7 +102,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
102 struct NCR_700_Host_Parameters *hostdata = 102 struct NCR_700_Host_Parameters *hostdata =
103 kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); 103 kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
104 104
105 printk(KERN_NOTICE "sim710: %s\n", dev->bus_id); 105 printk(KERN_NOTICE "sim710: %s\n", dev_name(dev));
106 printk(KERN_NOTICE "sim710: irq = %d, clock = %d, base = 0x%lx, scsi_id = %d\n", 106 printk(KERN_NOTICE "sim710: irq = %d, clock = %d, base = 0x%lx, scsi_id = %d\n",
107 irq, clock, base_addr, scsi_id); 107 irq, clock, base_addr, scsi_id);
108 108
@@ -305,7 +305,7 @@ sim710_eisa_probe(struct device *dev)
305 scsi_id = ffs(val) - 1; 305 scsi_id = ffs(val) - 1;
306 306
307 if(scsi_id > 7 || (val & ~(1<<scsi_id)) != 0) { 307 if(scsi_id > 7 || (val & ~(1<<scsi_id)) != 0) {
308 printk(KERN_ERR "sim710.c, EISA card %s has incorrect scsi_id, setting to 7\n", dev->bus_id); 308 printk(KERN_ERR "sim710.c, EISA card %s has incorrect scsi_id, setting to 7\n", dev_name(dev));
309 scsi_id = 7; 309 scsi_id = 7;
310 } 310 }
311 } else { 311 } else {
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 2bbef4c45a0d..77f0b2cdaa94 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -78,8 +78,7 @@ static int __init snirm710_probe(struct platform_device *dev)
78 base = res->start; 78 base = res->start;
79 hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); 79 hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
80 if (!hostdata) { 80 if (!hostdata) {
81 printk(KERN_ERR "%s: Failed to allocate host data\n", 81 dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
82 dev->dev.bus_id);
83 return -ENOMEM; 82 return -ENOMEM;
84 } 83 }
85 84
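
The ses.c, sim710.c and sni_53c710.c hunks (and the zalon.c one further down) are the same mechanical conversion: stop reading the bus_id array out of struct device and go through the accessors instead, ahead of bus_id's removal. A hedged sketch of the two idioms, with a hypothetical surrounding function:

	#include <linux/device.h>

	static void example_report(struct device *dev, int irq)
	{
		/* dev_name() replaces direct dev->bus_id access ... */
		printk(KERN_NOTICE "sim710: %s\n", dev_name(dev));

		/* ... and dev_printk() prefixes the message with the
		 * device name itself, so it need not be passed at all. */
		dev_printk(KERN_ERR, dev,
			   "irq problem with %d, detaching\n", irq);
	}
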
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7f3f317ee6ca..c6f19ee8f2cb 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20080504"; 20static const char *verstr = "20081215";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -182,18 +182,16 @@ static struct scsi_tape **scsi_tapes = NULL;
182 182
183static int modes_defined; 183static int modes_defined;
184 184
185static struct st_buffer *new_tape_buffer(int, int, int);
186static int enlarge_buffer(struct st_buffer *, int, int); 185static int enlarge_buffer(struct st_buffer *, int, int);
187static void clear_buffer(struct st_buffer *); 186static void clear_buffer(struct st_buffer *);
188static void normalize_buffer(struct st_buffer *); 187static void normalize_buffer(struct st_buffer *);
189static int append_to_buffer(const char __user *, struct st_buffer *, int); 188static int append_to_buffer(const char __user *, struct st_buffer *, int);
190static int from_buffer(struct st_buffer *, char __user *, int); 189static int from_buffer(struct st_buffer *, char __user *, int);
191static void move_buffer_data(struct st_buffer *, int); 190static void move_buffer_data(struct st_buffer *, int);
192static void buf_to_sg(struct st_buffer *, unsigned int);
193 191
194static int sgl_map_user_pages(struct scatterlist *, const unsigned int, 192static int sgl_map_user_pages(struct st_buffer *, const unsigned int,
195 unsigned long, size_t, int); 193 unsigned long, size_t, int);
196static int sgl_unmap_user_pages(struct scatterlist *, const unsigned int, int); 194static int sgl_unmap_user_pages(struct st_buffer *, const unsigned int, int);
197 195
198static int st_probe(struct device *); 196static int st_probe(struct device *);
199static int st_remove(struct device *); 197static int st_remove(struct device *);
@@ -435,22 +433,6 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
435 return (-EIO); 433 return (-EIO);
436} 434}
437 435
438
439/* Wakeup from interrupt */
440static void st_sleep_done(void *data, char *sense, int result, int resid)
441{
442 struct st_request *SRpnt = data;
443 struct scsi_tape *STp = SRpnt->stp;
444
445 memcpy(SRpnt->sense, sense, SCSI_SENSE_BUFFERSIZE);
446 (STp->buffer)->cmdstat.midlevel_result = SRpnt->result = result;
447 (STp->buffer)->cmdstat.residual = resid;
448 DEB( STp->write_pending = 0; )
449
450 if (SRpnt->waiting)
451 complete(SRpnt->waiting);
452}
453
454static struct st_request *st_allocate_request(struct scsi_tape *stp) 436static struct st_request *st_allocate_request(struct scsi_tape *stp)
455{ 437{
456 struct st_request *streq; 438 struct st_request *streq;
@@ -475,6 +457,63 @@ static void st_release_request(struct st_request *streq)
475 kfree(streq); 457 kfree(streq);
476} 458}
477 459
460static void st_scsi_execute_end(struct request *req, int uptodate)
461{
462 struct st_request *SRpnt = req->end_io_data;
463 struct scsi_tape *STp = SRpnt->stp;
464
465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
466 STp->buffer->cmdstat.residual = req->data_len;
467
468 if (SRpnt->waiting)
469 complete(SRpnt->waiting);
470
471 blk_rq_unmap_user(SRpnt->bio);
472 __blk_put_request(req->q, req);
473}
474
475static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
476 int data_direction, void *buffer, unsigned bufflen,
477 int timeout, int retries)
478{
479 struct request *req;
480 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
481 int err = 0;
482 int write = (data_direction == DMA_TO_DEVICE);
483
484 req = blk_get_request(SRpnt->stp->device->request_queue, write,
485 GFP_KERNEL);
486 if (!req)
487 return DRIVER_ERROR << 24;
488
489 req->cmd_type = REQ_TYPE_BLOCK_PC;
490 req->cmd_flags |= REQ_QUIET;
491
492 mdata->null_mapped = 1;
493
494 if (bufflen) {
495 err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
496 GFP_KERNEL);
497 if (err) {
498 blk_put_request(req);
499 return DRIVER_ERROR << 24;
500 }
501 }
502
503 SRpnt->bio = req->bio;
504 req->cmd_len = COMMAND_SIZE(cmd[0]);
505 memset(req->cmd, 0, BLK_MAX_CDB);
506 memcpy(req->cmd, cmd, req->cmd_len);
507 req->sense = SRpnt->sense;
508 req->sense_len = 0;
509 req->timeout = timeout;
510 req->retries = retries;
511 req->end_io_data = SRpnt;
512
513 blk_execute_rq_nowait(req->q, NULL, req, 1, st_scsi_execute_end);
514 return 0;
515}
516
478/* Do the scsi command. Waits until command performed if do_wait is true. 517/* Do the scsi command. Waits until command performed if do_wait is true.
479 Otherwise write_behind_check() is used to check that the command 518 Otherwise write_behind_check() is used to check that the command
480 has finished. */ 519 has finished. */
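
st_scsi_execute() above replaces the removed scsi_execute_async() path: it allocates a block-layer request, maps the tape buffer through the rq_map_data that st_do_scsi() fills in, and fires the request with st_scsi_execute_end() as the completion callback. It is asynchronous; a caller wanting synchronous behaviour blocks on the completion, exactly as st_do_scsi() does below. Hedged caller sketch:

	struct completion done;

	init_completion(&done);
	SRpnt->waiting = &done;		/* completed by st_scsi_execute_end() */
	if (st_scsi_execute(SRpnt, cmd, DMA_NONE, NULL, 0, timeout, retries))
		return -EBUSY;		/* request alloc or mapping failed */
	wait_for_completion(&done);
	SRpnt->waiting = NULL;
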
@@ -483,6 +522,8 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
483 int bytes, int direction, int timeout, int retries, int do_wait) 522 int bytes, int direction, int timeout, int retries, int do_wait)
484{ 523{
485 struct completion *waiting; 524 struct completion *waiting;
525 struct rq_map_data *mdata = &STp->buffer->map_data;
526 int ret;
486 527
487 /* if async, make sure there's no command outstanding */ 528 /* if async, make sure there's no command outstanding */
488 if (!do_wait && ((STp->buffer)->last_SRpnt)) { 529 if (!do_wait && ((STp->buffer)->last_SRpnt)) {
@@ -510,21 +551,27 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
510 init_completion(waiting); 551 init_completion(waiting);
511 SRpnt->waiting = waiting; 552 SRpnt->waiting = waiting;
512 553
513 if (!STp->buffer->do_dio) 554 if (STp->buffer->do_dio) {
514 buf_to_sg(STp->buffer, bytes); 555 mdata->nr_entries = STp->buffer->sg_segs;
556 mdata->pages = STp->buffer->mapped_pages;
557 } else {
558 mdata->nr_entries =
559 DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
560 STp->buffer->map_data.pages = STp->buffer->reserved_pages;
561 STp->buffer->map_data.offset = 0;
562 }
515 563
516 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd)); 564 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
517 STp->buffer->cmdstat.have_sense = 0; 565 STp->buffer->cmdstat.have_sense = 0;
518 STp->buffer->syscall_result = 0; 566 STp->buffer->syscall_result = 0;
519 567
520 if (scsi_execute_async(STp->device, cmd, COMMAND_SIZE(cmd[0]), direction, 568 ret = st_scsi_execute(SRpnt, cmd, direction, NULL, bytes, timeout,
521 &((STp->buffer)->sg[0]), bytes, (STp->buffer)->sg_segs, 569 retries);
522 timeout, retries, SRpnt, st_sleep_done, GFP_KERNEL)) { 570 if (ret) {
523 /* could not allocate the buffer or request was too large */ 571 /* could not allocate the buffer or request was too large */
524 (STp->buffer)->syscall_result = (-EBUSY); 572 (STp->buffer)->syscall_result = (-EBUSY);
525 (STp->buffer)->last_SRpnt = NULL; 573 (STp->buffer)->last_SRpnt = NULL;
526 } 574 } else if (do_wait) {
527 else if (do_wait) {
528 wait_for_completion(waiting); 575 wait_for_completion(waiting);
529 SRpnt->waiting = NULL; 576 SRpnt->waiting = NULL;
530 (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt); 577 (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
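
The branch added to st_do_scsi() selects between the two data paths: direct I/O reuses the user pages pinned by sgl_map_user_pages(), while buffered I/O points the mapping at the driver's reserved pages, whose uniform size makes the entry count a simple division. The same logic as the hunk, isolated for readability:

	if (STp->buffer->do_dio) {
		mdata->nr_entries = STp->buffer->sg_segs;
		mdata->pages = STp->buffer->mapped_pages;
	} else {
		/* every reserved page covers PAGE_SIZE << page_order bytes */
		mdata->nr_entries =
			DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
		mdata->pages = STp->buffer->reserved_pages;
		mdata->offset = 0;
	}
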
@@ -533,28 +580,6 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
533 return SRpnt; 580 return SRpnt;
534} 581}
535 582
536static int st_scsi_kern_execute(struct st_request *streq,
537 const unsigned char *cmd, int data_direction,
538 void *buffer, unsigned bufflen, int timeout,
539 int retries)
540{
541 struct scsi_tape *stp = streq->stp;
542 int ret, resid;
543
544 stp->buffer->cmdstat.have_sense = 0;
545 memcpy(streq->cmd, cmd, sizeof(streq->cmd));
546
547 ret = scsi_execute(stp->device, cmd, data_direction, buffer, bufflen,
548 streq->sense, timeout, retries, 0, &resid);
549 if (driver_byte(ret) & DRIVER_ERROR)
550 return -EBUSY;
551
552 stp->buffer->cmdstat.midlevel_result = streq->result = ret;
553 stp->buffer->cmdstat.residual = resid;
554 stp->buffer->syscall_result = st_chk_result(stp, streq);
555
556 return 0;
557}
558 583
559/* Handle the write-behind checking (waits for completion). Returns -ENOSPC if 584/* Handle the write-behind checking (waits for completion). Returns -ENOSPC if
560 write has been correct but EOM early warning reached, -EIO if write ended in 585 write has been correct but EOM early warning reached, -EIO if write ended in
@@ -627,7 +652,6 @@ static int cross_eof(struct scsi_tape * STp, int forward)
627{ 652{
628 struct st_request *SRpnt; 653 struct st_request *SRpnt;
629 unsigned char cmd[MAX_COMMAND_SIZE]; 654 unsigned char cmd[MAX_COMMAND_SIZE];
630 int ret;
631 655
632 cmd[0] = SPACE; 656 cmd[0] = SPACE;
633 cmd[1] = 0x01; /* Space FileMarks */ 657 cmd[1] = 0x01; /* Space FileMarks */
@@ -641,26 +665,20 @@ static int cross_eof(struct scsi_tape * STp, int forward)
641 DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n", 665 DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n",
642 tape_name(STp), forward ? "forward" : "backward")); 666 tape_name(STp), forward ? "forward" : "backward"));
643 667
644 SRpnt = st_allocate_request(STp); 668 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
669 STp->device->request_queue->rq_timeout,
670 MAX_RETRIES, 1);
645 if (!SRpnt) 671 if (!SRpnt)
646 return STp->buffer->syscall_result; 672 return (STp->buffer)->syscall_result;
647
648 ret = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
649 STp->device->request_queue->rq_timeout,
650 MAX_RETRIES);
651 if (ret)
652 goto out;
653 673
654 ret = STp->buffer->syscall_result; 674 st_release_request(SRpnt);
675 SRpnt = NULL;
655 676
656 if ((STp->buffer)->cmdstat.midlevel_result != 0) 677 if ((STp->buffer)->cmdstat.midlevel_result != 0)
657 printk(KERN_ERR "%s: Stepping over filemark %s failed.\n", 678 printk(KERN_ERR "%s: Stepping over filemark %s failed.\n",
658 tape_name(STp), forward ? "forward" : "backward"); 679 tape_name(STp), forward ? "forward" : "backward");
659 680
660out: 681 return (STp->buffer)->syscall_result;
661 st_release_request(SRpnt);
662
663 return ret;
664} 682}
665 683
666 684
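
cross_eof() is the first of many call sites converted in the rest of the file: the st_allocate_request() plus st_scsi_kern_execute() pair collapses into one synchronous st_do_scsi() call, and the error path simplifies because a NULL return already means the outcome is recorded in the buffer. The recurring shape, hedged:

	SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
			   STp->device->request_queue->rq_timeout,
			   MAX_RETRIES, 1);	/* 1 = do_wait, synchronous */
	if (!SRpnt)
		return STp->buffer->syscall_result;
	/* command status is left in STp->buffer->cmdstat / syscall_result */
	st_release_request(SRpnt);
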
@@ -881,24 +899,21 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
881 int attentions, waits, max_wait, scode; 899 int attentions, waits, max_wait, scode;
882 int retval = CHKRES_READY, new_session = 0; 900 int retval = CHKRES_READY, new_session = 0;
883 unsigned char cmd[MAX_COMMAND_SIZE]; 901 unsigned char cmd[MAX_COMMAND_SIZE];
884 struct st_request *SRpnt; 902 struct st_request *SRpnt = NULL;
885 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; 903 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
886 904
887 SRpnt = st_allocate_request(STp);
888 if (!SRpnt)
889 return STp->buffer->syscall_result;
890
891 max_wait = do_wait ? ST_BLOCK_SECONDS : 0; 905 max_wait = do_wait ? ST_BLOCK_SECONDS : 0;
892 906
893 for (attentions=waits=0; ; ) { 907 for (attentions=waits=0; ; ) {
894 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); 908 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
895 cmd[0] = TEST_UNIT_READY; 909 cmd[0] = TEST_UNIT_READY;
910 SRpnt = st_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
911 STp->long_timeout, MAX_READY_RETRIES, 1);
896 912
897 retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0, 913 if (!SRpnt) {
898 STp->long_timeout, 914 retval = (STp->buffer)->syscall_result;
899 MAX_READY_RETRIES);
900 if (retval)
901 break; 915 break;
916 }
902 917
903 if (cmdstatp->have_sense) { 918 if (cmdstatp->have_sense) {
904 919
@@ -942,8 +957,8 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
942 break; 957 break;
943 } 958 }
944 959
945 st_release_request(SRpnt); 960 if (SRpnt != NULL)
946 961 st_release_request(SRpnt);
947 return retval; 962 return retval;
948} 963}
949 964
@@ -1020,24 +1035,17 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
1020 } 1035 }
1021 } 1036 }
1022 1037
1023 SRpnt = st_allocate_request(STp);
1024 if (!SRpnt) {
1025 retval = STp->buffer->syscall_result;
1026 goto err_out;
1027 }
1028
1029 if (STp->omit_blklims) 1038 if (STp->omit_blklims)
1030 STp->min_block = STp->max_block = (-1); 1039 STp->min_block = STp->max_block = (-1);
1031 else { 1040 else {
1032 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); 1041 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
1033 cmd[0] = READ_BLOCK_LIMITS; 1042 cmd[0] = READ_BLOCK_LIMITS;
1034 1043
1035 retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE, 1044 SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE,
1036 STp->buffer->b_data, 6, 1045 STp->device->request_queue->rq_timeout,
1037 STp->device->request_queue->rq_timeout, 1046 MAX_READY_RETRIES, 1);
1038 MAX_READY_RETRIES); 1047 if (!SRpnt) {
1039 if (retval) { 1048 retval = (STp->buffer)->syscall_result;
1040 st_release_request(SRpnt);
1041 goto err_out; 1049 goto err_out;
1042 } 1050 }
1043 1051
@@ -1061,12 +1069,11 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
1061 cmd[0] = MODE_SENSE; 1069 cmd[0] = MODE_SENSE;
1062 cmd[4] = 12; 1070 cmd[4] = 12;
1063 1071
1064 retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE, 1072 SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE,
1065 STp->buffer->b_data, 12, 1073 STp->device->request_queue->rq_timeout,
1066 STp->device->request_queue->rq_timeout, 1074 MAX_READY_RETRIES, 1);
1067 MAX_READY_RETRIES); 1075 if (!SRpnt) {
1068 if (retval) { 1076 retval = (STp->buffer)->syscall_result;
1069 st_release_request(SRpnt);
1070 goto err_out; 1077 goto err_out;
1071 } 1078 }
1072 1079
@@ -1296,17 +1303,11 @@ static int st_flush(struct file *filp, fl_owner_t id)
1296 cmd[0] = WRITE_FILEMARKS; 1303 cmd[0] = WRITE_FILEMARKS;
1297 cmd[4] = 1 + STp->two_fm; 1304 cmd[4] = 1 + STp->two_fm;
1298 1305
1299 SRpnt = st_allocate_request(STp); 1306 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
1307 STp->device->request_queue->rq_timeout,
1308 MAX_WRITE_RETRIES, 1);
1300 if (!SRpnt) { 1309 if (!SRpnt) {
1301 result = STp->buffer->syscall_result; 1310 result = (STp->buffer)->syscall_result;
1302 goto out;
1303 }
1304
1305 result = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
1306 STp->device->request_queue->rq_timeout,
1307 MAX_WRITE_RETRIES);
1308 if (result) {
1309 st_release_request(SRpnt);
1310 goto out; 1311 goto out;
1311 } 1312 }
1312 1313
@@ -1471,8 +1472,8 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
1471 1472
1472 if (i && ((unsigned long)buf & queue_dma_alignment( 1473 if (i && ((unsigned long)buf & queue_dma_alignment(
1473 STp->device->request_queue)) == 0) { 1474 STp->device->request_queue)) == 0) {
1474 i = sgl_map_user_pages(&(STbp->sg[0]), STbp->use_sg, 1475 i = sgl_map_user_pages(STbp, STbp->use_sg, (unsigned long)buf,
1475 (unsigned long)buf, count, (is_read ? READ : WRITE)); 1476 count, (is_read ? READ : WRITE));
1476 if (i > 0) { 1477 if (i > 0) {
1477 STbp->do_dio = i; 1478 STbp->do_dio = i;
1478 STbp->buffer_bytes = 0; /* can be used as transfer counter */ 1479 STbp->buffer_bytes = 0; /* can be used as transfer counter */
@@ -1480,7 +1481,6 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
1480 else 1481 else
1481 STbp->do_dio = 0; /* fall back to buffering with any error */ 1482 STbp->do_dio = 0; /* fall back to buffering with any error */
1482 STbp->sg_segs = STbp->do_dio; 1483 STbp->sg_segs = STbp->do_dio;
1483 STbp->frp_sg_current = 0;
1484 DEB( 1484 DEB(
1485 if (STbp->do_dio) { 1485 if (STbp->do_dio) {
1486 STp->nbr_dio++; 1486 STp->nbr_dio++;
@@ -1526,7 +1526,7 @@ static void release_buffering(struct scsi_tape *STp, int is_read)
1526 1526
1527 STbp = STp->buffer; 1527 STbp = STp->buffer;
1528 if (STbp->do_dio) { 1528 if (STbp->do_dio) {
1529 sgl_unmap_user_pages(&(STbp->sg[0]), STbp->do_dio, is_read); 1529 sgl_unmap_user_pages(STbp, STbp->do_dio, is_read);
1530 STbp->do_dio = 0; 1530 STbp->do_dio = 0;
1531 STbp->sg_segs = 0; 1531 STbp->sg_segs = 0;
1532 } 1532 }
@@ -2372,7 +2372,6 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
2372{ 2372{
2373 unsigned char cmd[MAX_COMMAND_SIZE]; 2373 unsigned char cmd[MAX_COMMAND_SIZE];
2374 struct st_request *SRpnt; 2374 struct st_request *SRpnt;
2375 int ret;
2376 2375
2377 memset(cmd, 0, MAX_COMMAND_SIZE); 2376 memset(cmd, 0, MAX_COMMAND_SIZE);
2378 cmd[0] = MODE_SENSE; 2377 cmd[0] = MODE_SENSE;
@@ -2381,17 +2380,14 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
2381 cmd[2] = page; 2380 cmd[2] = page;
2382 cmd[4] = 255; 2381 cmd[4] = 255;
2383 2382
2384 SRpnt = st_allocate_request(STp); 2383 SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_FROM_DEVICE,
2385 if (!SRpnt) 2384 STp->device->request_queue->rq_timeout, 0, 1);
2386 return STp->buffer->syscall_result; 2385 if (SRpnt == NULL)
2386 return (STp->buffer)->syscall_result;
2387 2387
2388 ret = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
2389 STp->buffer->b_data, cmd[4],
2390 STp->device->request_queue->rq_timeout,
2391 MAX_RETRIES);
2392 st_release_request(SRpnt); 2388 st_release_request(SRpnt);
2393 2389
2394 return ret ? : STp->buffer->syscall_result; 2390 return STp->buffer->syscall_result;
2395} 2391}
2396 2392
2397 2393
@@ -2399,9 +2395,10 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
2399 in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */ 2395 in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */
2400static int write_mode_page(struct scsi_tape *STp, int page, int slow) 2396static int write_mode_page(struct scsi_tape *STp, int page, int slow)
2401{ 2397{
2402 int pgo, timeout, ret = 0; 2398 int pgo;
2403 unsigned char cmd[MAX_COMMAND_SIZE]; 2399 unsigned char cmd[MAX_COMMAND_SIZE];
2404 struct st_request *SRpnt; 2400 struct st_request *SRpnt;
2401 int timeout;
2405 2402
2406 memset(cmd, 0, MAX_COMMAND_SIZE); 2403 memset(cmd, 0, MAX_COMMAND_SIZE);
2407 cmd[0] = MODE_SELECT; 2404 cmd[0] = MODE_SELECT;
@@ -2415,21 +2412,16 @@ static int write_mode_page(struct scsi_tape *STp, int page, int slow)
2415 (STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP; 2412 (STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP;
2416 (STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR; 2413 (STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR;
2417 2414
2418 SRpnt = st_allocate_request(STp); 2415 timeout = slow ?
2419 if (!SRpnt) 2416 STp->long_timeout : STp->device->request_queue->rq_timeout;
2420 return ret; 2417 SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_TO_DEVICE,
2421 2418 timeout, 0, 1);
2422 timeout = slow ? STp->long_timeout : 2419 if (SRpnt == NULL)
2423 STp->device->request_queue->rq_timeout; 2420 return (STp->buffer)->syscall_result;
2424
2425 ret = st_scsi_kern_execute(SRpnt, cmd, DMA_TO_DEVICE,
2426 STp->buffer->b_data, cmd[4], timeout, 0);
2427 if (!ret)
2428 ret = STp->buffer->syscall_result;
2429 2421
2430 st_release_request(SRpnt); 2422 st_release_request(SRpnt);
2431 2423
2432 return ret; 2424 return STp->buffer->syscall_result;
2433} 2425}
2434 2426
2435 2427
@@ -2547,16 +2539,13 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
2547 printk(ST_DEB_MSG "%s: Loading tape.\n", name); 2539 printk(ST_DEB_MSG "%s: Loading tape.\n", name);
2548 ); 2540 );
2549 2541
2550 SRpnt = st_allocate_request(STp); 2542 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
2543 timeout, MAX_RETRIES, 1);
2551 if (!SRpnt) 2544 if (!SRpnt)
2552 return STp->buffer->syscall_result; 2545 return (STp->buffer)->syscall_result;
2553
2554 retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0, timeout,
2555 MAX_RETRIES);
2556 if (retval)
2557 goto out;
2558 2546
2559 retval = (STp->buffer)->syscall_result; 2547 retval = (STp->buffer)->syscall_result;
2548 st_release_request(SRpnt);
2560 2549
2561 if (!retval) { /* SCSI command successful */ 2550 if (!retval) { /* SCSI command successful */
2562 2551
@@ -2575,8 +2564,6 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
2575 STps = &(STp->ps[STp->partition]); 2564 STps = &(STp->ps[STp->partition]);
2576 STps->drv_file = STps->drv_block = (-1); 2565 STps->drv_file = STps->drv_block = (-1);
2577 } 2566 }
2578out:
2579 st_release_request(SRpnt);
2580 2567
2581 return retval; 2568 return retval;
2582} 2569}
@@ -2852,15 +2839,12 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2852 return (-ENOSYS); 2839 return (-ENOSYS);
2853 } 2840 }
2854 2841
2855 SRpnt = st_allocate_request(STp); 2842 SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction,
2843 timeout, MAX_RETRIES, 1);
2856 if (!SRpnt) 2844 if (!SRpnt)
2857 return (STp->buffer)->syscall_result; 2845 return (STp->buffer)->syscall_result;
2858 2846
2859 ioctl_result = st_scsi_kern_execute(SRpnt, cmd, direction, 2847 ioctl_result = (STp->buffer)->syscall_result;
2860 STp->buffer->b_data, datalen,
2861 timeout, MAX_RETRIES);
2862 if (!ioctl_result)
2863 ioctl_result = (STp->buffer)->syscall_result;
2864 2848
2865 if (!ioctl_result) { /* SCSI command successful */ 2849 if (!ioctl_result) { /* SCSI command successful */
2866 st_release_request(SRpnt); 2850 st_release_request(SRpnt);
@@ -3022,17 +3006,11 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
3022 if (!logical && !STp->scsi2_logical) 3006 if (!logical && !STp->scsi2_logical)
3023 scmd[1] = 1; 3007 scmd[1] = 1;
3024 } 3008 }
3025 3009 SRpnt = st_do_scsi(NULL, STp, scmd, 20, DMA_FROM_DEVICE,
3026 SRpnt = st_allocate_request(STp); 3010 STp->device->request_queue->rq_timeout,
3011 MAX_READY_RETRIES, 1);
3027 if (!SRpnt) 3012 if (!SRpnt)
3028 return STp->buffer->syscall_result; 3013 return (STp->buffer)->syscall_result;
3029
3030 result = st_scsi_kern_execute(SRpnt, scmd, DMA_FROM_DEVICE,
3031 STp->buffer->b_data, 20,
3032 STp->device->request_queue->rq_timeout,
3033 MAX_READY_RETRIES);
3034 if (result)
3035 goto out;
3036 3014
3037 if ((STp->buffer)->syscall_result != 0 || 3015 if ((STp->buffer)->syscall_result != 0 ||
3038 (STp->device->scsi_level >= SCSI_2 && 3016 (STp->device->scsi_level >= SCSI_2 &&
@@ -3060,7 +3038,6 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
3060 DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name, 3038 DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name,
3061 *block, *partition)); 3039 *block, *partition));
3062 } 3040 }
3063out:
3064 st_release_request(SRpnt); 3041 st_release_request(SRpnt);
3065 SRpnt = NULL; 3042 SRpnt = NULL;
3066 3043
@@ -3135,14 +3112,10 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
3135 timeout = STp->device->request_queue->rq_timeout; 3112 timeout = STp->device->request_queue->rq_timeout;
3136 } 3113 }
3137 3114
3138 SRpnt = st_allocate_request(STp); 3115 SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE,
3116 timeout, MAX_READY_RETRIES, 1);
3139 if (!SRpnt) 3117 if (!SRpnt)
3140 return STp->buffer->syscall_result; 3118 return (STp->buffer)->syscall_result;
3141
3142 result = st_scsi_kern_execute(SRpnt, scmd, DMA_NONE, NULL, 0,
3143 timeout, MAX_READY_RETRIES);
3144 if (result)
3145 goto out;
3146 3119
3147 STps->drv_block = STps->drv_file = (-1); 3120 STps->drv_block = STps->drv_file = (-1);
3148 STps->eof = ST_NOEOF; 3121 STps->eof = ST_NOEOF;
@@ -3167,7 +3140,7 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
3167 STps->drv_block = STps->drv_file = 0; 3140 STps->drv_block = STps->drv_file = 0;
3168 result = 0; 3141 result = 0;
3169 } 3142 }
3170out: 3143
3171 st_release_request(SRpnt); 3144 st_release_request(SRpnt);
3172 SRpnt = NULL; 3145 SRpnt = NULL;
3173 3146
@@ -3696,38 +3669,34 @@ static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a
3696 3669
3697/* Try to allocate a new tape buffer. Calling function must not hold 3670/* Try to allocate a new tape buffer. Calling function must not hold
3698 dev_arr_lock. */ 3671 dev_arr_lock. */
3699static struct st_buffer * 3672static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
3700 new_tape_buffer(int from_initialization, int need_dma, int max_sg)
3701{ 3673{
3702 int i, got = 0;
3703 gfp_t priority;
3704 struct st_buffer *tb; 3674 struct st_buffer *tb;
3705 3675
3706 if (from_initialization) 3676 tb = kzalloc(sizeof(struct st_buffer), GFP_ATOMIC);
3707 priority = GFP_ATOMIC;
3708 else
3709 priority = GFP_KERNEL;
3710
3711 i = sizeof(struct st_buffer) + (max_sg - 1) * sizeof(struct scatterlist) +
3712 max_sg * sizeof(struct st_buf_fragment);
3713 tb = kzalloc(i, priority);
3714 if (!tb) { 3677 if (!tb) {
3715 printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n"); 3678 printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n");
3716 return NULL; 3679 return NULL;
3717 } 3680 }
3718 tb->frp_segs = tb->orig_frp_segs = 0; 3681 tb->frp_segs = 0;
3719 tb->use_sg = max_sg; 3682 tb->use_sg = max_sg;
3720 tb->frp = (struct st_buf_fragment *)(&(tb->sg[0]) + max_sg);
3721
3722 tb->dma = need_dma; 3683 tb->dma = need_dma;
3723 tb->buffer_size = got; 3684 tb->buffer_size = 0;
3724 sg_init_table(tb->sg, max_sg); 3685
3686 tb->reserved_pages = kzalloc(max_sg * sizeof(struct page *),
3687 GFP_ATOMIC);
3688 if (!tb->reserved_pages) {
3689 kfree(tb);
3690 return NULL;
3691 }
3725 3692
3726 return tb; 3693 return tb;
3727} 3694}
3728 3695
3729 3696
3730/* Try to allocate enough space in the tape buffer */ 3697/* Try to allocate enough space in the tape buffer */
3698#define ST_MAX_ORDER 6
3699
3731static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma) 3700static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
3732{ 3701{
3733 int segs, nbr, max_segs, b_size, order, got; 3702 int segs, nbr, max_segs, b_size, order, got;
@@ -3747,33 +3716,45 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
3747 priority = GFP_KERNEL | __GFP_NOWARN; 3716 priority = GFP_KERNEL | __GFP_NOWARN;
3748 if (need_dma) 3717 if (need_dma)
3749 priority |= GFP_DMA; 3718 priority |= GFP_DMA;
3750 for (b_size = PAGE_SIZE, order=0; order <= 6 && 3719
3751 b_size < new_size - STbuffer->buffer_size; 3720 if (STbuffer->cleared)
3752 order++, b_size *= 2) 3721 priority |= __GFP_ZERO;
3753 ; /* empty */ 3722
3723 if (STbuffer->frp_segs) {
3724 order = STbuffer->map_data.page_order;
3725 b_size = PAGE_SIZE << order;
3726 } else {
3727 for (b_size = PAGE_SIZE, order = 0;
3728 order < ST_MAX_ORDER && b_size < new_size;
3729 order++, b_size *= 2)
3730 ; /* empty */
3731 }
3732 if (max_segs * (PAGE_SIZE << order) < new_size) {
3733 if (order == ST_MAX_ORDER)
3734 return 0;
3735 normalize_buffer(STbuffer);
3736 return enlarge_buffer(STbuffer, new_size, need_dma);
3737 }
3754 3738
3755 for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size; 3739 for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size;
3756 segs < max_segs && got < new_size;) { 3740 segs < max_segs && got < new_size;) {
3757 STbuffer->frp[segs].page = alloc_pages(priority, order); 3741 struct page *page;
3758 if (STbuffer->frp[segs].page == NULL) { 3742
3759 if (new_size - got <= (max_segs - segs) * b_size / 2) { 3743 page = alloc_pages(priority, order);
3760 b_size /= 2; /* Large enough for the rest of the buffers */ 3744 if (!page) {
3761 order--;
3762 continue;
3763 }
3764 DEB(STbuffer->buffer_size = got); 3745 DEB(STbuffer->buffer_size = got);
3765 normalize_buffer(STbuffer); 3746 normalize_buffer(STbuffer);
3766 return 0; 3747 return 0;
3767 } 3748 }
3768 STbuffer->frp[segs].length = b_size; 3749
3769 STbuffer->frp_segs += 1; 3750 STbuffer->frp_segs += 1;
3770 got += b_size; 3751 got += b_size;
3771 STbuffer->buffer_size = got; 3752 STbuffer->buffer_size = got;
3772 if (STbuffer->cleared) 3753 STbuffer->reserved_pages[segs] = page;
3773 memset(page_address(STbuffer->frp[segs].page), 0, b_size);
3774 segs++; 3754 segs++;
3775 } 3755 }
3776 STbuffer->b_data = page_address(STbuffer->frp[0].page); 3756 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
3757 STbuffer->map_data.page_order = order;
3777 3758
3778 return 1; 3759 return 1;
3779} 3760}
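
The rewritten enlarge_buffer() drops the variable-sized fragment list: every segment is now allocated at a single page order (capped at ST_MAX_ORDER), so one map_data.page_order describes the whole buffer, and a too-small order triggers normalize_buffer() and a retry instead of mixing sizes. That uniform segment size is also what lets the copy helpers below index segments arithmetically; a hypothetical helper, for illustration only:

	/* Hypothetical, not part of the patch: translate a byte offset
	 * into a (page, in-page offset) pair under the uniform layout. */
	static struct page *st_offset_to_page(struct st_buffer *b, int off,
					      int *in_page)
	{
		int length = PAGE_SIZE << b->map_data.page_order;

		*in_page = off % length;
		return b->reserved_pages[off / length];
	}
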
@@ -3785,7 +3766,8 @@ static void clear_buffer(struct st_buffer * st_bp)
3785 int i; 3766 int i;
3786 3767
3787 for (i=0; i < st_bp->frp_segs; i++) 3768 for (i=0; i < st_bp->frp_segs; i++)
3788 memset(page_address(st_bp->frp[i].page), 0, st_bp->frp[i].length); 3769 memset(page_address(st_bp->reserved_pages[i]), 0,
3770 PAGE_SIZE << st_bp->map_data.page_order);
3789 st_bp->cleared = 1; 3771 st_bp->cleared = 1;
3790} 3772}
3791 3773
@@ -3793,16 +3775,16 @@ static void clear_buffer(struct st_buffer * st_bp)
3793/* Release the extra buffer */ 3775/* Release the extra buffer */
3794static void normalize_buffer(struct st_buffer * STbuffer) 3776static void normalize_buffer(struct st_buffer * STbuffer)
3795{ 3777{
3796 int i, order; 3778 int i, order = STbuffer->map_data.page_order;
3797 3779
3798 for (i = STbuffer->orig_frp_segs; i < STbuffer->frp_segs; i++) { 3780 for (i = 0; i < STbuffer->frp_segs; i++) {
3799 order = get_order(STbuffer->frp[i].length); 3781 __free_pages(STbuffer->reserved_pages[i], order);
3800 __free_pages(STbuffer->frp[i].page, order); 3782 STbuffer->buffer_size -= (PAGE_SIZE << order);
3801 STbuffer->buffer_size -= STbuffer->frp[i].length;
3802 } 3783 }
3803 STbuffer->frp_segs = STbuffer->orig_frp_segs; 3784 STbuffer->frp_segs = 0;
3804 STbuffer->frp_sg_current = 0;
3805 STbuffer->sg_segs = 0; 3785 STbuffer->sg_segs = 0;
3786 STbuffer->map_data.page_order = 0;
3787 STbuffer->map_data.offset = 0;
3806} 3788}
3807 3789
3808 3790
@@ -3811,18 +3793,19 @@ static void normalize_buffer(struct st_buffer * STbuffer)
3811static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count) 3793static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
3812{ 3794{
3813 int i, cnt, res, offset; 3795 int i, cnt, res, offset;
3796 int length = PAGE_SIZE << st_bp->map_data.page_order;
3814 3797
3815 for (i = 0, offset = st_bp->buffer_bytes; 3798 for (i = 0, offset = st_bp->buffer_bytes;
3816 i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++) 3799 i < st_bp->frp_segs && offset >= length; i++)
3817 offset -= st_bp->frp[i].length; 3800 offset -= length;
3818 if (i == st_bp->frp_segs) { /* Should never happen */ 3801 if (i == st_bp->frp_segs) { /* Should never happen */
3819 printk(KERN_WARNING "st: append_to_buffer offset overflow.\n"); 3802 printk(KERN_WARNING "st: append_to_buffer offset overflow.\n");
3820 return (-EIO); 3803 return (-EIO);
3821 } 3804 }
3822 for (; i < st_bp->frp_segs && do_count > 0; i++) { 3805 for (; i < st_bp->frp_segs && do_count > 0; i++) {
3823 cnt = st_bp->frp[i].length - offset < do_count ? 3806 struct page *page = st_bp->reserved_pages[i];
3824 st_bp->frp[i].length - offset : do_count; 3807 cnt = length - offset < do_count ? length - offset : do_count;
3825 res = copy_from_user(page_address(st_bp->frp[i].page) + offset, ubp, cnt); 3808 res = copy_from_user(page_address(page) + offset, ubp, cnt);
3826 if (res) 3809 if (res)
3827 return (-EFAULT); 3810 return (-EFAULT);
3828 do_count -= cnt; 3811 do_count -= cnt;
@@ -3842,18 +3825,19 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
3842static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count) 3825static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
3843{ 3826{
3844 int i, cnt, res, offset; 3827 int i, cnt, res, offset;
3828 int length = PAGE_SIZE << st_bp->map_data.page_order;
3845 3829
3846 for (i = 0, offset = st_bp->read_pointer; 3830 for (i = 0, offset = st_bp->read_pointer;
3847 i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++) 3831 i < st_bp->frp_segs && offset >= length; i++)
3848 offset -= st_bp->frp[i].length; 3832 offset -= length;
3849 if (i == st_bp->frp_segs) { /* Should never happen */ 3833 if (i == st_bp->frp_segs) { /* Should never happen */
3850 printk(KERN_WARNING "st: from_buffer offset overflow.\n"); 3834 printk(KERN_WARNING "st: from_buffer offset overflow.\n");
3851 return (-EIO); 3835 return (-EIO);
3852 } 3836 }
3853 for (; i < st_bp->frp_segs && do_count > 0; i++) { 3837 for (; i < st_bp->frp_segs && do_count > 0; i++) {
3854 cnt = st_bp->frp[i].length - offset < do_count ? 3838 struct page *page = st_bp->reserved_pages[i];
3855 st_bp->frp[i].length - offset : do_count; 3839 cnt = length - offset < do_count ? length - offset : do_count;
3856 res = copy_to_user(ubp, page_address(st_bp->frp[i].page) + offset, cnt); 3840 res = copy_to_user(ubp, page_address(page) + offset, cnt);
3857 if (res) 3841 if (res)
3858 return (-EFAULT); 3842 return (-EFAULT);
3859 do_count -= cnt; 3843 do_count -= cnt;
@@ -3874,6 +3858,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
3874{ 3858{
3875 int src_seg, dst_seg, src_offset = 0, dst_offset; 3859 int src_seg, dst_seg, src_offset = 0, dst_offset;
3876 int count, total; 3860 int count, total;
3861 int length = PAGE_SIZE << st_bp->map_data.page_order;
3877 3862
3878 if (offset == 0) 3863 if (offset == 0)
3879 return; 3864 return;
@@ -3881,24 +3866,26 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
3881 total=st_bp->buffer_bytes - offset; 3866 total=st_bp->buffer_bytes - offset;
3882 for (src_seg=0; src_seg < st_bp->frp_segs; src_seg++) { 3867 for (src_seg=0; src_seg < st_bp->frp_segs; src_seg++) {
3883 src_offset = offset; 3868 src_offset = offset;
3884 if (src_offset < st_bp->frp[src_seg].length) 3869 if (src_offset < length)
3885 break; 3870 break;
3886 offset -= st_bp->frp[src_seg].length; 3871 offset -= length;
3887 } 3872 }
3888 3873
3889 st_bp->buffer_bytes = st_bp->read_pointer = total; 3874 st_bp->buffer_bytes = st_bp->read_pointer = total;
3890 for (dst_seg=dst_offset=0; total > 0; ) { 3875 for (dst_seg=dst_offset=0; total > 0; ) {
3891 count = min(st_bp->frp[dst_seg].length - dst_offset, 3876 struct page *dpage = st_bp->reserved_pages[dst_seg];
3892 st_bp->frp[src_seg].length - src_offset); 3877 struct page *spage = st_bp->reserved_pages[src_seg];
3893 memmove(page_address(st_bp->frp[dst_seg].page) + dst_offset, 3878
3894 page_address(st_bp->frp[src_seg].page) + src_offset, count); 3879 count = min(length - dst_offset, length - src_offset);
3880 memmove(page_address(dpage) + dst_offset,
3881 page_address(spage) + src_offset, count);
3895 src_offset += count; 3882 src_offset += count;
3896 if (src_offset >= st_bp->frp[src_seg].length) { 3883 if (src_offset >= length) {
3897 src_seg++; 3884 src_seg++;
3898 src_offset = 0; 3885 src_offset = 0;
3899 } 3886 }
3900 dst_offset += count; 3887 dst_offset += count;
3901 if (dst_offset >= st_bp->frp[dst_seg].length) { 3888 if (dst_offset >= length) {
3902 dst_seg++; 3889 dst_seg++;
3903 dst_offset = 0; 3890 dst_offset = 0;
3904 } 3891 }
@@ -3906,32 +3893,6 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
3906 } 3893 }
3907} 3894}
3908 3895
3909
3910/* Fill the s/g list up to the length required for this transfer */
3911static void buf_to_sg(struct st_buffer *STbp, unsigned int length)
3912{
3913 int i;
3914 unsigned int count;
3915 struct scatterlist *sg;
3916 struct st_buf_fragment *frp;
3917
3918 if (length == STbp->frp_sg_current)
3919 return; /* work already done */
3920
3921 sg = &(STbp->sg[0]);
3922 frp = STbp->frp;
3923 for (i=count=0; count < length; i++) {
3924 if (length - count > frp[i].length)
3925 sg_set_page(&sg[i], frp[i].page, frp[i].length, 0);
3926 else
3927 sg_set_page(&sg[i], frp[i].page, length - count, 0);
3928 count += sg[i].length;
3929 }
3930 STbp->sg_segs = i;
3931 STbp->frp_sg_current = length;
3932}
3933
3934
3935/* Validate the options from command line or module parameters */ 3896/* Validate the options from command line or module parameters */
3936static void validate_options(void) 3897static void validate_options(void)
3937{ 3898{
@@ -4026,7 +3987,7 @@ static int st_probe(struct device *dev)
4026 SDp->request_queue->max_phys_segments); 3987 SDp->request_queue->max_phys_segments);
4027 if (st_max_sg_segs < i) 3988 if (st_max_sg_segs < i)
4028 i = st_max_sg_segs; 3989 i = st_max_sg_segs;
4029 buffer = new_tape_buffer(1, (SDp->host)->unchecked_isa_dma, i); 3990 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
4030 if (buffer == NULL) { 3991 if (buffer == NULL) {
4031 printk(KERN_ERR 3992 printk(KERN_ERR
4032 "st: Can't allocate new tape buffer. Device not attached.\n"); 3993 "st: Can't allocate new tape buffer. Device not attached.\n");
@@ -4280,8 +4241,8 @@ static void scsi_tape_release(struct kref *kref)
4280 tpnt->device = NULL; 4241 tpnt->device = NULL;
4281 4242
4282 if (tpnt->buffer) { 4243 if (tpnt->buffer) {
4283 tpnt->buffer->orig_frp_segs = 0;
4284 normalize_buffer(tpnt->buffer); 4244 normalize_buffer(tpnt->buffer);
4245 kfree(tpnt->buffer->reserved_pages);
4285 kfree(tpnt->buffer); 4246 kfree(tpnt->buffer);
4286 } 4247 }
4287 4248
@@ -4567,14 +4528,16 @@ out:
4567} 4528}
4568 4529
4569/* The following functions may be useful for a larger audience. */ 4530/* The following functions may be useful for a larger audience. */
4570static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, 4531static int sgl_map_user_pages(struct st_buffer *STbp,
4571 unsigned long uaddr, size_t count, int rw) 4532 const unsigned int max_pages, unsigned long uaddr,
4533 size_t count, int rw)
4572{ 4534{
4573 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT; 4535 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
4574 unsigned long start = uaddr >> PAGE_SHIFT; 4536 unsigned long start = uaddr >> PAGE_SHIFT;
4575 const int nr_pages = end - start; 4537 const int nr_pages = end - start;
4576 int res, i, j; 4538 int res, i, j;
4577 struct page **pages; 4539 struct page **pages;
4540 struct rq_map_data *mdata = &STbp->map_data;
4578 4541
4579 /* User attempted Overflow! */ 4542 /* User attempted Overflow! */
4580 if ((uaddr + count) < uaddr) 4543 if ((uaddr + count) < uaddr)
@@ -4616,24 +4579,11 @@ static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pa
4616 flush_dcache_page(pages[i]); 4579 flush_dcache_page(pages[i]);
4617 } 4580 }
4618 4581
4619 /* Populate the scatter/gather list */ 4582 mdata->offset = uaddr & ~PAGE_MASK;
4620 sg_set_page(&sgl[0], pages[0], 0, uaddr & ~PAGE_MASK); 4583 mdata->page_order = 0;
4621 if (nr_pages > 1) { 4584 STbp->mapped_pages = pages;
4622 sgl[0].length = PAGE_SIZE - sgl[0].offset;
4623 count -= sgl[0].length;
4624 for (i=1; i < nr_pages ; i++) {
4625 sg_set_page(&sgl[i], pages[i],
4626 count < PAGE_SIZE ? count : PAGE_SIZE, 0);;
4627 count -= PAGE_SIZE;
4628 }
4629 }
4630 else {
4631 sgl[0].length = count;
4632 }
4633 4585
4634 kfree(pages);
4635 return nr_pages; 4586 return nr_pages;
4636
4637 out_unmap: 4587 out_unmap:
4638 if (res > 0) { 4588 if (res > 0) {
4639 for (j=0; j < res; j++) 4589 for (j=0; j < res; j++)
@@ -4646,13 +4596,13 @@ static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pa
4646 4596
4647 4597
4648/* And unmap them... */ 4598/* And unmap them... */
4649static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages, 4599static int sgl_unmap_user_pages(struct st_buffer *STbp,
4650 int dirtied) 4600 const unsigned int nr_pages, int dirtied)
4651{ 4601{
4652 int i; 4602 int i;
4653 4603
4654 for (i=0; i < nr_pages; i++) { 4604 for (i=0; i < nr_pages; i++) {
4655 struct page *page = sg_page(&sgl[i]); 4605 struct page *page = STbp->mapped_pages[i];
4656 4606
4657 if (dirtied) 4607 if (dirtied)
4658 SetPageDirty(page); 4608 SetPageDirty(page);
@@ -4661,6 +4611,8 @@ static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_p
4661 */ 4611 */
4662 page_cache_release(page); 4612 page_cache_release(page);
4663 } 4613 }
4614 kfree(STbp->mapped_pages);
4615 STbp->mapped_pages = NULL;
4664 4616
4665 return 0; 4617 return 0;
4666} 4618}
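
With the scatterlist gone, sgl_map_user_pages() keeps the pinned page array alive in STbp->mapped_pages (the block layer consumes it through map_data), and sgl_unmap_user_pages() both releases the pages and frees the array. Hedged lifecycle sketch, mirroring setup_buffering() and release_buffering() above:

	res = sgl_map_user_pages(STbp, STbp->use_sg, (unsigned long)buf,
				 count, is_read ? READ : WRITE);
	if (res > 0) {
		/* I/O runs against STbp->map_data / STbp->mapped_pages ... */
		sgl_unmap_user_pages(STbp, res, is_read);  /* dirty on read */
	}
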
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index b92712f95931..544dc6b1f548 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -29,6 +29,7 @@ struct st_request {
29 int result; 29 int result;
30 struct scsi_tape *stp; 30 struct scsi_tape *stp;
31 struct completion *waiting; 31 struct completion *waiting;
32 struct bio *bio;
32}; 33};
33 34
34/* The tape buffer descriptor. */ 35/* The tape buffer descriptor. */
@@ -44,20 +45,13 @@ struct st_buffer {
44 int syscall_result; 45 int syscall_result;
45 struct st_request *last_SRpnt; 46 struct st_request *last_SRpnt;
46 struct st_cmdstatus cmdstat; 47 struct st_cmdstatus cmdstat;
48 struct page **reserved_pages;
49 struct page **mapped_pages;
50 struct rq_map_data map_data;
47 unsigned char *b_data; 51 unsigned char *b_data;
48 unsigned short use_sg; /* zero or max number of s/g segments for this adapter */ 52 unsigned short use_sg; /* zero or max number of s/g segments for this adapter */
49 unsigned short sg_segs; /* number of segments in s/g list */ 53 unsigned short sg_segs; /* number of segments in s/g list */
50 unsigned short orig_frp_segs; /* number of segments allocated at first try */
51 unsigned short frp_segs; /* number of buffer segments */ 54 unsigned short frp_segs; /* number of buffer segments */
52 unsigned int frp_sg_current; /* driver buffer length currently in s/g list */
53 struct st_buf_fragment *frp; /* the allocated buffer fragment list */
54 struct scatterlist sg[1]; /* MUST BE last item */
55};
56
57/* The tape buffer fragment descriptor */
58struct st_buf_fragment {
59 struct page *page;
60 unsigned int length;
61}; 55};
62 56
63/* The tape mode definition */ 57/* The tape mode definition */
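
The st.h hunk is the data-structure side of the conversion: the flexible scatterlist tail and the per-fragment descriptors disappear, replaced by two flat page arrays plus the rq_map_data the block layer consumes. A hedged summary of the fields that remain relevant:

	struct st_buffer {
		/* ... status fields unchanged ... */
		struct page **reserved_pages;	/* driver-owned buffer pages */
		struct page **mapped_pages;	/* pinned user pages (dio)   */
		struct rq_map_data map_data;	/* fed to blk_rq_map_user()  */
		unsigned char *b_data;
		unsigned short use_sg;
		unsigned short sg_segs;
		unsigned short frp_segs;
	};
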
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 3c4a300494a4..a8d61a62522e 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -137,8 +137,8 @@ zalon_probe(struct parisc_device *dev)
137 goto fail; 137 goto fail;
138 138
139 if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { 139 if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
140 printk(KERN_ERR "%s: irq problem with %d, detaching\n ", 140 dev_printk(KERN_ERR, dev, "irq problem with %d, detaching\n ",
141 dev->dev.bus_id, dev->irq); 141 dev->irq);
142 goto fail; 142 goto fail;
143 } 143 }
144 144
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index b695ab3142d8..3e525e38a5d9 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -457,7 +457,7 @@ config SERIAL_SAMSUNG
457 457
458config SERIAL_SAMSUNG_UARTS 458config SERIAL_SAMSUNG_UARTS
459 int 459 int
460 depends on SERIAL_SAMSUNG 460 depends on ARM && PLAT_S3C
461 default 2 if ARCH_S3C2400 461 default 2 if ARCH_S3C2400
462 default 4 if ARCH_S3C64XX || CPU_S3C2443 462 default 4 if ARCH_S3C64XX || CPU_S3C2443
463 default 3 463 default 3
@@ -1320,13 +1320,30 @@ config SERIAL_NETX_CONSOLE
1320config SERIAL_OF_PLATFORM 1320config SERIAL_OF_PLATFORM
1321 tristate "Serial port on Open Firmware platform bus" 1321 tristate "Serial port on Open Firmware platform bus"
1322 depends on PPC_OF 1322 depends on PPC_OF
1323 depends on SERIAL_8250 1323 depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL
1324 help 1324 help
1325 If you have a PowerPC based system that has serial ports 1325 If you have a PowerPC based system that has serial ports
1326 on a platform specific bus, you should enable this option. 1326 on a platform specific bus, you should enable this option.
1327 Currently, only 8250 compatible ports are supported, but 1327 Currently, only 8250 compatible ports are supported, but
1328 others can easily be added. 1328 others can easily be added.
1329 1329
1330config SERIAL_OF_PLATFORM_NWPSERIAL
1331 tristate "NWP serial port driver"
1332 depends on PPC_OF && PPC_DCR
1333 select SERIAL_OF_PLATFORM
1334 select SERIAL_CORE_CONSOLE
1335 select SERIAL_CORE
1336 help
 1337 This driver supports the Cell network processor NWP serial
 1338 device.
1339
1340config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
1341 bool "Console on NWP serial port"
1342 depends on SERIAL_OF_PLATFORM_NWPSERIAL=y
1343 select SERIAL_CORE_CONSOLE
1344 help
 1345 Support for a console on the NWP serial ports.
1346
1330config SERIAL_QE 1347config SERIAL_QE
1331 tristate "Freescale QUICC Engine serial port support" 1348 tristate "Freescale QUICC Engine serial port support"
1332 depends on QUICC_ENGINE 1349 depends on QUICC_ENGINE
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index dfe775ac45b2..8844c0a03929 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
72obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o 72obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
73obj-$(CONFIG_SERIAL_NETX) += netx-serial.o 73obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
74obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o 74obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
75obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
75obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o 76obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
76obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o 77obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
77obj-$(CONFIG_SERIAL_QE) += ucc_uart.o 78obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
diff --git a/drivers/serial/nwpserial.c b/drivers/serial/nwpserial.c
new file mode 100644
index 000000000000..32f3eaf0d262
--- /dev/null
+++ b/drivers/serial/nwpserial.c
@@ -0,0 +1,475 @@
1/*
2 * Serial Port driver for a NWP uart device
3 *
4 * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12#include <linux/init.h>
13#include <linux/console.h>
14#include <linux/serial.h>
15#include <linux/serial_reg.h>
16#include <linux/serial_core.h>
17#include <linux/tty.h>
18#include <linux/irqreturn.h>
19#include <linux/mutex.h>
20#include <linux/of_platform.h>
21#include <linux/of_device.h>
22#include <linux/nwpserial.h>
23#include <asm/prom.h>
24#include <asm/dcr.h>
25
26#define NWPSERIAL_NR 2
27
28#define NWPSERIAL_STATUS_RXVALID 0x1
29#define NWPSERIAL_STATUS_TXFULL 0x2
30
31struct nwpserial_port {
32 struct uart_port port;
33 dcr_host_t dcr_host;
34 unsigned int ier;
35 unsigned int mcr;
36};
37
38static DEFINE_MUTEX(nwpserial_mutex);
39static struct nwpserial_port nwpserial_ports[NWPSERIAL_NR];
40
41static void wait_for_bits(struct nwpserial_port *up, int bits)
42{
43 unsigned int status, tmout = 10000;
44
45 /* Wait up to 10ms for the character(s) to be sent. */
46 do {
47 status = dcr_read(up->dcr_host, UART_LSR);
48
49 if (--tmout == 0)
50 break;
51 udelay(1);
52 } while ((status & bits) != bits);
53}
54
55#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
56static void nwpserial_console_putchar(struct uart_port *port, int c)
57{
58 struct nwpserial_port *up;
59 up = container_of(port, struct nwpserial_port, port);
60 /* check if tx buffer is full */
61 wait_for_bits(up, UART_LSR_THRE);
62 dcr_write(up->dcr_host, UART_TX, c);
63 up->port.icount.tx++;
64}
65
66static void
67nwpserial_console_write(struct console *co, const char *s, unsigned int count)
68{
69 struct nwpserial_port *up = &nwpserial_ports[co->index];
70 unsigned long flags;
71 int locked = 1;
72
73 if (oops_in_progress)
74 locked = spin_trylock_irqsave(&up->port.lock, flags);
75 else
76 spin_lock_irqsave(&up->port.lock, flags);
77
78 /* save and disable interrupt */
79 up->ier = dcr_read(up->dcr_host, UART_IER);
80 dcr_write(up->dcr_host, UART_IER, up->ier & ~UART_IER_RDI);
81
82 uart_console_write(&up->port, s, count, nwpserial_console_putchar);
83
 84 /* wait for transmitter to become empty */
85 while ((dcr_read(up->dcr_host, UART_LSR) & UART_LSR_THRE) == 0)
86 cpu_relax();
87
88 /* restore interrupt state */
89 dcr_write(up->dcr_host, UART_IER, up->ier);
90
91 if (locked)
92 spin_unlock_irqrestore(&up->port.lock, flags);
93}
94
95static struct uart_driver nwpserial_reg;
96static struct console nwpserial_console = {
97 .name = "ttySQ",
98 .write = nwpserial_console_write,
99 .device = uart_console_device,
100 .flags = CON_PRINTBUFFER,
101 .index = -1,
102 .data = &nwpserial_reg,
103};
104#define NWPSERIAL_CONSOLE (&nwpserial_console)
105#else
106#define NWPSERIAL_CONSOLE NULL
107#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */
108
109/**************************************************************************/
110
111static int nwpserial_request_port(struct uart_port *port)
112{
113 return 0;
114}
115
116static void nwpserial_release_port(struct uart_port *port)
117{
118 /* N/A */
119}
120
121static void nwpserial_config_port(struct uart_port *port, int flags)
122{
123 port->type = PORT_NWPSERIAL;
124}
125
126static irqreturn_t nwpserial_interrupt(int irq, void *dev_id)
127{
128 struct nwpserial_port *up = dev_id;
129 struct tty_struct *tty = up->port.info->port.tty;
130 irqreturn_t ret;
131 unsigned int iir;
132 unsigned char ch;
133
134 spin_lock(&up->port.lock);
135
136 /* check if the uart was the interrupt source. */
137 iir = dcr_read(up->dcr_host, UART_IIR);
138 if (!iir) {
139 ret = IRQ_NONE;
140 goto out;
141 }
142
143 do {
144 up->port.icount.rx++;
145 ch = dcr_read(up->dcr_host, UART_RX);
146 if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID)
147 tty_insert_flip_char(tty, ch, TTY_NORMAL);
148 } while (dcr_read(up->dcr_host, UART_RX) & UART_LSR_DR);
149
150 tty_flip_buffer_push(tty);
151 ret = IRQ_HANDLED;
152
153out:
154 spin_unlock(&up->port.lock);
155 return ret;
156}
157
158static int nwpserial_startup(struct uart_port *port)
159{
160 struct nwpserial_port *up;
161 int err;
162
163 up = container_of(port, struct nwpserial_port, port);
164
165 /* disable flow control by default */
166 up->mcr = dcr_read(up->dcr_host, UART_MCR) & ~UART_MCR_AFE;
167 dcr_write(up->dcr_host, UART_MCR, up->mcr);
168
169 /* register interrupt handler */
170 err = request_irq(up->port.irq, nwpserial_interrupt,
171 IRQF_SHARED, "nwpserial", up);
172 if (err)
173 return err;
174
175 /* enable interrupts */
176 up->ier = UART_IER_RDI;
177 dcr_write(up->dcr_host, UART_IER, up->ier);
178
179 /* enable receiving */
180 up->port.ignore_status_mask &= ~NWPSERIAL_STATUS_RXVALID;
181
182 return 0;
183}
184
185static void nwpserial_shutdown(struct uart_port *port)
186{
187 struct nwpserial_port *up;
188 up = container_of(port, struct nwpserial_port, port);
189
190 /* disable receiving */
191 up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID;
192
193 /* disable interrupts from this port */
194 up->ier = 0;
195 dcr_write(up->dcr_host, UART_IER, up->ier);
196
197 /* free irq */
198 free_irq(up->port.irq, port);
199}
200
201static int nwpserial_verify_port(struct uart_port *port,
202 struct serial_struct *ser)
203{
204 return -EINVAL;
205}
206
207static const char *nwpserial_type(struct uart_port *port)
208{
209 return port->type == PORT_NWPSERIAL ? "nwpserial" : NULL;
210}
211
212static void nwpserial_set_termios(struct uart_port *port,
213 struct ktermios *termios, struct ktermios *old)
214{
215 struct nwpserial_port *up;
216 up = container_of(port, struct nwpserial_port, port);
217
218 up->port.read_status_mask = NWPSERIAL_STATUS_RXVALID
219 | NWPSERIAL_STATUS_TXFULL;
220
221 up->port.ignore_status_mask = 0;
222 /* ignore all characters if CREAD is not set */
223 if ((termios->c_cflag & CREAD) == 0)
224 up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID;
225
226 /* Copy back the old hardware settings */
227 if (old)
228 tty_termios_copy_hw(termios, old);
229}
230
231static void nwpserial_break_ctl(struct uart_port *port, int ctl)
232{
233 /* N/A */
234}
235
236static void nwpserial_enable_ms(struct uart_port *port)
237{
238 /* N/A */
239}
240
241static void nwpserial_stop_rx(struct uart_port *port)
242{
243 struct nwpserial_port *up;
244 up = container_of(port, struct nwpserial_port, port);
245 /* don't forward any more data (like !CREAD) */
246 up->port.ignore_status_mask = NWPSERIAL_STATUS_RXVALID;
247}
248
249static void nwpserial_putchar(struct nwpserial_port *up, unsigned char c)
250{
251	/* wait until the tx buffer has room */
252 wait_for_bits(up, UART_LSR_THRE);
253 dcr_write(up->dcr_host, UART_TX, c);
254 up->port.icount.tx++;
255}
256
257static void nwpserial_start_tx(struct uart_port *port)
258{
259 struct nwpserial_port *up;
260 struct circ_buf *xmit;
261 up = container_of(port, struct nwpserial_port, port);
262 xmit = &up->port.info->xmit;
263
264 if (port->x_char) {
265 nwpserial_putchar(up, up->port.x_char);
266 port->x_char = 0;
267 }
268
269 while (!(uart_circ_empty(xmit) || uart_tx_stopped(&up->port))) {
270 nwpserial_putchar(up, xmit->buf[xmit->tail]);
271 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
272 }
273}
274
275static unsigned int nwpserial_get_mctrl(struct uart_port *port)
276{
277 return 0;
278}
279
280static void nwpserial_set_mctrl(struct uart_port *port, unsigned int mctrl)
281{
282 /* N/A */
283}
284
285static void nwpserial_stop_tx(struct uart_port *port)
286{
287 /* N/A */
288}
289
290static unsigned int nwpserial_tx_empty(struct uart_port *port)
291{
292 struct nwpserial_port *up;
293 unsigned long flags;
294 int ret;
295 up = container_of(port, struct nwpserial_port, port);
296
297 spin_lock_irqsave(&up->port.lock, flags);
298 ret = dcr_read(up->dcr_host, UART_LSR);
299 spin_unlock_irqrestore(&up->port.lock, flags);
300
301 return ret & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
302}
303
304static struct uart_ops nwpserial_pops = {
305 .tx_empty = nwpserial_tx_empty,
306 .set_mctrl = nwpserial_set_mctrl,
307 .get_mctrl = nwpserial_get_mctrl,
308 .stop_tx = nwpserial_stop_tx,
309 .start_tx = nwpserial_start_tx,
310 .stop_rx = nwpserial_stop_rx,
311 .enable_ms = nwpserial_enable_ms,
312 .break_ctl = nwpserial_break_ctl,
313 .startup = nwpserial_startup,
314 .shutdown = nwpserial_shutdown,
315 .set_termios = nwpserial_set_termios,
316 .type = nwpserial_type,
317 .release_port = nwpserial_release_port,
318 .request_port = nwpserial_request_port,
319 .config_port = nwpserial_config_port,
320 .verify_port = nwpserial_verify_port,
321};
322
323static struct uart_driver nwpserial_reg = {
324 .owner = THIS_MODULE,
325 .driver_name = "nwpserial",
326 .dev_name = "ttySQ",
327 .major = TTY_MAJOR,
328 .minor = 68,
329 .nr = NWPSERIAL_NR,
330 .cons = NWPSERIAL_CONSOLE,
331};
332
333int nwpserial_register_port(struct uart_port *port)
334{
335 struct nwpserial_port *up = NULL;
336 int ret = -1;
337 int i;
338 static int first = 1;
339 int dcr_len;
340 int dcr_base;
341 struct device_node *dn;
342
343 mutex_lock(&nwpserial_mutex);
344
345 dn = to_of_device(port->dev)->node;
346 if (dn == NULL)
347 goto out;
348
349 /* get dcr base. */
350 dcr_base = dcr_resource_start(dn, 0);
351
352 /* find matching entry */
353 for (i = 0; i < NWPSERIAL_NR; i++)
354 if (nwpserial_ports[i].port.iobase == dcr_base) {
355 up = &nwpserial_ports[i];
356 break;
357 }
358
359	/* we didn't find a matching entry, search for a free port */
360 if (up == NULL)
361 for (i = 0; i < NWPSERIAL_NR; i++)
362 if (nwpserial_ports[i].port.type == PORT_UNKNOWN &&
363 nwpserial_ports[i].port.iobase == 0) {
364 up = &nwpserial_ports[i];
365 break;
366 }
367
368 if (up == NULL) {
369 ret = -EBUSY;
370 goto out;
371 }
372
373 if (first)
374 uart_register_driver(&nwpserial_reg);
375 first = 0;
376
377 up->port.membase = port->membase;
378 up->port.irq = port->irq;
379 up->port.uartclk = port->uartclk;
380 up->port.fifosize = port->fifosize;
381 up->port.regshift = port->regshift;
382 up->port.iotype = port->iotype;
383 up->port.flags = port->flags;
384 up->port.mapbase = port->mapbase;
385 up->port.private_data = port->private_data;
386
387 if (port->dev)
388 up->port.dev = port->dev;
389
390 if (up->port.iobase != dcr_base) {
391 up->port.ops = &nwpserial_pops;
392 up->port.fifosize = 16;
393
394 spin_lock_init(&up->port.lock);
395
396 up->port.iobase = dcr_base;
397 dcr_len = dcr_resource_len(dn, 0);
398
399 up->dcr_host = dcr_map(dn, dcr_base, dcr_len);
400 if (!DCR_MAP_OK(up->dcr_host)) {
401 printk(KERN_ERR "Cannot map DCR resources for NWPSERIAL");
402 goto out;
403 }
404 }
405
406 ret = uart_add_one_port(&nwpserial_reg, &up->port);
407 if (ret == 0)
408 ret = up->port.line;
409
410out:
411 mutex_unlock(&nwpserial_mutex);
412
413 return ret;
414}
415EXPORT_SYMBOL(nwpserial_register_port);
416
417void nwpserial_unregister_port(int line)
418{
419 struct nwpserial_port *up = &nwpserial_ports[line];
420 mutex_lock(&nwpserial_mutex);
421 uart_remove_one_port(&nwpserial_reg, &up->port);
422
423 up->port.type = PORT_UNKNOWN;
424
425 mutex_unlock(&nwpserial_mutex);
426}
427EXPORT_SYMBOL(nwpserial_unregister_port);
428
429#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
430static int __init nwpserial_console_init(void)
431{
432 struct nwpserial_port *up = NULL;
433 struct device_node *dn;
434 const char *name;
435 int dcr_base;
436 int dcr_len;
437 int i;
438
439 /* search for a free port */
440 for (i = 0; i < NWPSERIAL_NR; i++)
441 if (nwpserial_ports[i].port.type == PORT_UNKNOWN) {
442 up = &nwpserial_ports[i];
443 break;
444 }
445
446 if (up == NULL)
447 return -1;
448
449 name = of_get_property(of_chosen, "linux,stdout-path", NULL);
450 if (name == NULL)
451 return -1;
452
453 dn = of_find_node_by_path(name);
454 if (!dn)
455 return -1;
456
457 spin_lock_init(&up->port.lock);
458 up->port.ops = &nwpserial_pops;
459 up->port.type = PORT_NWPSERIAL;
460 up->port.fifosize = 16;
461
462 dcr_base = dcr_resource_start(dn, 0);
463 dcr_len = dcr_resource_len(dn, 0);
464 up->port.iobase = dcr_base;
465
466 up->dcr_host = dcr_map(dn, dcr_base, dcr_len);
467 if (!DCR_MAP_OK(up->dcr_host)) {
468 printk("Cannot map DCR resources for SERIAL");
469 return -1;
470 }
471 register_console(&nwpserial_console);
472 return 0;
473}
474console_initcall(nwpserial_console_init);
475#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */
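Note: wait_for_bits(), used by nwpserial_putchar() above, is defined earlier in nwpserial.c and falls outside this excerpt. A minimal sketch of such a busy-wait, assuming the same dcr_read() accessor and UART_LSR status register as the rest of the driver, might look like:

/* Hypothetical reconstruction -- the real helper lives earlier in the file. */
static inline void wait_for_bits(struct nwpserial_port *up, int bits)
{
	unsigned int status;

	/* spin until every requested LSR bit is set */
	do {
		status = dcr_read(up->dcr_host, UART_LSR);
	} while ((status & bits) != bits);
}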
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 8fa0ff561e9f..a821e3a3d664 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -14,6 +14,7 @@
14#include <linux/serial_core.h> 14#include <linux/serial_core.h>
15#include <linux/serial_8250.h> 15#include <linux/serial_8250.h>
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/nwpserial.h>
17 18
18#include <asm/prom.h> 19#include <asm/prom.h>
19 20
@@ -99,9 +100,16 @@ static int __devinit of_platform_serial_probe(struct of_device *ofdev,
99 goto out; 100 goto out;
100 101
101 switch (port_type) { 102 switch (port_type) {
103#ifdef CONFIG_SERIAL_8250
102 case PORT_8250 ... PORT_MAX_8250: 104 case PORT_8250 ... PORT_MAX_8250:
103 ret = serial8250_register_port(&port); 105 ret = serial8250_register_port(&port);
104 break; 106 break;
107#endif
108#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
109 case PORT_NWPSERIAL:
110 ret = nwpserial_register_port(&port);
111 break;
112#endif
105 default: 113 default:
106 /* need to add code for these */ 114 /* need to add code for these */
107 case PORT_UNKNOWN: 115 case PORT_UNKNOWN:
@@ -129,9 +137,16 @@ static int of_platform_serial_remove(struct of_device *ofdev)
129{ 137{
130 struct of_serial_info *info = ofdev->dev.driver_data; 138 struct of_serial_info *info = ofdev->dev.driver_data;
131 switch (info->type) { 139 switch (info->type) {
140#ifdef CONFIG_SERIAL_8250
132 case PORT_8250 ... PORT_MAX_8250: 141 case PORT_8250 ... PORT_MAX_8250:
133 serial8250_unregister_port(info->line); 142 serial8250_unregister_port(info->line);
134 break; 143 break;
144#endif
145#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
146 case PORT_NWPSERIAL:
147 nwpserial_unregister_port(info->line);
148 break;
149#endif
135 default: 150 default:
136 /* need to add code for these */ 151 /* need to add code for these */
137 break; 152 break;
@@ -148,6 +163,10 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
148 { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, }, 163 { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, },
149 { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, }, 164 { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, },
150 { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, }, 165 { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, },
166#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
167 { .type = "serial", .compatible = "ibm,qpace-nwp-serial",
168 .data = (void *)PORT_NWPSERIAL, },
169#endif
151 { .type = "serial", .data = (void *)PORT_UNKNOWN, }, 170 { .type = "serial", .data = (void *)PORT_UNKNOWN, },
152 { /* end of list */ }, 171 { /* end of list */ },
153}; 172};
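The new match-table entry carries the UART type in its .data field; of_platform_serial_probe() (only partially shown above) turns that cookie back into a port type before the switch statement. Roughly, assuming the probe routine receives the matched of_device_id as its second argument:

/* inside of_platform_serial_probe(); 'id' is the matched of_device_id */
int port_type = (unsigned long)id->data;	/* e.g. PORT_NWPSERIAL */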
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index d8fc9b32fe36..c0916c7b217e 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -279,6 +279,13 @@ static int pn_net_mtu(struct net_device *dev, int new_mtu)
279 return err; 279 return err;
280} 280}
281 281
282static const struct net_device_ops pn_netdev_ops = {
283 .ndo_open = pn_net_open,
284 .ndo_stop = pn_net_close,
285 .ndo_start_xmit = pn_net_xmit,
286 .ndo_change_mtu = pn_net_mtu,
287};
288
282static void pn_net_setup(struct net_device *dev) 289static void pn_net_setup(struct net_device *dev)
283{ 290{
284 dev->features = 0; 291 dev->features = 0;
@@ -290,12 +297,9 @@ static void pn_net_setup(struct net_device *dev)
290 dev->addr_len = 1; 297 dev->addr_len = 1;
291 dev->tx_queue_len = 1; 298 dev->tx_queue_len = 1;
292 299
300 dev->netdev_ops = &pn_netdev_ops;
293 dev->destructor = free_netdev; 301 dev->destructor = free_netdev;
294 dev->header_ops = &phonet_header_ops; 302 dev->header_ops = &phonet_header_ops;
295 dev->open = pn_net_open;
296 dev->stop = pn_net_close;
297 dev->hard_start_xmit = pn_net_xmit; /* mandatory */
298 dev->change_mtu = pn_net_mtu;
299} 303}
300 304
301/*-------------------------------------------------------------------------*/ 305/*-------------------------------------------------------------------------*/
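This hunk is part of the tree-wide move from per-instance callback fields on struct net_device to a single shared, const struct net_device_ops table. The general shape of that conversion, sketched with hypothetical foo_* names:

/* Before: function pointers patched into each (writable) net_device. */
dev->open            = foo_open;
dev->stop            = foo_close;
dev->hard_start_xmit = foo_xmit;

/* After: one read-only ops table, shared by every instance. */
static const struct net_device_ops foo_netdev_ops = {
	.ndo_open       = foo_open,
	.ndo_stop       = foo_close,
	.ndo_start_xmit = foo_xmit,
};

dev->netdev_ops = &foo_netdev_ops;

The same pattern repeats in u_ether.c and the i1480u driver below.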
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index d9739d52f8f5..96d65ca06ecd 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -716,6 +716,14 @@ static int __init get_ether_addr(const char *str, u8 *dev_addr)
716 716
717static struct eth_dev *the_dev; 717static struct eth_dev *the_dev;
718 718
719static const struct net_device_ops eth_netdev_ops = {
720 .ndo_open = eth_open,
721 .ndo_stop = eth_stop,
722 .ndo_start_xmit = eth_start_xmit,
723 .ndo_change_mtu = ueth_change_mtu,
724 .ndo_set_mac_address = eth_mac_addr,
725 .ndo_validate_addr = eth_validate_addr,
726};
719 727
720/** 728/**
721 * gether_setup - initialize one ethernet-over-usb link 729 * gether_setup - initialize one ethernet-over-usb link
@@ -764,12 +772,8 @@ int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
764 if (ethaddr) 772 if (ethaddr)
765 memcpy(ethaddr, dev->host_mac, ETH_ALEN); 773 memcpy(ethaddr, dev->host_mac, ETH_ALEN);
766 774
767 net->change_mtu = ueth_change_mtu; 775 net->netdev_ops = &eth_netdev_ops;
768 net->hard_start_xmit = eth_start_xmit; 776
769 net->open = eth_open;
770 net->stop = eth_stop;
771 /* watchdog_timeo, tx_timeout ... */
772 /* set_multicast_list */
773 SET_ETHTOOL_OPS(net, &ops); 777 SET_ETHTOOL_OPS(net, &ops);
774 778
775 /* two kinds of host-initiated state changes: 779 /* two kinds of host-initiated state changes:
diff --git a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
index 5f1b2951bb83..3421d3339d7d 100644
--- a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
+++ b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
@@ -221,7 +221,6 @@ struct i1480u {
221 struct net_device *net_dev; 221 struct net_device *net_dev;
222 222
223 spinlock_t lock; 223 spinlock_t lock;
224 struct net_device_stats stats;
225 224
226 /* RX context handling */ 225 /* RX context handling */
227 struct sk_buff *rx_skb; 226 struct sk_buff *rx_skb;
@@ -271,7 +270,6 @@ extern int i1480u_stop(struct net_device *);
271extern int i1480u_hard_start_xmit(struct sk_buff *, struct net_device *); 270extern int i1480u_hard_start_xmit(struct sk_buff *, struct net_device *);
272extern void i1480u_tx_timeout(struct net_device *); 271extern void i1480u_tx_timeout(struct net_device *);
273extern int i1480u_set_config(struct net_device *, struct ifmap *); 272extern int i1480u_set_config(struct net_device *, struct ifmap *);
274extern struct net_device_stats *i1480u_get_stats(struct net_device *);
275extern int i1480u_change_mtu(struct net_device *, int); 273extern int i1480u_change_mtu(struct net_device *, int);
276extern void i1480u_uwb_notifs_cb(void *, struct uwb_dev *, enum uwb_notifs); 274extern void i1480u_uwb_notifs_cb(void *, struct uwb_dev *, enum uwb_notifs);
277 275
diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c
index 049c05d4cc6a..f272dfe54d49 100644
--- a/drivers/uwb/i1480/i1480u-wlp/lc.c
+++ b/drivers/uwb/i1480/i1480u-wlp/lc.c
@@ -181,6 +181,15 @@ error:
181} 181}
182#endif 182#endif
183 183
184static const struct net_device_ops i1480u_netdev_ops = {
185 .ndo_open = i1480u_open,
186 .ndo_stop = i1480u_stop,
187 .ndo_start_xmit = i1480u_hard_start_xmit,
188 .ndo_tx_timeout = i1480u_tx_timeout,
189 .ndo_set_config = i1480u_set_config,
190 .ndo_change_mtu = i1480u_change_mtu,
191};
192
184static 193static
185int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface) 194int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface)
186{ 195{
@@ -235,13 +244,7 @@ int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface)
235 net_dev->features |= NETIF_F_HIGHDMA; 244 net_dev->features |= NETIF_F_HIGHDMA;
236 net_dev->watchdog_timeo = 5*HZ; /* FIXME: a better default? */ 245 net_dev->watchdog_timeo = 5*HZ; /* FIXME: a better default? */
237 246
238 net_dev->open = i1480u_open; 247 net_dev->netdev_ops = &i1480u_netdev_ops;
239 net_dev->stop = i1480u_stop;
240 net_dev->hard_start_xmit = i1480u_hard_start_xmit;
241 net_dev->tx_timeout = i1480u_tx_timeout;
242 net_dev->get_stats = i1480u_get_stats;
243 net_dev->set_config = i1480u_set_config;
244 net_dev->change_mtu = i1480u_change_mtu;
245 248
246#ifdef i1480u_FLOW_CONTROL 249#ifdef i1480u_FLOW_CONTROL
247 /* Notification endpoint setup (submitted when we open the device) */ 250 /* Notification endpoint setup (submitted when we open the device) */
diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c
index e3873ffb942c..73055530e60f 100644
--- a/drivers/uwb/i1480/i1480u-wlp/netdev.c
+++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c
@@ -262,15 +262,6 @@ int i1480u_stop(struct net_device *net_dev)
262 return 0; 262 return 0;
263} 263}
264 264
265
266/** Report statistics */
267struct net_device_stats *i1480u_get_stats(struct net_device *net_dev)
268{
269 struct i1480u *i1480u = netdev_priv(net_dev);
270 return &i1480u->stats;
271}
272
273
274/** 265/**
275 * 266 *
276 * Change the interface config--we probably don't have to do anything. 267 * Change the interface config--we probably don't have to do anything.
diff --git a/drivers/uwb/i1480/i1480u-wlp/rx.c b/drivers/uwb/i1480/i1480u-wlp/rx.c
index 34f4cf9a7d34..25a2758beb61 100644
--- a/drivers/uwb/i1480/i1480u-wlp/rx.c
+++ b/drivers/uwb/i1480/i1480u-wlp/rx.c
@@ -167,7 +167,7 @@ do { \
167do { \ 167do { \
168 if (printk_ratelimit()) \ 168 if (printk_ratelimit()) \
169 dev_err(&i1480u->usb_iface->dev, msg); \ 169 dev_err(&i1480u->usb_iface->dev, msg); \
170 i1480u->stats.rx_dropped++; \ 170 i1480u->net_dev->stats.rx_dropped++; \
171} while (0) 171} while (0)
172 172
173 173
@@ -193,10 +193,8 @@ void i1480u_skb_deliver(struct i1480u *i1480u)
193 if (!should_parse) 193 if (!should_parse)
194 goto out; 194 goto out;
195 i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev); 195 i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
196 i1480u->stats.rx_packets++; 196 net_dev->stats.rx_packets++;
197 i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size; 197 net_dev->stats.rx_bytes += i1480u->rx_untd_pkt_size;
198 net_dev->last_rx = jiffies;
199 /* FIXME: flow control: check netif_rx() retval */
200 198
201 netif_rx(i1480u->rx_skb); /* deliver */ 199 netif_rx(i1480u->rx_skb); /* deliver */
202out: 200out:
diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c
index 39032cc3503e..26bacc009c65 100644
--- a/drivers/uwb/i1480/i1480u-wlp/tx.c
+++ b/drivers/uwb/i1480/i1480u-wlp/tx.c
@@ -117,8 +117,8 @@ void i1480u_tx_cb(struct urb *urb)
117 switch (urb->status) { 117 switch (urb->status) {
118 case 0: 118 case 0:
119 spin_lock_irqsave(&i1480u->lock, flags); 119 spin_lock_irqsave(&i1480u->lock, flags);
120 i1480u->stats.tx_packets++; 120 net_dev->stats.tx_packets++;
121 i1480u->stats.tx_bytes += urb->actual_length; 121 net_dev->stats.tx_bytes += urb->actual_length;
122 spin_unlock_irqrestore(&i1480u->lock, flags); 122 spin_unlock_irqrestore(&i1480u->lock, flags);
123 break; 123 break;
124 case -ECONNRESET: /* Not an error, but a controlled situation; */ 124 case -ECONNRESET: /* Not an error, but a controlled situation; */
@@ -530,7 +530,7 @@ int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
530 return NETDEV_TX_OK; 530 return NETDEV_TX_OK;
531error: 531error:
532 dev_kfree_skb_any(skb); 532 dev_kfree_skb_any(skb);
533 i1480u->stats.tx_dropped++; 533 net_dev->stats.tx_dropped++;
534out: 534out:
535 return NETDEV_TX_OK; 535 return NETDEV_TX_OK;
536} 536}
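Alongside the ops-table conversion, the i1480u hunks drop the driver-private struct net_device_stats in favour of the counters embedded in struct net_device, which the default get_stats() already reports. A hedged sketch of the resulting pattern (the foo_* names are hypothetical):

static int foo_rx_one(struct net_device *net_dev, struct sk_buff *skb)
{
	/* counters live in net_dev->stats now; no private copy and
	 * no get_stats() callback needed */
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += skb->len;
	return netif_rx(skb);
}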
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 2ac52fd8cc11..4e046fed1380 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -24,6 +24,7 @@
24#include <linux/amba/bus.h> 24#include <linux/amba/bus.h>
25#include <linux/amba/clcd.h> 25#include <linux/amba/clcd.h>
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/hardirq.h>
27 28
28#include <asm/sizes.h> 29#include <asm/sizes.h>
29 30
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 4a4dd9adc328..72facb9eb7db 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -52,11 +52,11 @@ config LCD_ILI9320
52 then say y to include a power driver for it. 52 then say y to include a power driver for it.
53 53
54config LCD_TDO24M 54config LCD_TDO24M
55 tristate "Toppoly TDO24M LCD Panels support" 55 tristate "Toppoly TDO24M and TDO35S LCD Panels support"
56 depends on LCD_CLASS_DEVICE && SPI_MASTER 56 depends on LCD_CLASS_DEVICE && SPI_MASTER
57 default n 57 default n
58 help 58 help
59 If you have a Toppoly TDO24M series LCD panel, say y here to 59 If you have a Toppoly TDO24M/TDO35S series LCD panel, say y here to
60 include the support for it. 60 include the support for it.
61 61
62config LCD_VGG2432A4 62config LCD_VGG2432A4
@@ -123,17 +123,14 @@ config BACKLIGHT_ATMEL_PWM
123 To compile this driver as a module, choose M here: the module will be 123 To compile this driver as a module, choose M here: the module will be
124 called atmel-pwm-bl. 124 called atmel-pwm-bl.
125 125
126config BACKLIGHT_CORGI 126config BACKLIGHT_GENERIC
127 tristate "Generic (aka Sharp Corgi) Backlight Driver (DEPRECATED)" 127 tristate "Generic (aka Sharp Corgi) Backlight Driver"
128 depends on BACKLIGHT_CLASS_DEVICE 128 depends on BACKLIGHT_CLASS_DEVICE
129 default n 129 default y
130 help 130 help
131 Say y to enable the generic platform backlight driver previously 131 Say y to enable the generic platform backlight driver previously
132 known as the Corgi backlight driver. If you have a Sharp Zaurus 132 known as the Corgi backlight driver. If you have a Sharp Zaurus
133 SL-C7xx, SL-Cxx00 or SL-6000x say y. Most users can say n. 133 SL-C7xx, SL-Cxx00 or SL-6000x say y.
134
135 Note: this driver is marked as deprecated, try enable SPI and
136 use the new corgi_lcd driver with integrated backlight control
137 134
138config BACKLIGHT_LOCOMO 135config BACKLIGHT_LOCOMO
139 tristate "Sharp LOCOMO LCD/Backlight Driver" 136 tristate "Sharp LOCOMO LCD/Backlight Driver"
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 103427de6703..363b3cb2f01b 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
11 11
12obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o 12obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
13obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o 13obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
14obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o 14obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
15obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o 15obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
16obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o 16obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
17obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o 17obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 0664fc032235..157057c79ca3 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -40,6 +40,10 @@ static int fb_notifier_callback(struct notifier_block *self,
40 if (!bd->ops->check_fb || 40 if (!bd->ops->check_fb ||
41 bd->ops->check_fb(evdata->info)) { 41 bd->ops->check_fb(evdata->info)) {
42 bd->props.fb_blank = *(int *)evdata->data; 42 bd->props.fb_blank = *(int *)evdata->data;
43 if (bd->props.fb_blank == FB_BLANK_UNBLANK)
44 bd->props.state &= ~BL_CORE_FBBLANK;
45 else
46 bd->props.state |= BL_CORE_FBBLANK;
43 backlight_update_status(bd); 47 backlight_update_status(bd);
44 } 48 }
45 mutex_unlock(&bd->ops_lock); 49 mutex_unlock(&bd->ops_lock);
@@ -80,20 +84,18 @@ static ssize_t backlight_show_power(struct device *dev,
80static ssize_t backlight_store_power(struct device *dev, 84static ssize_t backlight_store_power(struct device *dev,
81 struct device_attribute *attr, const char *buf, size_t count) 85 struct device_attribute *attr, const char *buf, size_t count)
82{ 86{
83 int rc = -ENXIO; 87 int rc;
84 char *endp;
85 struct backlight_device *bd = to_backlight_device(dev); 88 struct backlight_device *bd = to_backlight_device(dev);
86 int power = simple_strtoul(buf, &endp, 0); 89 unsigned long power;
87 size_t size = endp - buf;
88 90
89 if (*endp && isspace(*endp)) 91 rc = strict_strtoul(buf, 0, &power);
90 size++; 92 if (rc)
91 if (size != count) 93 return rc;
92 return -EINVAL;
93 94
95 rc = -ENXIO;
94 mutex_lock(&bd->ops_lock); 96 mutex_lock(&bd->ops_lock);
95 if (bd->ops) { 97 if (bd->ops) {
96 pr_debug("backlight: set power to %d\n", power); 98 pr_debug("backlight: set power to %lu\n", power);
97 if (bd->props.power != power) { 99 if (bd->props.power != power) {
98 bd->props.power = power; 100 bd->props.power = power;
99 backlight_update_status(bd); 101 backlight_update_status(bd);
@@ -116,28 +118,25 @@ static ssize_t backlight_show_brightness(struct device *dev,
116static ssize_t backlight_store_brightness(struct device *dev, 118static ssize_t backlight_store_brightness(struct device *dev,
117 struct device_attribute *attr, const char *buf, size_t count) 119 struct device_attribute *attr, const char *buf, size_t count)
118{ 120{
119 int rc = -ENXIO; 121 int rc;
120 char *endp;
121 struct backlight_device *bd = to_backlight_device(dev); 122 struct backlight_device *bd = to_backlight_device(dev);
122 int brightness = simple_strtoul(buf, &endp, 0); 123 unsigned long brightness;
123 size_t size = endp - buf; 124
125 rc = strict_strtoul(buf, 0, &brightness);
126 if (rc)
127 return rc;
124 128
125 if (*endp && isspace(*endp)) 129 rc = -ENXIO;
126 size++;
127 if (size != count)
128 return -EINVAL;
129 130
130 mutex_lock(&bd->ops_lock); 131 mutex_lock(&bd->ops_lock);
131 if (bd->ops) { 132 if (bd->ops) {
132 if (brightness > bd->props.max_brightness) 133 if (brightness > bd->props.max_brightness)
133 rc = -EINVAL; 134 rc = -EINVAL;
134 else { 135 else {
135 pr_debug("backlight: set brightness to %d\n", 136 pr_debug("backlight: set brightness to %lu\n",
136 brightness); 137 brightness);
137 if (bd->props.brightness != brightness) { 138 bd->props.brightness = brightness;
138 bd->props.brightness = brightness; 139 backlight_update_status(bd);
139 backlight_update_status(bd);
140 }
141 rc = count; 140 rc = count;
142 } 141 }
143 } 142 }
@@ -170,6 +169,34 @@ static ssize_t backlight_show_actual_brightness(struct device *dev,
170 169
171static struct class *backlight_class; 170static struct class *backlight_class;
172 171
172static int backlight_suspend(struct device *dev, pm_message_t state)
173{
174 struct backlight_device *bd = to_backlight_device(dev);
175
176 if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
177 mutex_lock(&bd->ops_lock);
178 bd->props.state |= BL_CORE_SUSPENDED;
179 backlight_update_status(bd);
180 mutex_unlock(&bd->ops_lock);
181 }
182
183 return 0;
184}
185
186static int backlight_resume(struct device *dev)
187{
188 struct backlight_device *bd = to_backlight_device(dev);
189
190 if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
191 mutex_lock(&bd->ops_lock);
192 bd->props.state &= ~BL_CORE_SUSPENDED;
193 backlight_update_status(bd);
194 mutex_unlock(&bd->ops_lock);
195 }
196
197 return 0;
198}
199
173static void bl_device_release(struct device *dev) 200static void bl_device_release(struct device *dev)
174{ 201{
175 struct backlight_device *bd = to_backlight_device(dev); 202 struct backlight_device *bd = to_backlight_device(dev);
@@ -286,6 +313,8 @@ static int __init backlight_class_init(void)
286 } 313 }
287 314
288 backlight_class->dev_attrs = bl_device_attributes; 315 backlight_class->dev_attrs = bl_device_attributes;
316 backlight_class->suspend = backlight_suspend;
317 backlight_class->resume = backlight_resume;
289 return 0; 318 return 0;
290} 319}
291 320
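The sysfs store rewrites above trade the open-coded simple_strtoul()/endp bookkeeping for strict_strtoul(), which rejects anything that is not a clean number (a trailing newline is tolerated). The resulting idiom for an attribute store, sketched with hypothetical names:

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned long val;
	int rc;

	rc = strict_strtoul(buf, 0, &val);	/* fails on trailing junk */
	if (rc)
		return rc;

	/* ... validate and apply 'val' under the appropriate lock ... */
	return count;
}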
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
deleted file mode 100644
index 4d4d037e3ec9..000000000000
--- a/drivers/video/backlight/corgi_bl.c
+++ /dev/null
@@ -1,169 +0,0 @@
1/*
2 * Backlight Driver for Sharp Zaurus Handhelds (various models)
3 *
4 * Copyright (c) 2004-2006 Richard Purdie
5 *
6 * Based on Sharp's 2.4 Backlight Driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/mutex.h>
19#include <linux/fb.h>
20#include <linux/backlight.h>
21
22static int corgibl_intensity;
23static struct backlight_properties corgibl_data;
24static struct backlight_device *corgi_backlight_device;
25static struct generic_bl_info *bl_machinfo;
26
27static unsigned long corgibl_flags;
28#define CORGIBL_SUSPENDED 0x01
29#define CORGIBL_BATTLOW 0x02
30
31static int corgibl_send_intensity(struct backlight_device *bd)
32{
33 int intensity = bd->props.brightness;
34
35 if (bd->props.power != FB_BLANK_UNBLANK)
36 intensity = 0;
37 if (bd->props.fb_blank != FB_BLANK_UNBLANK)
38 intensity = 0;
39 if (corgibl_flags & CORGIBL_SUSPENDED)
40 intensity = 0;
41 if (corgibl_flags & CORGIBL_BATTLOW)
42 intensity &= bl_machinfo->limit_mask;
43
44 bl_machinfo->set_bl_intensity(intensity);
45
46 corgibl_intensity = intensity;
47
48 if (bl_machinfo->kick_battery)
49 bl_machinfo->kick_battery();
50
51 return 0;
52}
53
54#ifdef CONFIG_PM
55static int corgibl_suspend(struct platform_device *pdev, pm_message_t state)
56{
57 struct backlight_device *bd = platform_get_drvdata(pdev);
58
59 corgibl_flags |= CORGIBL_SUSPENDED;
60 backlight_update_status(bd);
61 return 0;
62}
63
64static int corgibl_resume(struct platform_device *pdev)
65{
66 struct backlight_device *bd = platform_get_drvdata(pdev);
67
68 corgibl_flags &= ~CORGIBL_SUSPENDED;
69 backlight_update_status(bd);
70 return 0;
71}
72#else
73#define corgibl_suspend NULL
74#define corgibl_resume NULL
75#endif
76
77static int corgibl_get_intensity(struct backlight_device *bd)
78{
79 return corgibl_intensity;
80}
81
82/*
83 * Called when the battery is low to limit the backlight intensity.
84 * If limit==0 clear any limit, otherwise limit the intensity
85 */
86void corgibl_limit_intensity(int limit)
87{
88 if (limit)
89 corgibl_flags |= CORGIBL_BATTLOW;
90 else
91 corgibl_flags &= ~CORGIBL_BATTLOW;
92 backlight_update_status(corgi_backlight_device);
93}
94EXPORT_SYMBOL(corgibl_limit_intensity);
95
96
97static struct backlight_ops corgibl_ops = {
98 .get_brightness = corgibl_get_intensity,
99 .update_status = corgibl_send_intensity,
100};
101
102static int corgibl_probe(struct platform_device *pdev)
103{
104 struct generic_bl_info *machinfo = pdev->dev.platform_data;
105 const char *name = "generic-bl";
106
107 bl_machinfo = machinfo;
108 if (!machinfo->limit_mask)
109 machinfo->limit_mask = -1;
110
111 if (machinfo->name)
112 name = machinfo->name;
113
114 corgi_backlight_device = backlight_device_register (name,
115 &pdev->dev, NULL, &corgibl_ops);
116 if (IS_ERR (corgi_backlight_device))
117 return PTR_ERR (corgi_backlight_device);
118
119 platform_set_drvdata(pdev, corgi_backlight_device);
120
121 corgi_backlight_device->props.max_brightness = machinfo->max_intensity;
122 corgi_backlight_device->props.power = FB_BLANK_UNBLANK;
123 corgi_backlight_device->props.brightness = machinfo->default_intensity;
124 backlight_update_status(corgi_backlight_device);
125
126 printk("Corgi Backlight Driver Initialized.\n");
127 return 0;
128}
129
130static int corgibl_remove(struct platform_device *pdev)
131{
132 struct backlight_device *bd = platform_get_drvdata(pdev);
133
134 corgibl_data.power = 0;
135 corgibl_data.brightness = 0;
136 backlight_update_status(bd);
137
138 backlight_device_unregister(bd);
139
140 printk("Corgi Backlight Driver Unloaded\n");
141 return 0;
142}
143
144static struct platform_driver corgibl_driver = {
145 .probe = corgibl_probe,
146 .remove = corgibl_remove,
147 .suspend = corgibl_suspend,
148 .resume = corgibl_resume,
149 .driver = {
150 .name = "generic-bl",
151 },
152};
153
154static int __init corgibl_init(void)
155{
156 return platform_driver_register(&corgibl_driver);
157}
158
159static void __exit corgibl_exit(void)
160{
161 platform_driver_unregister(&corgibl_driver);
162}
163
164module_init(corgibl_init);
165module_exit(corgibl_exit);
166
167MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
168MODULE_DESCRIPTION("Corgi Backlight Driver");
169MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 26add8898605..b9fe62b475c6 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -259,22 +259,18 @@ static int __init cr_backlight_init(void)
259{ 259{
260 int ret = platform_driver_register(&cr_backlight_driver); 260 int ret = platform_driver_register(&cr_backlight_driver);
261 261
262 if (!ret) { 262 if (ret)
263 crp = platform_device_alloc("cr_backlight", -1); 263 return ret;
264 if (!crp)
265 return -ENOMEM;
266 264
267 ret = platform_device_add(crp); 265 crp = platform_device_register_simple("cr_backlight", -1, NULL, 0);
268 266 if (IS_ERR(crp)) {
269 if (ret) { 267 platform_driver_unregister(&cr_backlight_driver);
270 platform_device_put(crp); 268 return PTR_ERR(crp);
271 platform_driver_unregister(&cr_backlight_driver);
272 }
273 } 269 }
274 270
275 printk("Carillo Ranch Backlight Driver Initialized.\n"); 271 printk("Carillo Ranch Backlight Driver Initialized.\n");
276 272
277 return ret; 273 return 0;
278} 274}
279 275
280static void __exit cr_backlight_exit(void) 276static void __exit cr_backlight_exit(void)
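cr_bllcd.c (and hp680_bl.c and progear_bl.c further down) collapse the alloc/add/put sequence into platform_device_register_simple(), which allocates and adds the device in one call and returns an ERR_PTR rather than NULL on failure. The init idiom these drivers converge on, with a hypothetical driver name:

static int __init foo_init(void)
{
	int ret = platform_driver_register(&foo_driver);

	if (ret)
		return ret;

	foo_device = platform_device_register_simple("foo", -1, NULL, 0);
	if (IS_ERR(foo_device)) {
		/* undo the driver registration on device failure */
		platform_driver_unregister(&foo_driver);
		return PTR_ERR(foo_device);
	}
	return 0;
}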
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
new file mode 100644
index 000000000000..6d27f62fdcd0
--- /dev/null
+++ b/drivers/video/backlight/generic_bl.c
@@ -0,0 +1,147 @@
1/*
2 * Generic Backlight Driver
3 *
4 * Copyright (c) 2004-2008 Richard Purdie
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/platform_device.h>
16#include <linux/mutex.h>
17#include <linux/fb.h>
18#include <linux/backlight.h>
19
20static int genericbl_intensity;
21static struct backlight_device *generic_backlight_device;
22static struct generic_bl_info *bl_machinfo;
23
24/* Flag to signal when the battery is low */
25#define GENERICBL_BATTLOW BL_CORE_DRIVER1
26
27static int genericbl_send_intensity(struct backlight_device *bd)
28{
29 int intensity = bd->props.brightness;
30
31 if (bd->props.power != FB_BLANK_UNBLANK)
32 intensity = 0;
33 if (bd->props.state & BL_CORE_FBBLANK)
34 intensity = 0;
35 if (bd->props.state & BL_CORE_SUSPENDED)
36 intensity = 0;
37 if (bd->props.state & GENERICBL_BATTLOW)
38 intensity &= bl_machinfo->limit_mask;
39
40 bl_machinfo->set_bl_intensity(intensity);
41
42 genericbl_intensity = intensity;
43
44 if (bl_machinfo->kick_battery)
45 bl_machinfo->kick_battery();
46
47 return 0;
48}
49
50static int genericbl_get_intensity(struct backlight_device *bd)
51{
52 return genericbl_intensity;
53}
54
55/*
56 * Called when the battery is low to limit the backlight intensity.
57 * If limit==0 clear any limit, otherwise limit the intensity
58 */
59void corgibl_limit_intensity(int limit)
60{
61 struct backlight_device *bd = generic_backlight_device;
62
63 mutex_lock(&bd->ops_lock);
64 if (limit)
65 bd->props.state |= GENERICBL_BATTLOW;
66 else
67 bd->props.state &= ~GENERICBL_BATTLOW;
68 backlight_update_status(generic_backlight_device);
69 mutex_unlock(&bd->ops_lock);
70}
71EXPORT_SYMBOL(corgibl_limit_intensity);
72
73static struct backlight_ops genericbl_ops = {
74 .options = BL_CORE_SUSPENDRESUME,
75 .get_brightness = genericbl_get_intensity,
76 .update_status = genericbl_send_intensity,
77};
78
79static int genericbl_probe(struct platform_device *pdev)
80{
81 struct generic_bl_info *machinfo = pdev->dev.platform_data;
82 const char *name = "generic-bl";
83 struct backlight_device *bd;
84
85 bl_machinfo = machinfo;
86 if (!machinfo->limit_mask)
87 machinfo->limit_mask = -1;
88
89 if (machinfo->name)
90 name = machinfo->name;
91
92	bd = backlight_device_register(name,
93			&pdev->dev, NULL, &genericbl_ops);
94	if (IS_ERR(bd))
95		return PTR_ERR(bd);
96
97 platform_set_drvdata(pdev, bd);
98
99 bd->props.max_brightness = machinfo->max_intensity;
100 bd->props.power = FB_BLANK_UNBLANK;
101 bd->props.brightness = machinfo->default_intensity;
102 backlight_update_status(bd);
103
104 generic_backlight_device = bd;
105
106 printk("Generic Backlight Driver Initialized.\n");
107 return 0;
108}
109
110static int genericbl_remove(struct platform_device *pdev)
111{
112 struct backlight_device *bd = platform_get_drvdata(pdev);
113
114 bd->props.power = 0;
115 bd->props.brightness = 0;
116 backlight_update_status(bd);
117
118 backlight_device_unregister(bd);
119
120 printk("Generic Backlight Driver Unloaded\n");
121 return 0;
122}
123
124static struct platform_driver genericbl_driver = {
125 .probe = genericbl_probe,
126 .remove = genericbl_remove,
127 .driver = {
128 .name = "generic-bl",
129 },
130};
131
132static int __init genericbl_init(void)
133{
134 return platform_driver_register(&genericbl_driver);
135}
136
137static void __exit genericbl_exit(void)
138{
139 platform_driver_unregister(&genericbl_driver);
140}
141
142module_init(genericbl_init);
143module_exit(genericbl_exit);
144
145MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
146MODULE_DESCRIPTION("Generic Backlight Driver");
147MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index d4cfed0b26d5..5be55a20d8c7 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -151,19 +151,15 @@ static int __init hp680bl_init(void)
151 int ret; 151 int ret;
152 152
153 ret = platform_driver_register(&hp680bl_driver); 153 ret = platform_driver_register(&hp680bl_driver);
154 if (!ret) { 154 if (ret)
155 hp680bl_device = platform_device_alloc("hp680-bl", -1); 155 return ret;
156 if (!hp680bl_device) 156 hp680bl_device = platform_device_register_simple("hp680-bl", -1,
157 return -ENOMEM; 157 NULL, 0);
158 158 if (IS_ERR(hp680bl_device)) {
159 ret = platform_device_add(hp680bl_device); 159 platform_driver_unregister(&hp680bl_driver);
160 160 return PTR_ERR(hp680bl_device);
161 if (ret) {
162 platform_device_put(hp680bl_device);
163 platform_driver_unregister(&hp680bl_driver);
164 }
165 } 161 }
166 return ret; 162 return 0;
167} 163}
168 164
169static void __exit hp680bl_exit(void) 165static void __exit hp680bl_exit(void)
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 06964af761c6..65864c500455 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -70,6 +70,7 @@ static int mbp_get_intensity(struct backlight_device *bd)
70} 70}
71 71
72static struct backlight_ops mbp_ops = { 72static struct backlight_ops mbp_ops = {
73 .options = BL_CORE_SUSPENDRESUME,
73 .get_brightness = mbp_get_intensity, 74 .get_brightness = mbp_get_intensity,
74 .update_status = mbp_send_intensity, 75 .update_status = mbp_send_intensity,
75}; 76};
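Setting .options = BL_CORE_SUSPENDRESUME is all a driver now needs to opt into the core-managed suspend handling added to backlight.c above: the class suspend/resume hooks flip BL_CORE_SUSPENDED in props.state and call backlight_update_status(). An update_status implementation only has to honour the state bits (a sketch with a hypothetical driver):

static int foo_update_status(struct backlight_device *bd)
{
	int brightness = bd->props.brightness;

	/* both bits are maintained by the backlight core */
	if (bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
		brightness = 0;

	foo_hw_set_brightness(brightness);	/* hypothetical hardware hook */
	return 0;
}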
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 15fb4d58b5bc..9edaf24fd82d 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -119,20 +119,16 @@ static int __init progearbl_init(void)
119{ 119{
120 int ret = platform_driver_register(&progearbl_driver); 120 int ret = platform_driver_register(&progearbl_driver);
121 121
122 if (!ret) { 122 if (ret)
123 progearbl_device = platform_device_alloc("progear-bl", -1); 123 return ret;
124 if (!progearbl_device) 124 progearbl_device = platform_device_register_simple("progear-bl", -1,
125 return -ENOMEM; 125 NULL, 0);
126 126 if (IS_ERR(progearbl_device)) {
127 ret = platform_device_add(progearbl_device); 127 platform_driver_unregister(&progearbl_driver);
128 128 return PTR_ERR(progearbl_device);
129 if (ret) {
130 platform_device_put(progearbl_device);
131 platform_driver_unregister(&progearbl_driver);
132 }
133 } 129 }
134 130
135 return ret; 131 return 0;
136} 132}
137 133
138static void __exit progearbl_exit(void) 134static void __exit progearbl_exit(void)
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 8427669162ea..1dae7f8f3c6b 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -14,6 +14,7 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/spi/spi.h> 16#include <linux/spi/spi.h>
17#include <linux/spi/tdo24m.h>
17#include <linux/fb.h> 18#include <linux/fb.h>
18#include <linux/lcd.h> 19#include <linux/lcd.h>
19 20
@@ -31,6 +32,9 @@ struct tdo24m {
31 struct spi_transfer xfer; 32 struct spi_transfer xfer;
32 uint8_t *buf; 33 uint8_t *buf;
33 34
35 int (*adj_mode)(struct tdo24m *lcd, int mode);
36 int color_invert;
37
34 int power; 38 int power;
35 int mode; 39 int mode;
36}; 40};
@@ -66,7 +70,7 @@ static uint32_t lcd_panel_off[] = {
66 CMD_NULL, 70 CMD_NULL,
67}; 71};
68 72
69static uint32_t lcd_vga_pass_through[] = { 73static uint32_t lcd_vga_pass_through_tdo24m[] = {
70 CMD1(0xB0, 0x16), 74 CMD1(0xB0, 0x16),
71 CMD1(0xBC, 0x80), 75 CMD1(0xBC, 0x80),
72 CMD1(0xE1, 0x00), 76 CMD1(0xE1, 0x00),
@@ -75,7 +79,7 @@ static uint32_t lcd_vga_pass_through[] = {
75 CMD_NULL, 79 CMD_NULL,
76}; 80};
77 81
78static uint32_t lcd_qvga_pass_through[] = { 82static uint32_t lcd_qvga_pass_through_tdo24m[] = {
79 CMD1(0xB0, 0x16), 83 CMD1(0xB0, 0x16),
80 CMD1(0xBC, 0x81), 84 CMD1(0xBC, 0x81),
81 CMD1(0xE1, 0x00), 85 CMD1(0xE1, 0x00),
@@ -84,7 +88,7 @@ static uint32_t lcd_qvga_pass_through[] = {
84 CMD_NULL, 88 CMD_NULL,
85}; 89};
86 90
87static uint32_t lcd_vga_transfer[] = { 91static uint32_t lcd_vga_transfer_tdo24m[] = {
88 CMD1(0xcf, 0x02), /* Blanking period control (1) */ 92 CMD1(0xcf, 0x02), /* Blanking period control (1) */
89 CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */ 93 CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
90 CMD1(0xd1, 0x01), /* CKV timing control on/off */ 94 CMD1(0xd1, 0x01), /* CKV timing control on/off */
@@ -110,6 +114,35 @@ static uint32_t lcd_qvga_transfer[] = {
110 CMD_NULL, 114 CMD_NULL,
111}; 115};
112 116
117static uint32_t lcd_vga_pass_through_tdo35s[] = {
118 CMD1(0xB0, 0x16),
119 CMD1(0xBC, 0x80),
120 CMD1(0xE1, 0x00),
121 CMD1(0x3B, 0x00),
122 CMD_NULL,
123};
124
125static uint32_t lcd_qvga_pass_through_tdo35s[] = {
126 CMD1(0xB0, 0x16),
127 CMD1(0xBC, 0x81),
128 CMD1(0xE1, 0x00),
129 CMD1(0x3B, 0x22),
130 CMD_NULL,
131};
132
133static uint32_t lcd_vga_transfer_tdo35s[] = {
134 CMD1(0xcf, 0x02), /* Blanking period control (1) */
135 CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
136 CMD1(0xd1, 0x01), /* CKV timing control on/off */
137 CMD2(0xd2, 0x00, 0x1e), /* CKV 1,2 timing control */
138 CMD2(0xd3, 0x14, 0x28), /* OEV timing control */
139 CMD2(0xd4, 0x28, 0x64), /* ASW timing control (1) */
140 CMD1(0xd5, 0x28), /* ASW timing control (2) */
141 CMD0(0x21), /* Invert for normally black display */
142 CMD0(0x29), /* Display on */
143 CMD_NULL,
144};
145
113static uint32_t lcd_panel_config[] = { 146static uint32_t lcd_panel_config[] = {
114 CMD2(0xb8, 0xff, 0xf9), /* Output control */ 147 CMD2(0xb8, 0xff, 0xf9), /* Output control */
115 CMD0(0x11), /* sleep out */ 148 CMD0(0x11), /* sleep out */
@@ -148,6 +181,8 @@ static int tdo24m_writes(struct tdo24m *lcd, uint32_t *array)
148 int nparams, err = 0; 181 int nparams, err = 0;
149 182
150 for (; *p != CMD_NULL; p++) { 183 for (; *p != CMD_NULL; p++) {
184 if (!lcd->color_invert && *p == CMD0(0x21))
185 continue;
151 186
152 nparams = (*p >> 30) & 0x3; 187 nparams = (*p >> 30) & 0x3;
153 188
@@ -184,12 +219,33 @@ static int tdo24m_adj_mode(struct tdo24m *lcd, int mode)
184{ 219{
185 switch (mode) { 220 switch (mode) {
186 case MODE_VGA: 221 case MODE_VGA:
187 tdo24m_writes(lcd, lcd_vga_pass_through); 222 tdo24m_writes(lcd, lcd_vga_pass_through_tdo24m);
188 tdo24m_writes(lcd, lcd_panel_config); 223 tdo24m_writes(lcd, lcd_panel_config);
189 tdo24m_writes(lcd, lcd_vga_transfer); 224 tdo24m_writes(lcd, lcd_vga_transfer_tdo24m);
190 break; 225 break;
191 case MODE_QVGA: 226 case MODE_QVGA:
192 tdo24m_writes(lcd, lcd_qvga_pass_through); 227 tdo24m_writes(lcd, lcd_qvga_pass_through_tdo24m);
228 tdo24m_writes(lcd, lcd_panel_config);
229 tdo24m_writes(lcd, lcd_qvga_transfer);
230 break;
231 default:
232 return -EINVAL;
233 }
234
235 lcd->mode = mode;
236 return 0;
237}
238
239static int tdo35s_adj_mode(struct tdo24m *lcd, int mode)
240{
241 switch (mode) {
242 case MODE_VGA:
243 tdo24m_writes(lcd, lcd_vga_pass_through_tdo35s);
244 tdo24m_writes(lcd, lcd_panel_config);
245 tdo24m_writes(lcd, lcd_vga_transfer_tdo35s);
246 break;
247 case MODE_QVGA:
248 tdo24m_writes(lcd, lcd_qvga_pass_through_tdo35s);
193 tdo24m_writes(lcd, lcd_panel_config); 249 tdo24m_writes(lcd, lcd_panel_config);
194 tdo24m_writes(lcd, lcd_qvga_transfer); 250 tdo24m_writes(lcd, lcd_qvga_transfer);
195 break; 251 break;
@@ -213,7 +269,7 @@ static int tdo24m_power_on(struct tdo24m *lcd)
213 if (err) 269 if (err)
214 goto out; 270 goto out;
215 271
216 err = tdo24m_adj_mode(lcd, lcd->mode); 272 err = lcd->adj_mode(lcd, lcd->mode);
217out: 273out:
218 return err; 274 return err;
219} 275}
@@ -262,7 +318,7 @@ static int tdo24m_set_mode(struct lcd_device *ld, struct fb_videomode *m)
262 if (lcd->mode == mode) 318 if (lcd->mode == mode)
263 return 0; 319 return 0;
264 320
265 return tdo24m_adj_mode(lcd, mode); 321 return lcd->adj_mode(lcd, mode);
266} 322}
267 323
268static struct lcd_ops tdo24m_ops = { 324static struct lcd_ops tdo24m_ops = {
@@ -276,8 +332,16 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
276 struct tdo24m *lcd; 332 struct tdo24m *lcd;
277 struct spi_message *m; 333 struct spi_message *m;
278 struct spi_transfer *x; 334 struct spi_transfer *x;
335 struct tdo24m_platform_data *pdata;
336 enum tdo24m_model model;
279 int err; 337 int err;
280 338
339 pdata = spi->dev.platform_data;
340 if (pdata)
341 model = pdata->model;
342 else
343 model = TDO24M;
344
281 spi->bits_per_word = 8; 345 spi->bits_per_word = 8;
282 spi->mode = SPI_MODE_3; 346 spi->mode = SPI_MODE_3;
283 err = spi_setup(spi); 347 err = spi_setup(spi);
@@ -306,6 +370,20 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
306 x->tx_buf = &lcd->buf[0]; 370 x->tx_buf = &lcd->buf[0];
307 spi_message_add_tail(x, m); 371 spi_message_add_tail(x, m);
308 372
373 switch (model) {
374 case TDO24M:
375 lcd->color_invert = 1;
376 lcd->adj_mode = tdo24m_adj_mode;
377 break;
378 case TDO35S:
379 lcd->adj_mode = tdo35s_adj_mode;
380 lcd->color_invert = 0;
381 break;
382 default:
383 dev_err(&spi->dev, "Unsupported model");
384 goto out_free;
385 }
386
309 lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev, 387 lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev,
310 lcd, &tdo24m_ops); 388 lcd, &tdo24m_ops);
311 if (IS_ERR(lcd->lcd_dev)) { 389 if (IS_ERR(lcd->lcd_dev)) {
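With the new struct tdo24m_platform_data, board code chooses the panel variant when it registers the SPI slave; without platform data the driver keeps the old TDO24M behaviour. A hedged example of such a board entry, assuming the driver binds by the modalias "tdo24m" (the board name, bus number, and clock rate here are hypothetical):

static struct tdo24m_platform_data foo_tdo24m_pdata = {
	.model = TDO35S,	/* select the TDO35S init sequences */
};

static struct spi_board_info foo_spi_board_info[] __initdata = {
	{
		.modalias      = "tdo24m",
		.platform_data = &foo_tdo24m_pdata,
		.max_speed_hz  = 1000000,
		.bus_num       = 1,
		.chip_select   = 0,
	},
};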
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 57a26649f1a5..b7fbc75a62fc 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -39,6 +39,7 @@ struct tosa_lcd_data {
39 struct i2c_client *i2c; 39 struct i2c_client *i2c;
40 40
41 int lcd_power; 41 int lcd_power;
42 bool is_vga;
42}; 43};
43 44
44static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data) 45static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
@@ -81,8 +82,12 @@ static void tosa_lcd_tg_init(struct tosa_lcd_data *data)
81static void tosa_lcd_tg_on(struct tosa_lcd_data *data) 82static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
82{ 83{
83 struct spi_device *spi = data->spi; 84 struct spi_device *spi = data->spi;
84 const int value = TG_REG0_COLOR | TG_REG0_UD | TG_REG0_LR; 85 int value = TG_REG0_COLOR | TG_REG0_UD | TG_REG0_LR;
85 tosa_tg_send(spi, TG_PNLCTL, value | TG_REG0_VQV); /* this depends on mode */ 86
87 if (data->is_vga)
88 value |= TG_REG0_VQV;
89
90 tosa_tg_send(spi, TG_PNLCTL, value);
86 91
87	/* TG LCD panel power up */ 92	/* TG LCD panel power up */
88 tosa_tg_send(spi, TG_PINICTL,0x4); 93 tosa_tg_send(spi, TG_PINICTL,0x4);
@@ -142,9 +147,25 @@ static int tosa_lcd_get_power(struct lcd_device *lcd)
142 return data->lcd_power; 147 return data->lcd_power;
143} 148}
144 149
150static int tosa_lcd_set_mode(struct lcd_device *lcd, struct fb_videomode *mode)
151{
152 struct tosa_lcd_data *data = lcd_get_data(lcd);
153
154 if (mode->xres == 320 || mode->yres == 320)
155 data->is_vga = false;
156 else
157 data->is_vga = true;
158
159 if (POWER_IS_ON(data->lcd_power))
160 tosa_lcd_tg_on(data);
161
162 return 0;
163}
164
145static struct lcd_ops tosa_lcd_ops = { 165static struct lcd_ops tosa_lcd_ops = {
146 .set_power = tosa_lcd_set_power, 166 .set_power = tosa_lcd_set_power,
147 .get_power = tosa_lcd_get_power, 167 .get_power = tosa_lcd_get_power,
168 .set_mode = tosa_lcd_set_mode,
148}; 169};
149 170
150static int __devinit tosa_lcd_probe(struct spi_device *spi) 171static int __devinit tosa_lcd_probe(struct spi_device *spi)
@@ -156,6 +177,8 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
156 if (!data) 177 if (!data)
157 return -ENOMEM; 178 return -ENOMEM;
158 179
180	data->is_vga = true; /* default to VGA mode */
181
159 /* 182 /*
160 * bits_per_word cannot be configured in platform data 183 * bits_per_word cannot be configured in platform data
161 */ 184 */
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c
index 593c7687d54a..8e653b8a6f17 100644
--- a/drivers/video/backlight/vgg2432a4.c
+++ b/drivers/video/backlight/vgg2432a4.c
@@ -137,7 +137,7 @@ static int vgg2432a4_lcd_init(struct ili9320 *lcd,
137 137
138 ili9320_write(lcd, ILI9320_RGB_IF1, cfg->rgb_if1); 138 ili9320_write(lcd, ILI9320_RGB_IF1, cfg->rgb_if1);
139 ili9320_write(lcd, ILI9320_FRAMEMAKER, 0x0); 139 ili9320_write(lcd, ILI9320_FRAMEMAKER, 0x0);
140 ili9320_write(lcd, ILI9320_RGB_IF2, ILI9320_RGBIF2_DPL); 140 ili9320_write(lcd, ILI9320_RGB_IF2, cfg->rgb_if2);
141 141
142 ret = ili9320_write_regs(lcd, vgg_init1, ARRAY_SIZE(vgg_init1)); 142 ret = ili9320_write_regs(lcd, vgg_init1, ARRAY_SIZE(vgg_init1));
143 if (ret != 0) 143 if (ret != 0)
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 90616822cd20..96d2f8e4c275 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -34,6 +34,12 @@ config W1_MASTER_DS2482
34 This driver can also be built as a module. If so, the module 34 This driver can also be built as a module. If so, the module
35 will be called ds2482. 35 will be called ds2482.
36 36
37config W1_MASTER_MXC
38 tristate "Freescale MXC 1-wire busmaster"
39 depends on W1 && ARCH_MXC
40 help
41	  Say Y here to enable the MXC 1-wire host controller.
42
37config W1_MASTER_DS1WM 43config W1_MASTER_DS1WM
38 tristate "Maxim DS1WM 1-wire busmaster" 44 tristate "Maxim DS1WM 1-wire busmaster"
39 depends on W1 && ARM && HAVE_CLK 45 depends on W1 && ARM && HAVE_CLK
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index bc4714a75f3a..c5a3e96fcbab 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -5,6 +5,8 @@
5obj-$(CONFIG_W1_MASTER_MATROX) += matrox_w1.o 5obj-$(CONFIG_W1_MASTER_MATROX) += matrox_w1.o
6obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o 6obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o
7obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o 7obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o
8obj-$(CONFIG_W1_MASTER_MXC) += mxc_w1.o
9
8obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o 10obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o
9obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o 11obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
10obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o 12obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
new file mode 100644
index 000000000000..b9d74d0b353e
--- /dev/null
+++ b/drivers/w1/masters/mxc_w1.c
@@ -0,0 +1,211 @@
1/*
2 * Copyright 2005-2008 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright 2008 Luotao Fu, kernel@pengutronix.de
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/interrupt.h>
22#include <linux/platform_device.h>
23#include <linux/clk.h>
24#include <linux/delay.h>
25#include <linux/io.h>
26
27#include "../w1.h"
28#include "../w1_int.h"
29#include "../w1_log.h"
30
31/* According to the mx27 datasheet the reset procedure should take up to about
32 * 1350us, so a timeout of 500 * 100us = 50ms leaves ample margin. */
33#define MXC_W1_RESET_TIMEOUT 500
34
35/*
36 * MXC W1 Register offsets
37 */
38#define MXC_W1_CONTROL 0x00
39#define MXC_W1_TIME_DIVIDER 0x02
40#define MXC_W1_RESET 0x04
41#define MXC_W1_COMMAND 0x06
42#define MXC_W1_TXRX 0x08
43#define MXC_W1_INTERRUPT 0x0A
44#define MXC_W1_INTERRUPT_EN 0x0C
45
46struct mxc_w1_device {
47 void __iomem *regs;
48 unsigned int clkdiv;
49 struct clk *clk;
50 struct w1_bus_master bus_master;
51};
52
53/*
54 * this is the low level routine to
55 * reset the device on the One Wire interface
56 * on the hardware
57 */
58static u8 mxc_w1_ds2_reset_bus(void *data)
59{
60 u8 reg_val;
61 unsigned int timeout_cnt = 0;
62 struct mxc_w1_device *dev = data;
63
64 __raw_writeb(0x80, (dev->regs + MXC_W1_CONTROL));
65
66 while (1) {
67 reg_val = __raw_readb(dev->regs + MXC_W1_CONTROL);
68
69 if (((reg_val >> 7) & 0x1) == 0 ||
70 timeout_cnt > MXC_W1_RESET_TIMEOUT)
71 break;
72 else
73 timeout_cnt++;
74
75 udelay(100);
76 }
77 return (reg_val >> 7) & 0x1;
78}
79
80/*
81 * this is the low level routine to read/write a bit on the One Wire
82 * interface on the hardware. It writes a 0 if the bit parameter is 0;
83 * otherwise it performs a write-1/read cycle.
84 */
85static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
86{
87 struct mxc_w1_device *mdev = data;
88 void __iomem *ctrl_addr = mdev->regs + MXC_W1_CONTROL;
89 unsigned int timeout_cnt = 400; /* Takes max. 120us according to
90 * datasheet.
91 */
92
93 __raw_writeb((1 << (5 - bit)), ctrl_addr);
94
95 while (timeout_cnt--) {
96 if (!((__raw_readb(ctrl_addr) >> (5 - bit)) & 0x1))
97 break;
98
99 udelay(1);
100 }
101
102 return ((__raw_readb(ctrl_addr)) >> 3) & 0x1;
103}
104
105static int __init mxc_w1_probe(struct platform_device *pdev)
106{
107 struct mxc_w1_device *mdev;
108 struct resource *res;
110	int err = -ENOMEM;	/* also the result if ioremap() below fails */
110
111 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
112 if (!res)
113 return -ENODEV;
114
115 mdev = kzalloc(sizeof(struct mxc_w1_device), GFP_KERNEL);
116 if (!mdev)
117 return -ENOMEM;
118
119 mdev->clk = clk_get(&pdev->dev, "owire_clk");
120 if (!mdev->clk) {
121 err = -ENODEV;
122 goto failed_clk;
123 }
124
125 mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1;
126
127 res = request_mem_region(res->start, resource_size(res),
128 "mxc_w1");
129 if (!res) {
130 err = -EBUSY;
131 goto failed_req;
132 }
133
134 mdev->regs = ioremap(res->start, resource_size(res));
135 if (!mdev->regs) {
136 printk(KERN_ERR "Cannot map frame buffer registers\n");
137 goto failed_ioremap;
138 }
139
140 clk_enable(mdev->clk);
141 __raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
142
143 mdev->bus_master.data = mdev;
144 mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus;
145 mdev->bus_master.touch_bit = mxc_w1_ds2_touch_bit;
146
147 err = w1_add_master_device(&mdev->bus_master);
148
149 if (err)
150 goto failed_add;
151
152 platform_set_drvdata(pdev, mdev);
153 return 0;
154
155failed_add:
156 iounmap(mdev->regs);
157failed_ioremap:
158 release_mem_region(res->start, resource_size(res));
159failed_req:
160 clk_put(mdev->clk);
161failed_clk:
162 kfree(mdev);
163 return err;
164}
165
166/*
167 * disassociate the w1 device from the driver
168 */
169static int mxc_w1_remove(struct platform_device *pdev)
170{
171 struct mxc_w1_device *mdev = platform_get_drvdata(pdev);
172 struct resource *res;
173
174 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
175
176 w1_remove_master_device(&mdev->bus_master);
177
178 iounmap(mdev->regs);
179 release_mem_region(res->start, resource_size(res));
180 clk_disable(mdev->clk);
181 clk_put(mdev->clk);
182
183 platform_set_drvdata(pdev, NULL);
184
185 return 0;
186}
187
188static struct platform_driver mxc_w1_driver = {
189 .driver = {
190 .name = "mxc_w1",
191 },
192 .probe = mxc_w1_probe,
193 .remove = mxc_w1_remove,
194};
195
196static int __init mxc_w1_init(void)
197{
198 return platform_driver_register(&mxc_w1_driver);
199}
200
201static void mxc_w1_exit(void)
202{
203 platform_driver_unregister(&mxc_w1_driver);
204}
205
206module_init(mxc_w1_init);
207module_exit(mxc_w1_exit);
208
209MODULE_LICENSE("GPL");
210MODULE_AUTHOR("Freescale Semiconductors Inc");
211MODULE_DESCRIPTION("Driver for One-Wire on MXC");
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 97304bd83ec9..d8a9709f3449 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -210,6 +210,7 @@ u8 w1_read_8(struct w1_master *);
210int w1_reset_bus(struct w1_master *); 210int w1_reset_bus(struct w1_master *);
211u8 w1_calc_crc8(u8 *, int); 211u8 w1_calc_crc8(u8 *, int);
212void w1_write_block(struct w1_master *, const u8 *, int); 212void w1_write_block(struct w1_master *, const u8 *, int);
213void w1_touch_block(struct w1_master *, u8 *, int);
213u8 w1_read_block(struct w1_master *, u8 *, int); 214u8 w1_read_block(struct w1_master *, u8 *, int);
214int w1_reset_select_slave(struct w1_slave *sl); 215int w1_reset_select_slave(struct w1_slave *sl);
215void w1_next_pullup(struct w1_master *, int); 216void w1_next_pullup(struct w1_master *, int);
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 5139c25ca962..442bd8bbd4a5 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -238,7 +238,6 @@ EXPORT_SYMBOL_GPL(w1_read_8);
238 * @param dev the master device 238 * @param dev the master device
239 * @param buf pointer to the data to write 239 * @param buf pointer to the data to write
240 * @param len the number of bytes to write 240 * @param len the number of bytes to write
241 * @return the byte read
242 */ 241 */
243void w1_write_block(struct w1_master *dev, const u8 *buf, int len) 242void w1_write_block(struct w1_master *dev, const u8 *buf, int len)
244{ 243{
@@ -256,6 +255,31 @@ void w1_write_block(struct w1_master *dev, const u8 *buf, int len)
256EXPORT_SYMBOL_GPL(w1_write_block); 255EXPORT_SYMBOL_GPL(w1_write_block);
257 256
258/** 257/**
258 * Touches a series of bytes.
259 *
260 * @param dev the master device
261 * @param buf pointer to the data to write
262 * @param len the number of bytes to write
263 */
264void w1_touch_block(struct w1_master *dev, u8 *buf, int len)
265{
266 int i, j;
267 u8 tmp;
268
269 for (i = 0; i < len; ++i) {
270 tmp = 0;
271 for (j = 0; j < 8; ++j) {
272 if (j == 7)
273 w1_pre_write(dev);
274 tmp |= w1_touch_bit(dev, (buf[i] >> j) & 0x1) << j;
275 }
276
277 buf[i] = tmp;
278 }
279}
280EXPORT_SYMBOL_GPL(w1_touch_block);
281
282/**
259 * Reads a series of bytes. 283 * Reads a series of bytes.
260 * 284 *
261 * @param dev the master device 285 * @param dev the master device
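
The new w1_touch_block() above exploits the 1-wire property that a read slot is just a written 1 bit sampled back, so a single buffer serves both directions. A hedged usage sketch (buffer contents illustrative):

    u8 buf[3] = { 0xbe, 0xff, 0xff };   /* command byte + two read slots */

    w1_touch_block(dev, buf, sizeof(buf));
    /* buf[0] echoes the command that was written; buf[1] and buf[2]
     * now hold whatever the slave drove during the 0xff slots. */
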
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 65c5ebd0787e..fdf72851c574 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -47,21 +47,56 @@ void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
47 cn_netlink_send(m, 0, GFP_KERNEL); 47 cn_netlink_send(m, 0, GFP_KERNEL);
48} 48}
49 49
50static int w1_process_command_master(struct w1_master *dev, struct cn_msg *msg, 50static void w1_send_slave(struct w1_master *dev, u64 rn)
51 struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) 51{
52 struct cn_msg *msg = dev->priv;
53 struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
54 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
55 int avail;
56
57 avail = dev->priv_size - cmd->len;
58
59 if (avail > 8) {
60 u64 *data = (void *)(cmd + 1) + cmd->len;
61
62 *data = rn;
63 cmd->len += 8;
64 hdr->len += 8;
65 msg->len += 8;
66 return;
67 }
68
69 msg->ack++;
70 cn_netlink_send(msg, 0, GFP_KERNEL);
71
72 msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
73 hdr->len = sizeof(struct w1_netlink_cmd);
74 cmd->len = 0;
75}
76
77static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg,
78 unsigned int avail)
52{ 79{
53 dev_dbg(&dev->dev, "%s: %s: cmd=%02x, len=%u.\n", 80 struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
54 __func__, dev->name, cmd->cmd, cmd->len); 81 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
82 int search_type = (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH;
55 83
56 if (cmd->cmd != W1_CMD_SEARCH && cmd->cmd != W1_CMD_ALARM_SEARCH) 84 dev->priv = msg;
57 return -EINVAL; 85 dev->priv_size = avail;
86
87 w1_search_devices(dev, search_type, w1_send_slave);
88
89 msg->ack = 0;
90 cn_netlink_send(msg, 0, GFP_KERNEL);
91
92 dev->priv = NULL;
93 dev->priv_size = 0;
58 94
59 w1_search_process(dev, (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH);
60 return 0; 95 return 0;
61} 96}
62 97
63static int w1_send_read_reply(struct w1_slave *sl, struct cn_msg *msg, 98static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr,
64 struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) 99 struct w1_netlink_cmd *cmd)
65{ 100{
66 void *data; 101 void *data;
67 struct w1_netlink_msg *h; 102 struct w1_netlink_msg *h;
@@ -85,7 +120,8 @@ static int w1_send_read_reply(struct w1_slave *sl, struct cn_msg *msg,
85 memcpy(c, cmd, sizeof(struct w1_netlink_cmd)); 120 memcpy(c, cmd, sizeof(struct w1_netlink_cmd));
86 121
87 cm->ack = msg->seq+1; 122 cm->ack = msg->seq+1;
88 cm->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd) + cmd->len; 123 cm->len = sizeof(struct w1_netlink_msg) +
124 sizeof(struct w1_netlink_cmd) + cmd->len;
89 125
90 h->len = sizeof(struct w1_netlink_cmd) + cmd->len; 126 h->len = sizeof(struct w1_netlink_cmd) + cmd->len;
91 127
@@ -98,36 +134,178 @@ static int w1_send_read_reply(struct w1_slave *sl, struct cn_msg *msg,
98 return err; 134 return err;
99} 135}
100 136
101static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg, 137static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg,
102 struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) 138 struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd)
103{ 139{
104 int err = 0; 140 int err = 0;
105 141
106 dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n", 142 switch (cmd->cmd) {
107 __func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id, sl->reg_num.crc, 143 case W1_CMD_TOUCH:
108 cmd->cmd, cmd->len); 144 w1_touch_block(dev, cmd->data, cmd->len);
145 w1_send_read_reply(msg, hdr, cmd);
146 break;
147 case W1_CMD_READ:
148 w1_read_block(dev, cmd->data, cmd->len);
149 w1_send_read_reply(msg, hdr, cmd);
150 break;
151 case W1_CMD_WRITE:
152 w1_write_block(dev, cmd->data, cmd->len);
153 break;
154 default:
155 err = -EINVAL;
156 break;
157 }
158
159 return err;
160}
161
162static int w1_process_command_master(struct w1_master *dev, struct cn_msg *req_msg,
163 struct w1_netlink_msg *req_hdr, struct w1_netlink_cmd *req_cmd)
164{
165 int err = -EINVAL;
166 struct cn_msg *msg;
167 struct w1_netlink_msg *hdr;
168 struct w1_netlink_cmd *cmd;
169
170 msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
171 if (!msg)
172 return -ENOMEM;
173
174 msg->id = req_msg->id;
175 msg->seq = req_msg->seq;
176 msg->ack = 0;
177 msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
178
179 hdr = (struct w1_netlink_msg *)(msg + 1);
180 cmd = (struct w1_netlink_cmd *)(hdr + 1);
181
182 hdr->type = W1_MASTER_CMD;
183 hdr->id = req_hdr->id;
184 hdr->len = sizeof(struct w1_netlink_cmd);
185
186 cmd->cmd = req_cmd->cmd;
187 cmd->len = 0;
109 188
110 switch (cmd->cmd) { 189 switch (cmd->cmd) {
111 case W1_CMD_READ: 190 case W1_CMD_SEARCH:
112 w1_read_block(sl->master, cmd->data, cmd->len); 191 case W1_CMD_ALARM_SEARCH:
113 w1_send_read_reply(sl, msg, hdr, cmd); 192 err = w1_process_search_command(dev, msg,
114 break; 193 PAGE_SIZE - msg->len - sizeof(struct cn_msg));
115 case W1_CMD_WRITE: 194 break;
116 w1_write_block(sl->master, cmd->data, cmd->len); 195 case W1_CMD_READ:
117 break; 196 case W1_CMD_WRITE:
118 case W1_CMD_SEARCH: 197 case W1_CMD_TOUCH:
119 case W1_CMD_ALARM_SEARCH: 198 err = w1_process_command_io(dev, req_msg, req_hdr, req_cmd);
120 w1_search_process(sl->master, 199 break;
121 (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH); 200 case W1_CMD_RESET:
122 break; 201 err = w1_reset_bus(dev);
123 default: 202 break;
124 err = -1; 203 default:
125 break; 204 err = -EINVAL;
205 break;
126 } 206 }
127 207
208 kfree(msg);
128 return err; 209 return err;
129} 210}
130 211
212static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg,
213 struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd)
214{
215 dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n",
216 __func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id,
217 sl->reg_num.crc, cmd->cmd, cmd->len);
218
219 return w1_process_command_io(sl->master, msg, hdr, cmd);
220}
221
222static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mcmd)
223{
224 struct w1_master *m;
225 struct cn_msg *cn;
226 struct w1_netlink_msg *w;
227 u32 *id;
228
229 if (mcmd->type != W1_LIST_MASTERS) {
230 printk(KERN_NOTICE "%s: msg: %x.%x, wrong type: %u, len: %u.\n",
231 __func__, msg->id.idx, msg->id.val, mcmd->type, mcmd->len);
232 return -EPROTO;
233 }
234
235 cn = kmalloc(PAGE_SIZE, GFP_KERNEL);
236 if (!cn)
237 return -ENOMEM;
238
239 cn->id.idx = CN_W1_IDX;
240 cn->id.val = CN_W1_VAL;
241
242 cn->seq = msg->seq;
243 cn->ack = 1;
244 cn->len = sizeof(struct w1_netlink_msg);
245 w = (struct w1_netlink_msg *)(cn + 1);
246
247 w->type = W1_LIST_MASTERS;
248 w->status = 0;
249 w->len = 0;
250 id = (u32 *)(w + 1);
251
252 mutex_lock(&w1_mlock);
253 list_for_each_entry(m, &w1_masters, w1_master_entry) {
254 if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
255 cn_netlink_send(cn, 0, GFP_KERNEL);
256 cn->ack++;
257 cn->len = sizeof(struct w1_netlink_msg);
258 w->len = 0;
259 id = (u32 *)(w + 1);
260 }
261
262 *id = m->id;
263 w->len += sizeof(*id);
264 cn->len += sizeof(*id);
265 id++;
266 }
267 cn->ack = 0;
268 cn_netlink_send(cn, 0, GFP_KERNEL);
269 mutex_unlock(&w1_mlock);
270
271 kfree(cn);
272 return 0;
273}
274
275static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rmsg,
276 struct w1_netlink_cmd *rcmd, int error)
277{
278 struct cn_msg *cmsg;
279 struct w1_netlink_msg *msg;
280 struct w1_netlink_cmd *cmd;
281
282 cmsg = kzalloc(sizeof(*msg) + sizeof(*cmd) + sizeof(*cmsg), GFP_KERNEL);
283 if (!cmsg)
284 return -ENOMEM;
285
286 msg = (struct w1_netlink_msg *)(cmsg + 1);
287 cmd = (struct w1_netlink_cmd *)(msg + 1);
288
289 memcpy(cmsg, rcmsg, sizeof(*cmsg));
290 cmsg->len = sizeof(*msg);
291
292 memcpy(msg, rmsg, sizeof(*msg));
293 msg->len = 0;
294 msg->status = (short)-error;
295
296 if (rcmd) {
297 memcpy(cmd, rcmd, sizeof(*cmd));
298 cmd->len = 0;
299 msg->len += sizeof(*cmd);
300 cmsg->len += sizeof(*cmd);
301 }
302
303 error = cn_netlink_send(cmsg, 0, GFP_KERNEL);
304 kfree(cmsg);
305
306 return error;
307}
308
131static void w1_cn_callback(void *data) 309static void w1_cn_callback(void *data)
132{ 310{
133 struct cn_msg *msg = data; 311 struct cn_msg *msg = data;
@@ -144,6 +322,7 @@ static void w1_cn_callback(void *data)
144 322
145 dev = NULL; 323 dev = NULL;
146 sl = NULL; 324 sl = NULL;
325 cmd = NULL;
147 326
148 memcpy(&id, m->id.id, sizeof(id)); 327 memcpy(&id, m->id.id, sizeof(id));
149#if 0 328#if 0
@@ -155,15 +334,15 @@ static void w1_cn_callback(void *data)
155 break; 334 break;
156 } 335 }
157 336
158 if (!mlen)
159 goto out_cont;
160
161 if (m->type == W1_MASTER_CMD) { 337 if (m->type == W1_MASTER_CMD) {
162 dev = w1_search_master_id(m->id.mst.id); 338 dev = w1_search_master_id(m->id.mst.id);
163 } else if (m->type == W1_SLAVE_CMD) { 339 } else if (m->type == W1_SLAVE_CMD) {
164 sl = w1_search_slave(&id); 340 sl = w1_search_slave(&id);
165 if (sl) 341 if (sl)
166 dev = sl->master; 342 dev = sl->master;
343 } else {
344 err = w1_process_command_root(msg, m);
345 goto out_cont;
167 } 346 }
168 347
169 if (!dev) { 348 if (!dev) {
@@ -171,6 +350,10 @@ static void w1_cn_callback(void *data)
171 goto out_cont; 350 goto out_cont;
172 } 351 }
173 352
353 err = 0;
354 if (!mlen)
355 goto out_cont;
356
174 mutex_lock(&dev->mutex); 357 mutex_lock(&dev->mutex);
175 358
176 if (sl && w1_reset_select_slave(sl)) { 359 if (sl && w1_reset_select_slave(sl)) {
@@ -187,9 +370,12 @@ static void w1_cn_callback(void *data)
187 } 370 }
188 371
189 if (sl) 372 if (sl)
190 w1_process_command_slave(sl, msg, m, cmd); 373 err = w1_process_command_slave(sl, msg, m, cmd);
191 else 374 else
192 w1_process_command_master(dev, msg, m, cmd); 375 err = w1_process_command_master(dev, msg, m, cmd);
376
377 w1_netlink_send_error(msg, m, cmd, err);
378 err = 0;
193 379
194 cmd_data += cmd->len + sizeof(struct w1_netlink_cmd); 380 cmd_data += cmd->len + sizeof(struct w1_netlink_cmd);
195 mlen -= cmd->len + sizeof(struct w1_netlink_cmd); 381 mlen -= cmd->len + sizeof(struct w1_netlink_cmd);
@@ -200,6 +386,8 @@ out_up:
200 atomic_dec(&sl->refcnt); 386 atomic_dec(&sl->refcnt);
201 mutex_unlock(&dev->mutex); 387 mutex_unlock(&dev->mutex);
202out_cont: 388out_cont:
389 if (!cmd || err)
390 w1_netlink_send_error(msg, m, cmd, err);
203 msg->len -= sizeof(struct w1_netlink_msg) + m->len; 391 msg->len -= sizeof(struct w1_netlink_msg) + m->len;
204 m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len); 392 m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len);
205 393
@@ -209,11 +397,6 @@ out_cont:
209 if (err == -ENODEV) 397 if (err == -ENODEV)
210 err = 0; 398 err = 0;
211 } 399 }
212#if 0
213 if (err) {
214 printk("%s: malformed message. Dropping.\n", __func__);
215 }
216#endif
217} 400}
218 401
219int w1_init_netlink(void) 402int w1_init_netlink(void)
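
With the w1_netlink_msg "reserved" byte repurposed as "status" (see the w1_netlink.h hunk below), every command now gets an explicit acknowledgement. A hedged userspace fragment for checking it, assuming nl_buf holds one message received from the connector socket:

    struct cn_msg *cn = (struct cn_msg *)nl_buf;
    struct w1_netlink_msg *m = (struct w1_netlink_msg *)(cn + 1);

    if (m->status)  /* positive errno, e.g. EINVAL for a bad command */
            fprintf(stderr, "w1 request failed: %s\n", strerror(m->status));
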
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index 56122b9e9294..27e950f935b1 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -34,12 +34,13 @@ enum w1_netlink_message_types {
34 W1_MASTER_REMOVE, 34 W1_MASTER_REMOVE,
35 W1_MASTER_CMD, 35 W1_MASTER_CMD,
36 W1_SLAVE_CMD, 36 W1_SLAVE_CMD,
37 W1_LIST_MASTERS,
37}; 38};
38 39
39struct w1_netlink_msg 40struct w1_netlink_msg
40{ 41{
41 __u8 type; 42 __u8 type;
42 __u8 reserved; 43 __u8 status;
43 __u16 len; 44 __u16 len;
44 union { 45 union {
45 __u8 id[8]; 46 __u8 id[8];
@@ -51,10 +52,15 @@ struct w1_netlink_msg
51 __u8 data[0]; 52 __u8 data[0];
52}; 53};
53 54
54#define W1_CMD_READ 0x0 55enum w1_commands {
55#define W1_CMD_WRITE 0x1 56 W1_CMD_READ = 0,
56#define W1_CMD_SEARCH 0x2 57 W1_CMD_WRITE,
57#define W1_CMD_ALARM_SEARCH 0x3 58 W1_CMD_SEARCH,
59 W1_CMD_ALARM_SEARCH,
60 W1_CMD_TOUCH,
61 W1_CMD_RESET,
62 W1_CMD_MAX,
63};
58 64
59struct w1_netlink_cmd 65struct w1_netlink_cmd
60{ 66{
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 4b75a16de009..526187c8a12d 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -17,3 +17,27 @@ config XEN_SCRUB_PAGES
17 is not accidentally visible to other domains. It is more 17 is not accidentally visible to other domains. It is more
18 secure, but slightly less efficient. 18 secure, but slightly less efficient.
19 If in doubt, say yes. 19 If in doubt, say yes.
20
21config XENFS
22 tristate "Xen filesystem"
23 depends on XEN
24 default y
25 help
26 The xen filesystem provides a way for domains to share
27 information with each other and with the hypervisor.
28 For example, by reading and writing the "xenbus" file, guests
29 may pass arbitrary information to the initial domain.
30 If in doubt, say yes.
31
32config XEN_COMPAT_XENFS
33 bool "Create compatibility mount point /proc/xen"
34 depends on XENFS
35 default y
36 help
37 The old xenstore userspace tools expect to find "xenbus"
38 under /proc/xen, but "xenbus" is now found at the root of the
39 xenfs filesystem. Selecting this causes the kernel to create
40 the compatibility mount point /proc/xen if it is running on
41 a xen platform.
42 If in doubt, say yes.
43
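
As the help text notes, XEN_COMPAT_XENFS only creates the /proc/xen directory; userspace (typically an init script) still has to mount xenfs on it. A minimal C equivalent of "mount -t xenfs none /proc/xen":

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* equivalent of: mount -t xenfs none /proc/xen */
            if (mount("none", "/proc/xen", "xenfs", 0, NULL)) {
                    perror("mount xenfs");
                    return 1;
            }
            return 0;
    }
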
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index d2a8fdf0e191..ff8accc9e103 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,5 +1,7 @@
1obj-y += grant-table.o features.o events.o manage.o 1obj-y += grant-table.o features.o events.o manage.o
2obj-y += xenbus/ 2obj-y += xenbus/
3
3obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o 4obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
4obj-$(CONFIG_XEN_XENCOMM) += xencomm.o 5obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
5obj-$(CONFIG_XEN_BALLOON) += balloon.o 6obj-$(CONFIG_XEN_BALLOON) += balloon.o
7obj-$(CONFIG_XENFS) += xenfs/
\ No newline at end of file
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 9678b3e98c63..92a1ef80a288 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -136,7 +136,6 @@ EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
136/** 136/**
137 * xenbus_switch_state 137 * xenbus_switch_state
138 * @dev: xenbus device 138 * @dev: xenbus device
139 * @xbt: transaction handle
140 * @state: new state 139 * @state: new state
141 * 140 *
142 * Advertise in the store a change of the given driver to the given new_state. 141 * Advertise in the store a change of the given driver to the given new_state.
@@ -267,7 +266,7 @@ EXPORT_SYMBOL_GPL(xenbus_dev_error);
267 * @fmt: error message format 266 * @fmt: error message format
268 * 267 *
269 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by 268 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
270 * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly 269 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
271 * closedown of this driver and its peer. 270 * closedown of this driver and its peer.
272 */ 271 */
273 272
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index b2a03184a246..773d1cf23283 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -40,6 +40,7 @@
40#include <linux/ctype.h> 40#include <linux/ctype.h>
41#include <linux/fcntl.h> 41#include <linux/fcntl.h>
42#include <linux/mm.h> 42#include <linux/mm.h>
43#include <linux/proc_fs.h>
43#include <linux/notifier.h> 44#include <linux/notifier.h>
44#include <linux/kthread.h> 45#include <linux/kthread.h>
45#include <linux/mutex.h> 46#include <linux/mutex.h>
@@ -55,7 +56,10 @@
55#include "xenbus_comms.h" 56#include "xenbus_comms.h"
56#include "xenbus_probe.h" 57#include "xenbus_probe.h"
57 58
59
58int xen_store_evtchn; 60int xen_store_evtchn;
61EXPORT_SYMBOL(xen_store_evtchn);
62
59struct xenstore_domain_interface *xen_store_interface; 63struct xenstore_domain_interface *xen_store_interface;
60static unsigned long xen_store_mfn; 64static unsigned long xen_store_mfn;
61 65
@@ -166,6 +170,9 @@ static int read_backend_details(struct xenbus_device *xendev)
166 return read_otherend_details(xendev, "backend-id", "backend"); 170 return read_otherend_details(xendev, "backend-id", "backend");
167} 171}
168 172
173static struct device_attribute xenbus_dev_attrs[] = {
174 __ATTR_NULL
175};
169 176
170/* Bus type for frontend drivers. */ 177/* Bus type for frontend drivers. */
171static struct xen_bus_type xenbus_frontend = { 178static struct xen_bus_type xenbus_frontend = {
@@ -174,12 +181,13 @@ static struct xen_bus_type xenbus_frontend = {
174 .get_bus_id = frontend_bus_id, 181 .get_bus_id = frontend_bus_id,
175 .probe = xenbus_probe_frontend, 182 .probe = xenbus_probe_frontend,
176 .bus = { 183 .bus = {
177 .name = "xen", 184 .name = "xen",
178 .match = xenbus_match, 185 .match = xenbus_match,
179 .uevent = xenbus_uevent, 186 .uevent = xenbus_uevent,
180 .probe = xenbus_dev_probe, 187 .probe = xenbus_dev_probe,
181 .remove = xenbus_dev_remove, 188 .remove = xenbus_dev_remove,
182 .shutdown = xenbus_dev_shutdown, 189 .shutdown = xenbus_dev_shutdown,
190 .dev_attrs = xenbus_dev_attrs,
183 }, 191 },
184}; 192};
185 193
@@ -852,6 +860,14 @@ static int __init xenbus_probe_init(void)
852 if (!xen_initial_domain()) 860 if (!xen_initial_domain())
853 xenbus_probe(NULL); 861 xenbus_probe(NULL);
854 862
863#ifdef CONFIG_XEN_COMPAT_XENFS
864 /*
865 * Create xenfs mountpoint in /proc for compatibility with
866 * utilities that expect to find "xenbus" under "/proc/xen".
867 */
868 proc_mkdir("xen", NULL);
869#endif
870
855 return 0; 871 return 0;
856 872
857 out_unreg_back: 873 out_unreg_back:
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 7f2f91c0e11d..e325eab4724d 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -184,6 +184,7 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
184 184
185 return ret; 185 return ret;
186} 186}
187EXPORT_SYMBOL(xenbus_dev_request_and_reply);
187 188
188/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ 189/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
189static void *xs_talkv(struct xenbus_transaction t, 190static void *xs_talkv(struct xenbus_transaction t,
diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile
new file mode 100644
index 000000000000..25275c3bbdff
--- /dev/null
+++ b/drivers/xen/xenfs/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_XENFS) += xenfs.o
2
3xenfs-objs = super.o xenbus.o
\ No newline at end of file
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
new file mode 100644
index 000000000000..515741a8e6b8
--- /dev/null
+++ b/drivers/xen/xenfs/super.c
@@ -0,0 +1,64 @@
1/*
2 * xenfs.c - a filesystem for passing info between a domain and
3 * the hypervisor.
4 *
5 * 2008-10-07 Alex Zeffertt Replaced /proc/xen/xenbus with xenfs filesystem
6 * and /proc/xen compatibility mount point.
7 * Turned xenfs into a loadable module.
8 */
9
10#include <linux/kernel.h>
11#include <linux/errno.h>
12#include <linux/module.h>
13#include <linux/fs.h>
14#include <linux/magic.h>
15
16#include "xenfs.h"
17
18#include <asm/xen/hypervisor.h>
19
20MODULE_DESCRIPTION("Xen filesystem");
21MODULE_LICENSE("GPL");
22
23static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
24{
25 static struct tree_descr xenfs_files[] = {
26 [2] = {"xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR},
27 {""},
28 };
29
30 return simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
31}
32
33static int xenfs_get_sb(struct file_system_type *fs_type,
34 int flags, const char *dev_name,
35 void *data, struct vfsmount *mnt)
36{
37 return get_sb_single(fs_type, flags, data, xenfs_fill_super, mnt);
38}
39
40static struct file_system_type xenfs_type = {
41 .owner = THIS_MODULE,
42 .name = "xenfs",
43 .get_sb = xenfs_get_sb,
44 .kill_sb = kill_litter_super,
45};
46
47static int __init xenfs_init(void)
48{
49 if (xen_pv_domain())
50 return register_filesystem(&xenfs_type);
51
52 printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n");
53 return 0;
54}
55
56static void __exit xenfs_exit(void)
57{
58 if (xen_pv_domain())
59 unregister_filesystem(&xenfs_type);
60}
61
62module_init(xenfs_init);
63module_exit(xenfs_exit);
64
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
new file mode 100644
index 000000000000..875a4c59c594
--- /dev/null
+++ b/drivers/xen/xenfs/xenbus.c
@@ -0,0 +1,593 @@
1/*
2 * Driver giving user-space access to the kernel's xenbus connection
3 * to xenstore.
4 *
5 * Copyright (c) 2005, Christian Limpach
6 * Copyright (c) 2005, Rusty Russell, IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 *
32 * Changes:
33 * 2008-10-07 Alex Zeffertt Replaced /proc/xen/xenbus with xenfs filesystem
34 * and /proc/xen compatibility mount point.
35 * Turned xenfs into a loadable module.
36 */
37
38#include <linux/kernel.h>
39#include <linux/errno.h>
40#include <linux/uio.h>
41#include <linux/notifier.h>
42#include <linux/wait.h>
43#include <linux/fs.h>
44#include <linux/poll.h>
45#include <linux/mutex.h>
46#include <linux/spinlock.h>
47#include <linux/mount.h>
48#include <linux/pagemap.h>
49#include <linux/uaccess.h>
50#include <linux/init.h>
51#include <linux/namei.h>
52#include <linux/string.h>
53
54#include "xenfs.h"
55#include "../xenbus/xenbus_comms.h"
56
57#include <xen/xenbus.h>
58#include <asm/xen/hypervisor.h>
59
60/*
61 * An element of a list of outstanding transactions, for which we're
62 * still awaiting a reply.
63 */
64struct xenbus_transaction_holder {
65 struct list_head list;
66 struct xenbus_transaction handle;
67};
68
69/*
70 * A buffer of data on the queue.
71 */
72struct read_buffer {
73 struct list_head list;
74 unsigned int cons;
75 unsigned int len;
76 char msg[];
77};
78
79struct xenbus_file_priv {
80 /*
81 * msgbuffer_mutex is held while partial requests are built up
82 * and complete requests are acted on. It therefore protects
83 * the "transactions" and "watches" lists, and the partial
84 * request length and buffer.
85 *
86 * reply_mutex protects the reply being built up to return to
87 * usermode. It nests inside msgbuffer_mutex but may be held
88 * alone during a watch callback.
89 */
90 struct mutex msgbuffer_mutex;
91
92 /* In-progress transactions */
93 struct list_head transactions;
94
95 /* Active watches. */
96 struct list_head watches;
97
98 /* Partial request. */
99 unsigned int len;
100 union {
101 struct xsd_sockmsg msg;
102 char buffer[PAGE_SIZE];
103 } u;
104
105 /* Response queue. */
106 struct mutex reply_mutex;
107 struct list_head read_buffers;
108 wait_queue_head_t read_waitq;
109
110};
111
112/* Read out any raw xenbus messages queued up. */
113static ssize_t xenbus_file_read(struct file *filp,
114 char __user *ubuf,
115 size_t len, loff_t *ppos)
116{
117 struct xenbus_file_priv *u = filp->private_data;
118 struct read_buffer *rb;
119 unsigned i;
120 int ret;
121
122 mutex_lock(&u->reply_mutex);
123 while (list_empty(&u->read_buffers)) {
124 mutex_unlock(&u->reply_mutex);
125 ret = wait_event_interruptible(u->read_waitq,
126 !list_empty(&u->read_buffers));
127 if (ret)
128 return ret;
129 mutex_lock(&u->reply_mutex);
130 }
131
132 rb = list_entry(u->read_buffers.next, struct read_buffer, list);
133 i = 0;
134 while (i < len) {
135 unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
136
137 ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
138
139 i += sz - ret;
140 rb->cons += sz - ret;
141
142 if (ret != sz) {
143 if (i == 0)
144 i = -EFAULT;
145 goto out;
146 }
147
148 /* Clear out buffer if it has been consumed */
149 if (rb->cons == rb->len) {
150 list_del(&rb->list);
151 kfree(rb);
152 if (list_empty(&u->read_buffers))
153 break;
154 rb = list_entry(u->read_buffers.next,
155 struct read_buffer, list);
156 }
157 }
158
159out:
160 mutex_unlock(&u->reply_mutex);
161 return i;
162}
163
164/*
165 * Add a buffer to the queue. Caller must hold the appropriate lock
166 * if the queue is not local. (Commonly the caller will build up
167 * multiple queued buffers on a temporary local list, and then add it
168 to the appropriate list under lock once all the buffers have been
169 * successfully allocated.)
170 */
171static int queue_reply(struct list_head *queue, const void *data, size_t len)
172{
173 struct read_buffer *rb;
174
175 if (len == 0)
176 return 0;
177
178 rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
179 if (rb == NULL)
180 return -ENOMEM;
181
182 rb->cons = 0;
183 rb->len = len;
184
185 memcpy(rb->msg, data, len);
186
187 list_add_tail(&rb->list, queue);
188 return 0;
189}
190
191/*
192 * Free all the read_buffer s on a list.
193 * Caller must have sole reference to list.
194 */
195static void queue_cleanup(struct list_head *list)
196{
197 struct read_buffer *rb;
198
199 while (!list_empty(list)) {
200 rb = list_entry(list->next, struct read_buffer, list);
201 list_del(list->next);
202 kfree(rb);
203 }
204}
205
206struct watch_adapter {
207 struct list_head list;
208 struct xenbus_watch watch;
209 struct xenbus_file_priv *dev_data;
210 char *token;
211};
212
213static void free_watch_adapter(struct watch_adapter *watch)
214{
215 kfree(watch->watch.node);
216 kfree(watch->token);
217 kfree(watch);
218}
219
220static struct watch_adapter *alloc_watch_adapter(const char *path,
221 const char *token)
222{
223 struct watch_adapter *watch;
224
225 watch = kzalloc(sizeof(*watch), GFP_KERNEL);
226 if (watch == NULL)
227 goto out_fail;
228
229 watch->watch.node = kstrdup(path, GFP_KERNEL);
230 if (watch->watch.node == NULL)
231 goto out_free;
232
233 watch->token = kstrdup(token, GFP_KERNEL);
234 if (watch->token == NULL)
235 goto out_free;
236
237 return watch;
238
239out_free:
240 free_watch_adapter(watch);
241
242out_fail:
243 return NULL;
244}
245
246static void watch_fired(struct xenbus_watch *watch,
247 const char **vec,
248 unsigned int len)
249{
250 struct watch_adapter *adap;
251 struct xsd_sockmsg hdr;
252 const char *path, *token;
253 int path_len, tok_len, body_len, data_len = 0;
254 int ret;
255 LIST_HEAD(staging_q);
256
257 adap = container_of(watch, struct watch_adapter, watch);
258
259 path = vec[XS_WATCH_PATH];
260 token = adap->token;
261
262 path_len = strlen(path) + 1;
263 tok_len = strlen(token) + 1;
264 if (len > 2)
265 data_len = vec[len] - vec[2] + 1;
266 body_len = path_len + tok_len + data_len;
267
268 hdr.type = XS_WATCH_EVENT;
269 hdr.len = body_len;
270
271 mutex_lock(&adap->dev_data->reply_mutex);
272
273 ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
274 if (!ret)
275 ret = queue_reply(&staging_q, path, path_len);
276 if (!ret)
277 ret = queue_reply(&staging_q, token, tok_len);
278 if (!ret && len > 2)
279 ret = queue_reply(&staging_q, vec[2], data_len);
280
281 if (!ret) {
282 /* success: pass reply list onto watcher */
283 list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
284 wake_up(&adap->dev_data->read_waitq);
285 } else
286 queue_cleanup(&staging_q);
287
288 mutex_unlock(&adap->dev_data->reply_mutex);
289}
290
291static int xenbus_write_transaction(unsigned msg_type,
292 struct xenbus_file_priv *u)
293{
294 int rc, ret;
295 void *reply;
296 struct xenbus_transaction_holder *trans = NULL;
297 LIST_HEAD(staging_q);
298
299 if (msg_type == XS_TRANSACTION_START) {
300 trans = kmalloc(sizeof(*trans), GFP_KERNEL);
301 if (!trans) {
302 rc = -ENOMEM;
303 goto out;
304 }
305 }
306
307 reply = xenbus_dev_request_and_reply(&u->u.msg);
308 if (IS_ERR(reply)) {
309 kfree(trans);
310 rc = PTR_ERR(reply);
311 goto out;
312 }
313
314 if (msg_type == XS_TRANSACTION_START) {
315 trans->handle.id = simple_strtoul(reply, NULL, 0);
316
317 list_add(&trans->list, &u->transactions);
318 } else if (msg_type == XS_TRANSACTION_END) {
319 list_for_each_entry(trans, &u->transactions, list)
320 if (trans->handle.id == u->u.msg.tx_id)
321 break;
322 BUG_ON(&trans->list == &u->transactions);
323 list_del(&trans->list);
324
325 kfree(trans);
326 }
327
328 mutex_lock(&u->reply_mutex);
329 ret = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
330 if (!ret)
331 ret = queue_reply(&staging_q, reply, u->u.msg.len);
332 if (!ret) {
333 list_splice_tail(&staging_q, &u->read_buffers);
334 wake_up(&u->read_waitq);
335 } else {
336 queue_cleanup(&staging_q);
337 rc = ret;
338 }
339 mutex_unlock(&u->reply_mutex);
340
341 kfree(reply);
342
343out:
344 return rc;
345}
346
347static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
348{
349 struct watch_adapter *watch, *tmp_watch;
350 char *path, *token;
351 int err, rc;
352 LIST_HEAD(staging_q);
353
354 path = u->u.buffer + sizeof(u->u.msg);
355 token = memchr(path, 0, u->u.msg.len);
356 if (token == NULL) {
357 rc = -EILSEQ;
358 goto out;
359 }
360 token++;
361
362 if (msg_type == XS_WATCH) {
363 watch = alloc_watch_adapter(path, token);
364 if (watch == NULL) {
365 rc = -ENOMEM;
366 goto out;
367 }
368
369 watch->watch.callback = watch_fired;
370 watch->dev_data = u;
371
372 err = register_xenbus_watch(&watch->watch);
373 if (err) {
374 free_watch_adapter(watch);
375 rc = err;
376 goto out;
377 }
378 list_add(&watch->list, &u->watches);
379 } else {
380 list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
381 if (!strcmp(watch->token, token) &&
382 !strcmp(watch->watch.node, path)) {
383 unregister_xenbus_watch(&watch->watch);
384 list_del(&watch->list);
385 free_watch_adapter(watch);
386 break;
387 }
388 }
389 }
390
391 /* Success. Synthesize a reply to say all is OK. */
392 {
393 struct {
394 struct xsd_sockmsg hdr;
395 char body[3];
396 } __packed reply = {
397 {
398 .type = msg_type,
399 .len = sizeof(reply.body)
400 },
401 "OK"
402 };
403
404 mutex_lock(&u->reply_mutex);
405 rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
406 mutex_unlock(&u->reply_mutex);
407 }
408
409out:
410 return rc;
411}
412
413static ssize_t xenbus_file_write(struct file *filp,
414 const char __user *ubuf,
415 size_t len, loff_t *ppos)
416{
417 struct xenbus_file_priv *u = filp->private_data;
418 uint32_t msg_type;
419 int rc = len;
420 int ret;
421 LIST_HEAD(staging_q);
422
423 /*
424 * We're expecting usermode to be writing properly formed
425 * xenbus messages. If they write an incomplete message we
426 * buffer it up. Once it is complete, we act on it.
427 */
428
429 /*
430 * Make sure concurrent writers can't stomp all over each
431 * other's messages and make a mess of our partial message
432 * buffer. We don't make any attempt to stop multiple
433 * writers from making a mess of each other's incomplete
434 * messages; we're just trying to guarantee our own internal
435 * consistency and make sure that single writes are handled
436 * atomically.
437 */
438 mutex_lock(&u->msgbuffer_mutex);
439
440 /* Get this out of the way early to avoid confusion */
441 if (len == 0)
442 goto out;
443
444 /* Can't write a xenbus message larger than we can buffer */
445 if ((len + u->len) > sizeof(u->u.buffer)) {
446 /* On error, dump existing buffer */
447 u->len = 0;
448 rc = -EINVAL;
449 goto out;
450 }
451
452 ret = copy_from_user(u->u.buffer + u->len, ubuf, len);
453
454 if (ret == len) {
455 rc = -EFAULT;
456 goto out;
457 }
458
459 /* Deal with a partial copy. */
460 len -= ret;
461 rc = len;
462
463 u->len += len;
464
465 /* Return if we haven't got a full message yet */
466 if (u->len < sizeof(u->u.msg))
467 goto out; /* not even the header yet */
468
469 /* If we're expecting a message that's larger than we can
470 possibly send, dump what we have and return an error. */
471 if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
472 rc = -E2BIG;
473 u->len = 0;
474 goto out;
475 }
476
477 if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
478 goto out; /* incomplete data portion */
479
480 /*
481 * OK, now we have a complete message. Do something with it.
482 */
483
484 msg_type = u->u.msg.type;
485
486 switch (msg_type) {
487 case XS_TRANSACTION_START:
488 case XS_TRANSACTION_END:
489 case XS_DIRECTORY:
490 case XS_READ:
491 case XS_GET_PERMS:
492 case XS_RELEASE:
493 case XS_GET_DOMAIN_PATH:
494 case XS_WRITE:
495 case XS_MKDIR:
496 case XS_RM:
497 case XS_SET_PERMS:
498 /* Send out a transaction */
499 ret = xenbus_write_transaction(msg_type, u);
500 break;
501
502 case XS_WATCH:
503 case XS_UNWATCH:
504 /* (Un)Ask for some path to be watched for changes */
505 ret = xenbus_write_watch(msg_type, u);
506 break;
507
508 default:
509 ret = -EINVAL;
510 break;
511 }
512 if (ret != 0)
513 rc = ret;
514
515 /* Buffered message consumed */
516 u->len = 0;
517
518 out:
519 mutex_unlock(&u->msgbuffer_mutex);
520 return rc;
521}
522
523static int xenbus_file_open(struct inode *inode, struct file *filp)
524{
525 struct xenbus_file_priv *u;
526
527 if (xen_store_evtchn == 0)
528 return -ENOENT;
529
530 nonseekable_open(inode, filp);
531
532 u = kzalloc(sizeof(*u), GFP_KERNEL);
533 if (u == NULL)
534 return -ENOMEM;
535
536 INIT_LIST_HEAD(&u->transactions);
537 INIT_LIST_HEAD(&u->watches);
538 INIT_LIST_HEAD(&u->read_buffers);
539 init_waitqueue_head(&u->read_waitq);
540
541 mutex_init(&u->reply_mutex);
542 mutex_init(&u->msgbuffer_mutex);
543
544 filp->private_data = u;
545
546 return 0;
547}
548
549static int xenbus_file_release(struct inode *inode, struct file *filp)
550{
551 struct xenbus_file_priv *u = filp->private_data;
552 struct xenbus_transaction_holder *trans, *tmp;
553 struct watch_adapter *watch, *tmp_watch;
554
555 /*
556 * No need for locking here because there are no other users,
557 * by definition.
558 */
559
560 list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
561 xenbus_transaction_end(trans->handle, 1);
562 list_del(&trans->list);
563 kfree(trans);
564 }
565
566 list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
567 unregister_xenbus_watch(&watch->watch);
568 list_del(&watch->list);
569 free_watch_adapter(watch);
570 }
571
572 kfree(u);
573
574 return 0;
575}
576
577static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
578{
579 struct xenbus_file_priv *u = file->private_data;
580
581 poll_wait(file, &u->read_waitq, wait);
582 if (!list_empty(&u->read_buffers))
583 return POLLIN | POLLRDNORM;
584 return 0;
585}
586
587const struct file_operations xenbus_file_ops = {
588 .read = xenbus_file_read,
589 .write = xenbus_file_write,
590 .open = xenbus_file_open,
591 .release = xenbus_file_release,
592 .poll = xenbus_file_poll,
593};
diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h
new file mode 100644
index 000000000000..51f08b2d0bf1
--- /dev/null
+++ b/drivers/xen/xenfs/xenfs.h
@@ -0,0 +1,6 @@
1#ifndef _XENFS_XENBUS_H
2#define _XENFS_XENBUS_H
3
4extern const struct file_operations xenbus_file_ops;
5
6#endif /* _XENFS_XENBUS_H */
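
A hedged sketch of the wire protocol xenbus_file_write() expects from userspace: one xsd_sockmsg header plus body per request (partial writes are buffered until the message is complete), with the reply read back from the same descriptor. This fragment assumes the xsd_sockmsg and XS_READ definitions from xen's io/xs_wire.h; headers, error handling, and the reply parse are elided.

    struct xsd_sockmsg hdr = {
            .type   = XS_READ,
            .req_id = 1,
            .tx_id  = 0,                    /* not inside a transaction */
            .len    = sizeof("domid"),      /* body length, NUL included */
    };
    int fd = open("/proc/xen/xenbus", O_RDWR);

    write(fd, &hdr, sizeof(hdr));           /* buffered: header only */
    write(fd, "domid", sizeof("domid"));    /* completes the message */
    /* read(fd, ...) then returns an xsd_sockmsg reply followed by data */
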
diff --git a/firmware/Makefile b/firmware/Makefile
index 55d3082ea13e..ea1d28f9b44c 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -37,6 +37,8 @@ fw-shipped-$(CONFIG_CHELSIO_T3) += cxgb3/t3b_psram-1.1.0.bin \
37 cxgb3/t3c_psram-1.1.0.bin \ 37 cxgb3/t3c_psram-1.1.0.bin \
38 cxgb3/t3fw-7.0.0.bin 38 cxgb3/t3fw-7.0.0.bin
39fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin 39fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
40fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
41 e100/d102e_ucode.bin
40fw-shipped-$(CONFIG_SMCTR) += tr_smctr.bin 42fw-shipped-$(CONFIG_SMCTR) += tr_smctr.bin
41fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp 43fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp
42fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \ 44fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \
diff --git a/firmware/WHENCE b/firmware/WHENCE
index 1bb2cf4b1735..8b5651347791 100644
--- a/firmware/WHENCE
+++ b/firmware/WHENCE
@@ -360,6 +360,18 @@ License: GPLv2 or OpenIB.org BSD license, no source visible
360 360
361-------------------------------------------------------------------------- 361--------------------------------------------------------------------------
362 362
363Driver: e100 -- Intel PRO/100 Ethernet NIC
364
365File: e100/d101m_ucode.bin
366File: e100/d101s_ucode.bin
367File: e100/d102e_ucode.bin
368
369Licence: Unknown
370
371Found in hex form in kernel source.
372
373--------------------------------------------------------------------------
374
363Driver: acenic -- Alteon AceNIC Gigabit Ethernet card 375Driver: acenic -- Alteon AceNIC Gigabit Ethernet card
364 376
365File: acenic/tg1.bin 377File: acenic/tg1.bin
diff --git a/firmware/e100/d101m_ucode.bin.ihex b/firmware/e100/d101m_ucode.bin.ihex
new file mode 100644
index 000000000000..12971ed458a6
--- /dev/null
+++ b/firmware/e100/d101m_ucode.bin.ihex
@@ -0,0 +1,38 @@
1:10000000150255003704FFFFFFFFFFFF8907A70612
2:10001000FFFFFFFFFFFF580501000C001213100047
3:1000200008000C00160238009C001000564020000A
4:10003000CC802300560038009C0010000B4C24009C
5:1000400000080000184812003804380000000000C2
6:1000500000001400550538000080300062061000D2
7:100060006105100008040E006148130002000C0036
8:10007000933010000080300024061000610510004D
9:1000800008040E00610810007E000C00212C2200E4
10:1000900002000C00933010007A0C380000000800B9
11:1000A000903010007A0C38000000000000000000C2
12:1000B00000000000000000009C0010002D4C2400F7
13:1000C000040001000010040037043A00104004004E
14:1000D0008A07380000000000990010007A6C2000A8
15:1000E0009C001000484C24002408130001000C0060
16:1000F00013121000750C260000100400040001000B
17:100100002608130006000C00A806220026C91300CA
18:1001100013131000A80638000000000000000000C3
19:1001200000000000000000000000000000000000CF
20:10013000000000000000000000060800101B100076
21:10014000040005002608100010121000340C3800BE
22:1001500000000000000000005B1521009900100065
23:10016000596520009C0010005945240036081300F2
24:1001700000000C00620C220001000C00131B100098
25:100180000E9C22000E0C21000E6C22000E6C210031
26:100190000EFC22000E5C21000E4C2100550538009B
27:1001A0000400010000100400678C27000008040010
28:1001B0000081010037043A002608130001000C00FA
29:1001C00059052200131310005905380000000000E3
30:1001D000000000000000000000000000000000001F
31:1001E00000000000000000000000000031081300C3
32:1001F0000B0910001348120080FF0C00AB0626000C
33:100200000010040004000100A806380000000000EF
34:0B02100000000000000000004E417ED6
35:00000001FF
36/********************************************************/
37/* Micro code for 8086:1229 Rev 8 */
38/********************************************************/
diff --git a/firmware/e100/d101s_ucode.bin.ihex b/firmware/e100/d101s_ucode.bin.ihex
new file mode 100644
index 000000000000..102c7feb666e
--- /dev/null
+++ b/firmware/e100/d101s_ucode.bin.ihex
@@ -0,0 +1,38 @@
1:10000000420255007E04FFFFFFFFFFFF1808FF06B6
2:10001000FFFFFFFFFFFFA60501000C0012131000F9
3:1000200008000C00430238009C00100056402000DD
4:10003000D0802300560038009C0010008B4F240015
5:1000400000080000184812007F043800000000007B
6:1000500000001400A30538000080300010061000D6
7:100060006105100008040E006148130002000C0036
8:10007000933010000080300024061000610510004D
9:1000800008040E00610810007E000C00A12F220061
10:1000900002000C0093301000900F380000000800A0
11:1000A00090301000900F38000000000000000000A9
12:1000B00000000000000000009C001000AD4F240074
13:1000C00004000100001004007E043A001040040007
14:1000D000190838000000000099001000FD6F200092
15:1000E0009A001000FDAF20009C001000C84F2400B3
16:1000F0002408130001000C0013121000F70F260053
17:1001000000100400040001002608130006000C0083
18:100110000007220026C9130013131000000738003F
19:1001200000000000000000000000000000000000CF
20:10013000000000000000000000060800101B100076
21:10014000040005002608100010121000B60F380039
22:100150000000000000000000A91521009900100017
23:10016000A76520009A001000A7A520009C001000A1
24:10017000A74524003608130000000C00E40F2200FD
25:1001800001000C00131B10008E9F22008E0F210017
26:100190008E6F22008E6F21008EFF22008E5F210065
27:1001A0008E4F2100A3053800040001000010040058
28:1001B000E98F270000080400008101007E043A0056
29:1001C0002608130001000C00A705220013131000DD
30:1001D000A70538000000000000000000000000003B
31:1001E000000000000000000000000000000000000F
32:1001F00000000000310813000B0910001348120022
33:1002000080FF0C000307260000100400040001001A
34:0B02100000073800000000004E438093
35:00000001FF
36/********************************************************/
37/* Micro code for 8086:1229 Rev 9 */
38/********************************************************/
diff --git a/firmware/e100/d102e_ucode.bin.ihex b/firmware/e100/d102e_ucode.bin.ihex
new file mode 100644
index 000000000000..9e806da854de
--- /dev/null
+++ b/firmware/e100/d102e_ucode.bin.ihex
@@ -0,0 +1,38 @@
1:100000008F027D00F904420E850CED14E914FA14F8
2:10001000360EF70EFF1FFF1FB914E00000000000AE
3:100020000000000000000000BD14E000000000001F
4:100030000000000000000000D514E00000000000F7
5:1000400000000000000000000000000000000000B0
6:100050000000000000000000C114E00000000000EB
7:100060000000000000000000000000000000000090
8:100070000000000000000000000000000000000080
9:100080000000000000000000000000000000000070
10:100090000000000000000000C814E00000000000A4
11:1000A000000000000000000000062000EE14E00048
12:1000B000000000000000000080FF3000460E9400A9
13:1000C0000082030000201000430EE000000000004A
14:1000D000000000000000000006003000FB14E000FB
15:1000E0000000000000000000000000000000000010
16:1000F0000000000000000000000000000000000000
17:1001000000000000000000000000000000000000EF
18:100110000000000000000000416E90003C0E8000D6
19:10012000390EE00000000000FD6E9000FD0E900012
20:10013000F80EE000000000000000000000000000D9
21:1001400000000000000000000000000000000000AF
22:10015000000000000000000000000000000000009F
23:10016000000000000000000000000000000000008F
24:10017000000000000000000000000000000000007F
25:10018000000000000000000000000000000000006F
26:10019000000000000000000000000000000000005F
27:1001A000000000000000000000000000000000004F
28:1001B000000000000000000000000000000000003F
29:1001C000000000000000000000000000000000002F
30:1001D000000000000000000000000000000000001F
31:1001E000000000000000000000000000000000000F
32:1001F00000000000000000000000000000000000FF
33:1002000000000000000000000000000000000000EE
34:0B02100000000000000000002A362E55
35:00000001FF
36/********************************************************/
37/* Micro code for the 8086:1229 Rev F/10 */
38/********************************************************/
diff --git a/fs/Kconfig b/fs/Kconfig
index 32883589ee54..02cff86af1b4 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -269,6 +269,25 @@ config OCFS2_FS_POSIX_ACL
269 Posix Access Control Lists (ACLs) support permissions for users and 269 Posix Access Control Lists (ACLs) support permissions for users and
270 groups beyond the owner/group/world scheme. 270 groups beyond the owner/group/world scheme.
271 271
272config BTRFS_FS
273 tristate "Btrfs filesystem (EXPERIMENTAL) Unstable disk format"
274 depends on EXPERIMENTAL
275 select LIBCRC32C
276 select ZLIB_INFLATE
277 select ZLIB_DEFLATE
278 help
279 Btrfs is a new filesystem with extents, writable snapshotting,
280 support for multiple devices and many more features.
281
282 Btrfs is highly experimental, and THE DISK FORMAT IS NOT YET
283 FINALIZED. You should say N here unless you are interested in
284 testing Btrfs with non-critical data.
285
286 To compile this file system support as a module, choose M here. The
287 module will be called btrfs.
288
289 If unsure, say N.
290
272endif # BLOCK 291endif # BLOCK
273 292
274source "fs/notify/Kconfig" 293source "fs/notify/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index c830611550d3..bc4e14df1082 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -119,4 +119,5 @@ obj-$(CONFIG_HOSTFS) += hostfs/
119obj-$(CONFIG_HPPFS) += hppfs/ 119obj-$(CONFIG_HPPFS) += hppfs/
120obj-$(CONFIG_DEBUG_FS) += debugfs/ 120obj-$(CONFIG_DEBUG_FS) += debugfs/
121obj-$(CONFIG_OCFS2_FS) += ocfs2/ 121obj-$(CONFIG_OCFS2_FS) += ocfs2/
122obj-$(CONFIG_BTRFS_FS) += btrfs/
122obj-$(CONFIG_GFS2_FS) += gfs2/ 123obj-$(CONFIG_GFS2_FS) += gfs2/
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index c41fa2af7677..e3ff2b9e602f 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -152,8 +152,10 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
152 elf_addr_t __user *sp; 152 elf_addr_t __user *sp;
153 elf_addr_t __user *u_platform; 153 elf_addr_t __user *u_platform;
154 elf_addr_t __user *u_base_platform; 154 elf_addr_t __user *u_base_platform;
155 elf_addr_t __user *u_rand_bytes;
155 const char *k_platform = ELF_PLATFORM; 156 const char *k_platform = ELF_PLATFORM;
156 const char *k_base_platform = ELF_BASE_PLATFORM; 157 const char *k_base_platform = ELF_BASE_PLATFORM;
158 unsigned char k_rand_bytes[16];
157 int items; 159 int items;
158 elf_addr_t *elf_info; 160 elf_addr_t *elf_info;
159 int ei_index = 0; 161 int ei_index = 0;
@@ -196,6 +198,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
196 return -EFAULT; 198 return -EFAULT;
197 } 199 }
198 200
201 /*
202 * Generate 16 random bytes for userspace PRNG seeding.
203 */
204 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
205 u_rand_bytes = (elf_addr_t __user *)
206 STACK_ALLOC(p, sizeof(k_rand_bytes));
207 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
208 return -EFAULT;
209
199 /* Create the ELF interpreter info */ 210 /* Create the ELF interpreter info */
200 elf_info = (elf_addr_t *)current->mm->saved_auxv; 211 elf_info = (elf_addr_t *)current->mm->saved_auxv;
201 /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */ 212 /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
@@ -228,6 +239,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
228 NEW_AUX_ENT(AT_GID, cred->gid); 239 NEW_AUX_ENT(AT_GID, cred->gid);
229 NEW_AUX_ENT(AT_EGID, cred->egid); 240 NEW_AUX_ENT(AT_EGID, cred->egid);
230 NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm)); 241 NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
242 NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
231 NEW_AUX_ENT(AT_EXECFN, bprm->exec); 243 NEW_AUX_ENT(AT_EXECFN, bprm->exec);
232 if (k_platform) { 244 if (k_platform) {
233 NEW_AUX_ENT(AT_PLATFORM, 245 NEW_AUX_ENT(AT_PLATFORM,
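
For reference, a hedged sketch of how userspace can locate the 16 AT_RANDOM bytes this hunk exports, by walking the auxiliary vector it receives at startup (modern glibc wraps the same walk as getauxval(AT_RANDOM)):

    #include <elf.h>
    #include <stddef.h>

    static unsigned char *find_at_random(Elf64_auxv_t *auxv)
    {
            for (; auxv->a_type != AT_NULL; auxv++)
                    if (auxv->a_type == AT_RANDOM)
                            return (unsigned char *)auxv->a_un.a_val;
            return NULL;    /* kernel predates AT_RANDOM */
    }
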
diff --git a/fs/bio.c b/fs/bio.c
index 711cee103602..062299acbccd 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -788,6 +788,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
788 int i, ret; 788 int i, ret;
789 int nr_pages = 0; 789 int nr_pages = 0;
790 unsigned int len = 0; 790 unsigned int len = 0;
791 unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
791 792
792 for (i = 0; i < iov_count; i++) { 793 for (i = 0; i < iov_count; i++) {
793 unsigned long uaddr; 794 unsigned long uaddr;
@@ -814,35 +815,42 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
814 bio->bi_rw |= (!write_to_vm << BIO_RW); 815 bio->bi_rw |= (!write_to_vm << BIO_RW);
815 816
816 ret = 0; 817 ret = 0;
817 i = 0; 818
819 if (map_data) {
820 nr_pages = 1 << map_data->page_order;
821 i = map_data->offset / PAGE_SIZE;
822 }
818 while (len) { 823 while (len) {
819 unsigned int bytes; 824 unsigned int bytes = PAGE_SIZE;
820 825
821 if (map_data) 826 bytes -= offset;
822 bytes = 1U << (PAGE_SHIFT + map_data->page_order);
823 else
824 bytes = PAGE_SIZE;
825 827
826 if (bytes > len) 828 if (bytes > len)
827 bytes = len; 829 bytes = len;
828 830
829 if (map_data) { 831 if (map_data) {
830 if (i == map_data->nr_entries) { 832 if (i == map_data->nr_entries * nr_pages) {
831 ret = -ENOMEM; 833 ret = -ENOMEM;
832 break; 834 break;
833 } 835 }
834 page = map_data->pages[i++]; 836
835 } else 837 page = map_data->pages[i / nr_pages];
838 page += (i % nr_pages);
839
840 i++;
841 } else {
836 page = alloc_page(q->bounce_gfp | gfp_mask); 842 page = alloc_page(q->bounce_gfp | gfp_mask);
837 if (!page) { 843 if (!page) {
838 ret = -ENOMEM; 844 ret = -ENOMEM;
839 break; 845 break;
846 }
840 } 847 }
841 848
842 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) 849 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
843 break; 850 break;
844 851
845 len -= bytes; 852 len -= bytes;
853 offset = 0;
846 } 854 }
847 855
848 if (ret) 856 if (ret)
@@ -851,7 +859,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
851 /* 859 /*
852 * success 860 * success
853 */ 861 */
854 if (!write_to_vm) { 862 if (!write_to_vm && (!map_data || !map_data->null_mapped)) {
855 ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0); 863 ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
856 if (ret) 864 if (ret)
857 goto cleanup; 865 goto cleanup;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b957717e25ab..ac7031f12ea5 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1005,6 +1005,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1005 } 1005 }
1006 1006
1007 lock_kernel(); 1007 lock_kernel();
1008 restart:
1008 1009
1009 ret = -ENXIO; 1010 ret = -ENXIO;
1010 disk = get_gendisk(bdev->bd_dev, &partno); 1011 disk = get_gendisk(bdev->bd_dev, &partno);
@@ -1025,6 +1026,19 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1025 1026
1026 if (disk->fops->open) { 1027 if (disk->fops->open) {
1027 ret = disk->fops->open(bdev, mode); 1028 ret = disk->fops->open(bdev, mode);
1029 if (ret == -ERESTARTSYS) {
1030 /* Lost a race with 'disk' being
1031 * deleted, try again.
1032 * See md.c
1033 */
1034 disk_put_part(bdev->bd_part);
1035 bdev->bd_part = NULL;
1036 module_put(disk->fops->owner);
1037 put_disk(disk);
1038 bdev->bd_disk = NULL;
1039 mutex_unlock(&bdev->bd_mutex);
1040 goto restart;
1041 }
1028 if (ret) 1042 if (ret)
1029 goto out_clear; 1043 goto out_clear;
1030 } 1044 }
@@ -1220,6 +1234,20 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1220 return blkdev_ioctl(bdev, mode, cmd, arg); 1234 return blkdev_ioctl(bdev, mode, cmd, arg);
1221} 1235}
1222 1236
1237/*
1238 * Try to release a page associated with block device when the system
1239 * is under memory pressure.
1240 */
1241static int blkdev_releasepage(struct page *page, gfp_t wait)
1242{
1243 struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;
1244
1245 if (super && super->s_op->bdev_try_to_free_page)
1246 return super->s_op->bdev_try_to_free_page(super, page, wait);
1247
1248 return try_to_free_buffers(page);
1249}
1250
1223static const struct address_space_operations def_blk_aops = { 1251static const struct address_space_operations def_blk_aops = {
1224 .readpage = blkdev_readpage, 1252 .readpage = blkdev_readpage,
1225 .writepage = blkdev_writepage, 1253 .writepage = blkdev_writepage,
@@ -1227,6 +1255,7 @@ static const struct address_space_operations def_blk_aops = {
1227 .write_begin = blkdev_write_begin, 1255 .write_begin = blkdev_write_begin,
1228 .write_end = blkdev_write_end, 1256 .write_end = blkdev_write_end,
1229 .writepages = generic_writepages, 1257 .writepages = generic_writepages,
1258 .releasepage = blkdev_releasepage,
1230 .direct_IO = blkdev_direct_IO, 1259 .direct_IO = blkdev_direct_IO,
1231}; 1260};
1232 1261
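
A hedged sketch of a filesystem-side implementation of the new bdev_try_to_free_page hook consumed by blkdev_releasepage() above; a journalling filesystem would drop its own references to the page's buffers before falling back to the generic helper. Names are hypothetical.

    static int myfs_bdev_try_to_free_page(struct super_block *sb,
                                          struct page *page, gfp_t wait)
    {
            /* release any journal pins on this metadata page first ... */
            return try_to_free_buffers(page);
    }

    static const struct super_operations myfs_sops = {
            /* ... */
            .bdev_try_to_free_page = myfs_bdev_try_to_free_page,
    };
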
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
new file mode 100644
index 000000000000..d2cf5a54a4b8
--- /dev/null
+++ b/fs/btrfs/Makefile
@@ -0,0 +1,25 @@
1ifneq ($(KERNELRELEASE),)
2# kbuild part of makefile
3
4obj-$(CONFIG_BTRFS_FS) := btrfs.o
5btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
6 file-item.o inode-item.o inode-map.o disk-io.o \
7 transaction.o inode.o file.o tree-defrag.o \
8 extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
9 extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
10 ref-cache.o export.o tree-log.o acl.o free-space-cache.o zlib.o \
11 compression.o
12else
13
14# Normal Makefile
15
16KERNELDIR := /lib/modules/`uname -r`/build
17all:
18 $(MAKE) -C $(KERNELDIR) M=`pwd` CONFIG_BTRFS_FS=m modules
19
20modules_install:
21 $(MAKE) -C $(KERNELDIR) M=`pwd` modules_install
22clean:
23 $(MAKE) -C $(KERNELDIR) M=`pwd` clean
24
25endif
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
new file mode 100644
index 000000000000..1d53b62dbba5
--- /dev/null
+++ b/fs/btrfs/acl.c
@@ -0,0 +1,351 @@
1/*
2 * Copyright (C) 2007 Red Hat. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/fs.h>
20#include <linux/string.h>
21#include <linux/xattr.h>
22#include <linux/posix_acl_xattr.h>
23#include <linux/posix_acl.h>
24#include <linux/sched.h>
25
26#include "ctree.h"
27#include "btrfs_inode.h"
28#include "xattr.h"
29
30#ifdef CONFIG_FS_POSIX_ACL
31
32static void btrfs_update_cached_acl(struct inode *inode,
33 struct posix_acl **p_acl,
34 struct posix_acl *acl)
35{
36 spin_lock(&inode->i_lock);
37 if (*p_acl && *p_acl != BTRFS_ACL_NOT_CACHED)
38 posix_acl_release(*p_acl);
39 *p_acl = posix_acl_dup(acl);
40 spin_unlock(&inode->i_lock);
41}
42
43static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
44{
45 int size;
46 const char *name;
47 char *value = NULL;
48 struct posix_acl *acl = NULL, **p_acl;
49
50 switch (type) {
51 case ACL_TYPE_ACCESS:
52 name = POSIX_ACL_XATTR_ACCESS;
53 p_acl = &BTRFS_I(inode)->i_acl;
54 break;
55 case ACL_TYPE_DEFAULT:
56 name = POSIX_ACL_XATTR_DEFAULT;
57 p_acl = &BTRFS_I(inode)->i_default_acl;
58 break;
59 default:
60 return ERR_PTR(-EINVAL);
61 }
62
63 spin_lock(&inode->i_lock);
64 if (*p_acl != BTRFS_ACL_NOT_CACHED)
65 acl = posix_acl_dup(*p_acl);
66 spin_unlock(&inode->i_lock);
67
68 if (acl)
69 return acl;
70
71
72 size = __btrfs_getxattr(inode, name, "", 0);
73 if (size > 0) {
74 value = kzalloc(size, GFP_NOFS);
75 if (!value)
76 return ERR_PTR(-ENOMEM);
77 size = __btrfs_getxattr(inode, name, value, size);
78 if (size > 0) {
79 acl = posix_acl_from_xattr(value, size);
80 btrfs_update_cached_acl(inode, p_acl, acl);
81 }
82 kfree(value);
83 } else if (size == -ENOENT) {
84 acl = NULL;
85 btrfs_update_cached_acl(inode, p_acl, acl);
86 }
87
88 return acl;
89}
90
91static int btrfs_xattr_get_acl(struct inode *inode, int type,
92 void *value, size_t size)
93{
94 struct posix_acl *acl;
95 int ret = 0;
96
97 acl = btrfs_get_acl(inode, type);
98
99 if (IS_ERR(acl))
100 return PTR_ERR(acl);
101 if (acl == NULL)
102 return -ENODATA;
103 ret = posix_acl_to_xattr(acl, value, size);
104 posix_acl_release(acl);
105
106 return ret;
107}
108
109/*
110 * Needs to be called with fs_mutex held
111 */
112static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
113{
114 int ret, size = 0;
115 const char *name;
116 struct posix_acl **p_acl;
117 char *value = NULL;
118 mode_t mode;
119
120 if (acl) {
121 ret = posix_acl_valid(acl);
122 if (ret < 0)
123 return ret;
124 ret = 0;
125 }
126
127 switch (type) {
128 case ACL_TYPE_ACCESS:
129 mode = inode->i_mode;
130 ret = posix_acl_equiv_mode(acl, &mode);
131 if (ret < 0)
132 return ret;
133 ret = 0;
134 inode->i_mode = mode;
135 name = POSIX_ACL_XATTR_ACCESS;
136 p_acl = &BTRFS_I(inode)->i_acl;
137 break;
138 case ACL_TYPE_DEFAULT:
139 if (!S_ISDIR(inode->i_mode))
140 return acl ? -EINVAL : 0;
141 name = POSIX_ACL_XATTR_DEFAULT;
142 p_acl = &BTRFS_I(inode)->i_default_acl;
143 break;
144 default:
145 return -EINVAL;
146 }
147
148 if (acl) {
149 size = posix_acl_xattr_size(acl->a_count);
150 value = kmalloc(size, GFP_NOFS);
151 if (!value) {
152 ret = -ENOMEM;
153 goto out;
154 }
155
156 ret = posix_acl_to_xattr(acl, value, size);
157 if (ret < 0)
158 goto out;
159 }
160
161 ret = __btrfs_setxattr(inode, name, value, size, 0);
162
163out:
164 kfree(value);
165
166 if (!ret)
167 btrfs_update_cached_acl(inode, p_acl, acl);
168
169 return ret;
170}
171
172static int btrfs_xattr_set_acl(struct inode *inode, int type,
173 const void *value, size_t size)
174{
175 int ret = 0;
176 struct posix_acl *acl = NULL;
177
178 if (value) {
179 acl = posix_acl_from_xattr(value, size);
180 if (acl == NULL) {
181 value = NULL;
182 size = 0;
183 } else if (IS_ERR(acl)) {
184 return PTR_ERR(acl);
185 }
186 }
187
188 ret = btrfs_set_acl(inode, acl, type);
189
190 posix_acl_release(acl);
191
192 return ret;
193}
194
195
196static int btrfs_xattr_acl_access_get(struct inode *inode, const char *name,
197 void *value, size_t size)
198{
199 return btrfs_xattr_get_acl(inode, ACL_TYPE_ACCESS, value, size);
200}
201
202static int btrfs_xattr_acl_access_set(struct inode *inode, const char *name,
203 const void *value, size_t size, int flags)
204{
205 return btrfs_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
206}
207
208static int btrfs_xattr_acl_default_get(struct inode *inode, const char *name,
209 void *value, size_t size)
210{
211 return btrfs_xattr_get_acl(inode, ACL_TYPE_DEFAULT, value, size);
212}
213
214static int btrfs_xattr_acl_default_set(struct inode *inode, const char *name,
215 const void *value, size_t size, int flags)
216{
217 return btrfs_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
218}
219
220int btrfs_check_acl(struct inode *inode, int mask)
221{
222 struct posix_acl *acl;
223 int error = -EAGAIN;
224
225 acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
226
227 if (IS_ERR(acl))
228 return PTR_ERR(acl);
229 if (acl) {
230 error = posix_acl_permission(inode, acl, mask);
231 posix_acl_release(acl);
232 }
233
234 return error;
235}
236
237/*
238 * btrfs_init_acl is generally called with fs_mutex already held, so the
239 * locking here has been written to work with that. If that locking ever
240 * changes, the ACL locking here needs to be re-evaluated.
241 */
242int btrfs_init_acl(struct inode *inode, struct inode *dir)
243{
244 struct posix_acl *acl = NULL;
245 int ret = 0;
246
247 /* this happens with subvols */
248 if (!dir)
249 return 0;
250
251 if (!S_ISLNK(inode->i_mode)) {
252 if (IS_POSIXACL(dir)) {
253 acl = btrfs_get_acl(dir, ACL_TYPE_DEFAULT);
254 if (IS_ERR(acl))
255 return PTR_ERR(acl);
256 }
257
258 if (!acl)
259 inode->i_mode &= ~current->fs->umask;
260 }
261
262 if (IS_POSIXACL(dir) && acl) {
263 struct posix_acl *clone;
264 mode_t mode;
265
266 if (S_ISDIR(inode->i_mode)) {
267 ret = btrfs_set_acl(inode, acl, ACL_TYPE_DEFAULT);
268 if (ret)
269 goto failed;
270 }
271 clone = posix_acl_clone(acl, GFP_NOFS);
272 ret = -ENOMEM;
273 if (!clone)
274 goto failed;
275
276 mode = inode->i_mode;
277 ret = posix_acl_create_masq(clone, &mode);
278 if (ret >= 0) {
279 inode->i_mode = mode;
280 if (ret > 0) {
281 /* we need an acl */
282 ret = btrfs_set_acl(inode, clone,
283 ACL_TYPE_ACCESS);
284 }
285 }
286 }
287failed:
288 posix_acl_release(acl);
289
290 return ret;
291}
292
293int btrfs_acl_chmod(struct inode *inode)
294{
295 struct posix_acl *acl, *clone;
296 int ret = 0;
297
298 if (S_ISLNK(inode->i_mode))
299 return -EOPNOTSUPP;
300
301 if (!IS_POSIXACL(inode))
302 return 0;
303
304 acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
305 if (IS_ERR(acl) || !acl)
306 return PTR_ERR(acl);
307
308 clone = posix_acl_clone(acl, GFP_KERNEL);
309 posix_acl_release(acl);
310 if (!clone)
311 return -ENOMEM;
312
313 ret = posix_acl_chmod_masq(clone, inode->i_mode);
314 if (!ret)
315 ret = btrfs_set_acl(inode, clone, ACL_TYPE_ACCESS);
316
317 posix_acl_release(clone);
318
319 return ret;
320}
321
322struct xattr_handler btrfs_xattr_acl_default_handler = {
323 .prefix = POSIX_ACL_XATTR_DEFAULT,
324 .get = btrfs_xattr_acl_default_get,
325 .set = btrfs_xattr_acl_default_set,
326};
327
328struct xattr_handler btrfs_xattr_acl_access_handler = {
329 .prefix = POSIX_ACL_XATTR_ACCESS,
330 .get = btrfs_xattr_acl_access_get,
331 .set = btrfs_xattr_acl_access_set,
332};
333
334#else /* CONFIG_FS_POSIX_ACL */
335
336int btrfs_acl_chmod(struct inode *inode)
337{
338 return 0;
339}
340
341int btrfs_init_acl(struct inode *inode, struct inode *dir)
342{
343 return 0;
344}
345
346int btrfs_check_acl(struct inode *inode, int mask)
347{
348 return 0;
349}
350
351#endif /* CONFIG_FS_POSIX_ACL */
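
The getter above layers a per-inode cache over the xattr lookup: under inode->i_lock, the cached pointer is in one of three states — BTRFS_ACL_NOT_CACHED (lookup never done), NULL (looked up, no ACL on disk), or a valid ACL that each reader duplicates a reference to. A minimal userspace sketch of that three-state pattern follows; the names (acl_cache, cache_get, cache_set) are illustrative, not btrfs code.

    #include <pthread.h>

    struct acl { int refcount; };

    #define NOT_CACHED ((struct acl *)-1)   /* sentinel: lookup never done */

    struct acl_cache {
            pthread_mutex_t lock;           /* stands in for inode->i_lock */
            struct acl *cached;             /* NOT_CACHED, NULL, or valid */
    };

    /* reader: NOT_CACHED tells the caller to do the slow lookup,
     * NULL means "known to have no ACL", else a referenced ACL */
    static struct acl *cache_get(struct acl_cache *c)
    {
            struct acl *acl = NOT_CACHED;

            pthread_mutex_lock(&c->lock);
            if (c->cached != NOT_CACHED) {
                    acl = c->cached;
                    if (acl)
                            acl->refcount++;    /* posix_acl_dup() upstream */
            }
            pthread_mutex_unlock(&c->lock);
            return acl;
    }

    /* writer: drop the old cached reference (if any), install the new one */
    static void cache_set(struct acl_cache *c, struct acl *acl)
    {
            pthread_mutex_lock(&c->lock);
            if (c->cached && c->cached != NOT_CACHED)
                    c->cached->refcount--;      /* posix_acl_release() */
            if (acl)
                    acl->refcount++;            /* posix_acl_dup() */
            c->cached = acl;
            pthread_mutex_unlock(&c->lock);
    }
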
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
new file mode 100644
index 000000000000..8e2fec05dbe0
--- /dev/null
+++ b/fs/btrfs/async-thread.c
@@ -0,0 +1,419 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/version.h>
20#include <linux/kthread.h>
21#include <linux/list.h>
22#include <linux/spinlock.h>
23#include <linux/freezer.h>
24#include "async-thread.h"
25
26#define WORK_QUEUED_BIT 0
27#define WORK_DONE_BIT 1
28#define WORK_ORDER_DONE_BIT 2
29
30/*
31 * container for the kthread task pointer and the list of pending work
32 * One of these is allocated per thread.
33 */
34struct btrfs_worker_thread {
35 /* pool we belong to */
36 struct btrfs_workers *workers;
37
38 /* list of struct btrfs_work that are waiting for service */
39 struct list_head pending;
40
41 /* list of worker threads from struct btrfs_workers */
42 struct list_head worker_list;
43
44 /* kthread */
45 struct task_struct *task;
46
47 /* number of things on the pending list */
48 atomic_t num_pending;
49
50 unsigned long sequence;
51
52 /* protects the pending list. */
53 spinlock_t lock;
54
55 /* set to non-zero when this thread is already awake and kicking */
56 int working;
57
58 /* are we currently idle */
59 int idle;
60};
61
62/*
63 * helper function to move a thread onto the idle list after it
64 * has finished some requests.
65 */
66static void check_idle_worker(struct btrfs_worker_thread *worker)
67{
68 if (!worker->idle && atomic_read(&worker->num_pending) <
69 worker->workers->idle_thresh / 2) {
70 unsigned long flags;
71 spin_lock_irqsave(&worker->workers->lock, flags);
72 worker->idle = 1;
73 list_move(&worker->worker_list, &worker->workers->idle_list);
74 spin_unlock_irqrestore(&worker->workers->lock, flags);
75 }
76}
77
78/*
79 * helper function to move a thread off the idle list after new
80 * pending work is added.
81 */
82static void check_busy_worker(struct btrfs_worker_thread *worker)
83{
84 if (worker->idle && atomic_read(&worker->num_pending) >=
85 worker->workers->idle_thresh) {
86 unsigned long flags;
87 spin_lock_irqsave(&worker->workers->lock, flags);
88 worker->idle = 0;
89 list_move_tail(&worker->worker_list,
90 &worker->workers->worker_list);
91 spin_unlock_irqrestore(&worker->workers->lock, flags);
92 }
93}
94
95static noinline int run_ordered_completions(struct btrfs_workers *workers,
96 struct btrfs_work *work)
97{
98 unsigned long flags;
99
100 if (!workers->ordered)
101 return 0;
102
103 set_bit(WORK_DONE_BIT, &work->flags);
104
105 spin_lock_irqsave(&workers->lock, flags);
106
107 while (!list_empty(&workers->order_list)) {
108 work = list_entry(workers->order_list.next,
109 struct btrfs_work, order_list);
110
111 if (!test_bit(WORK_DONE_BIT, &work->flags))
112 break;
113
114 /* we are going to call the ordered done function, but
115 * we leave the work item on the list as a barrier so
116 * that later work items that are done don't have their
117 * functions called before this one returns
118 */
119 if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
120 break;
121
122 spin_unlock_irqrestore(&workers->lock, flags);
123
124 work->ordered_func(work);
125
126 /* now take the lock again and call the freeing code */
127 spin_lock_irqsave(&workers->lock, flags);
128 list_del(&work->order_list);
129 work->ordered_free(work);
130 }
131
132 spin_unlock_irqrestore(&workers->lock, flags);
133 return 0;
134}
135
136/*
137 * main loop for servicing work items
138 */
139static int worker_loop(void *arg)
140{
141 struct btrfs_worker_thread *worker = arg;
142 struct list_head *cur;
143 struct btrfs_work *work;
144 do {
145 spin_lock_irq(&worker->lock);
146 while (!list_empty(&worker->pending)) {
147 cur = worker->pending.next;
148 work = list_entry(cur, struct btrfs_work, list);
149 list_del(&work->list);
150 clear_bit(WORK_QUEUED_BIT, &work->flags);
151
152 work->worker = worker;
153 spin_unlock_irq(&worker->lock);
154
155 work->func(work);
156
157 atomic_dec(&worker->num_pending);
158 /*
159 * unless this is an ordered work queue,
160 * 'work' was probably freed by func above.
161 */
162 run_ordered_completions(worker->workers, work);
163
164 spin_lock_irq(&worker->lock);
165 check_idle_worker(worker);
166
167 }
168 worker->working = 0;
169 if (freezing(current)) {
170 refrigerator();
171 } else {
172 set_current_state(TASK_INTERRUPTIBLE);
173 spin_unlock_irq(&worker->lock);
174 if (!kthread_should_stop())
175 schedule();
176 __set_current_state(TASK_RUNNING);
177 }
178 } while (!kthread_should_stop());
179 return 0;
180}
181
182/*
183 * this will wait for all the worker threads to shutdown
184 */
185int btrfs_stop_workers(struct btrfs_workers *workers)
186{
187 struct list_head *cur;
188 struct btrfs_worker_thread *worker;
189
190 list_splice_init(&workers->idle_list, &workers->worker_list);
191 while (!list_empty(&workers->worker_list)) {
192 cur = workers->worker_list.next;
193 worker = list_entry(cur, struct btrfs_worker_thread,
194 worker_list);
195 kthread_stop(worker->task);
196 list_del(&worker->worker_list);
197 kfree(worker);
198 }
199 return 0;
200}
201
202/*
203 * simple init on struct btrfs_workers
204 */
205void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
206{
207 workers->num_workers = 0;
208 INIT_LIST_HEAD(&workers->worker_list);
209 INIT_LIST_HEAD(&workers->idle_list);
210 INIT_LIST_HEAD(&workers->order_list);
211 spin_lock_init(&workers->lock);
212 workers->max_workers = max;
213 workers->idle_thresh = 32;
214 workers->name = name;
215 workers->ordered = 0;
216}
217
218/*
219 * starts new worker threads. This does not enforce the max worker
220 * count in case you need to temporarily go past it.
221 */
222int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
223{
224 struct btrfs_worker_thread *worker;
225 int ret = 0;
226 int i;
227
228 for (i = 0; i < num_workers; i++) {
229 worker = kzalloc(sizeof(*worker), GFP_NOFS);
230 if (!worker) {
231 ret = -ENOMEM;
232 goto fail;
233 }
234
235 INIT_LIST_HEAD(&worker->pending);
236 INIT_LIST_HEAD(&worker->worker_list);
237 spin_lock_init(&worker->lock);
238 atomic_set(&worker->num_pending, 0);
239 worker->task = kthread_run(worker_loop, worker,
240 "btrfs-%s-%d", workers->name,
241 workers->num_workers + i);
242 worker->workers = workers;
243 if (IS_ERR(worker->task)) {
244 kfree(worker);
245 ret = PTR_ERR(worker->task);
246 goto fail;
247 }
248
249 spin_lock_irq(&workers->lock);
250 list_add_tail(&worker->worker_list, &workers->idle_list);
251 worker->idle = 1;
252 workers->num_workers++;
253 spin_unlock_irq(&workers->lock);
254 }
255 return 0;
256fail:
257 btrfs_stop_workers(workers);
258 return ret;
259}
260
261/*
262 * run through the list and find a worker thread that doesn't have a lot
263 * to do right now. This can return null if we aren't yet at the thread
264 * count limit and all of the threads are busy.
265 */
266static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
267{
268 struct btrfs_worker_thread *worker;
269 struct list_head *next;
270 int enforce_min = workers->num_workers < workers->max_workers;
271
272 /*
273 * if we find an idle thread, don't move it to the end of the
274 * idle list. This improves the chance that the next submission
275 * will reuse the same thread, and maybe catch it while it is still
276 * working
277 */
278 if (!list_empty(&workers->idle_list)) {
279 next = workers->idle_list.next;
280 worker = list_entry(next, struct btrfs_worker_thread,
281 worker_list);
282 return worker;
283 }
284 if (enforce_min || list_empty(&workers->worker_list))
285 return NULL;
286
287 /*
288 * if we pick a busy task, move the task to the end of the list.
289 * hopefully this will keep things somewhat evenly balanced.
290 * Do the move in batches based on the sequence number. This groups
291 * requests submitted at roughly the same time onto the same worker.
292 */
293 next = workers->worker_list.next;
294 worker = list_entry(next, struct btrfs_worker_thread, worker_list);
295 atomic_inc(&worker->num_pending);
296 worker->sequence++;
297
298 if (worker->sequence % workers->idle_thresh == 0)
299 list_move_tail(next, &workers->worker_list);
300 return worker;
301}
302
303/*
304 * selects a worker thread to take the next job. This will either find
305 * an idle worker, start a new worker up to the max count, or just return
306 * one of the existing busy workers.
307 */
308static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
309{
310 struct btrfs_worker_thread *worker;
311 unsigned long flags;
312
313again:
314 spin_lock_irqsave(&workers->lock, flags);
315 worker = next_worker(workers);
316 spin_unlock_irqrestore(&workers->lock, flags);
317
318 if (!worker) {
319 spin_lock_irqsave(&workers->lock, flags);
320 if (workers->num_workers >= workers->max_workers) {
321 struct list_head *fallback = NULL;
322 /*
323 * we have failed to find any workers, so just
324 * fall back to whichever one we can find
325 */
326 if (!list_empty(&workers->worker_list))
327 fallback = workers->worker_list.next;
328 if (!list_empty(&workers->idle_list))
329 fallback = workers->idle_list.next;
330 BUG_ON(!fallback);
331 worker = list_entry(fallback,
332 struct btrfs_worker_thread, worker_list);
333 spin_unlock_irqrestore(&workers->lock, flags);
334 } else {
335 spin_unlock_irqrestore(&workers->lock, flags);
336 /* we're below the limit, start another worker */
337 btrfs_start_workers(workers, 1);
338 goto again;
339 }
340 }
341 return worker;
342}
343
344/*
345 * btrfs_requeue_work just puts the work item back on the tail of the list
346 * it was taken from. It is intended for use with long running work functions
347 * that make some progress and want to give the cpu up for others.
348 */
349int btrfs_requeue_work(struct btrfs_work *work)
350{
351 struct btrfs_worker_thread *worker = work->worker;
352 unsigned long flags;
353
354 if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
355 goto out;
356
357 spin_lock_irqsave(&worker->lock, flags);
358 atomic_inc(&worker->num_pending);
359 list_add_tail(&work->list, &worker->pending);
360
361 /* by definition we're busy, take ourselves off the idle
362 * list
363 */
364 if (worker->idle) {
365 spin_lock_irqsave(&worker->workers->lock, flags);
366 worker->idle = 0;
367 list_move_tail(&worker->worker_list,
368 &worker->workers->worker_list);
369 spin_unlock_irqrestore(&worker->workers->lock, flags);
370 }
371
372 spin_unlock_irqrestore(&worker->lock, flags);
373
374out:
375 return 0;
376}
377
378/*
379 * places a struct btrfs_work into the pending queue of one of the kthreads
380 */
381int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
382{
383 struct btrfs_worker_thread *worker;
384 unsigned long flags;
385 int wake = 0;
386
387 /* don't requeue something already on a list */
388 if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
389 goto out;
390
391 worker = find_worker(workers);
392 if (workers->ordered) {
393 spin_lock_irqsave(&workers->lock, flags);
394 list_add_tail(&work->order_list, &workers->order_list);
395 spin_unlock_irqrestore(&workers->lock, flags);
396 } else {
397 INIT_LIST_HEAD(&work->order_list);
398 }
399
400 spin_lock_irqsave(&worker->lock, flags);
401 atomic_inc(&worker->num_pending);
402 check_busy_worker(worker);
403 list_add_tail(&work->list, &worker->pending);
404
405 /*
406 * avoid calling into wake_up_process if this thread has already
407 * been kicked
408 */
409 if (!worker->working)
410 wake = 1;
411 worker->working = 1;
412
413 spin_unlock_irqrestore(&worker->lock, flags);
414
415 if (wake)
416 wake_up_process(worker->task);
417out:
418 return 0;
419}
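
In ordered mode the two flag bits form a small protocol: worker_loop() sets WORK_DONE_BIT on the item it just ran, then run_ordered_completions() walks order_list from the head and only calls ordered_func/ordered_free on a contiguous done prefix, using the test_and_set of WORK_ORDER_DONE_BIT so that racing workers never complete the same item twice. A sample timeline, assuming three items queued in order A, B, C:

    order_list: A -> B -> C        (completion order fixed at submission)
    B's func finishes first:  DONE(B) set; walk stops at A (not done yet)
    A's func finishes:        DONE(A) set; walk completes A, then B, stops at C
    C's func finishes:        DONE(C) set; walk completes C
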
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
new file mode 100644
index 000000000000..31be4ed8b63e
--- /dev/null
+++ b/fs/btrfs/async-thread.h
@@ -0,0 +1,101 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#ifndef __BTRFS_ASYNC_THREAD_
20#define __BTRFS_ASYNC_THREAD_
21
22struct btrfs_worker_thread;
23
24/*
25 * This is similar to a workqueue, but it is meant to spread the operations
26 * across all available cpus instead of just the CPU that was used to
27 * queue the work. There is also some batching introduced to try and
28 * cut down on context switches.
29 *
30 * By default threads are added on demand up to 2 * the number of cpus.
31 * Changing struct btrfs_workers->max_workers is one way to prevent
32 * demand creation of kthreads.
33 *
34 * the basic model of these worker threads is to embed a btrfs_work
35 * structure in your own data struct, and use container_of in a
36 * work function to get back to your data struct.
37 */
38struct btrfs_work {
39 /*
40 * func should be set to the function you want called
41 * your work struct is passed as the only arg
42 *
43 * ordered_func must be set for work sent to an ordered work queue,
44 * and it is called to complete a given work item in the same
45 * order they were sent to the queue.
46 */
47 void (*func)(struct btrfs_work *work);
48 void (*ordered_func)(struct btrfs_work *work);
49 void (*ordered_free)(struct btrfs_work *work);
50
51 /*
52 * flags should be set to zero. It is used to make sure the
53 * struct is only inserted once into the list.
54 */
55 unsigned long flags;
56
57 /* don't touch these */
58 struct btrfs_worker_thread *worker;
59 struct list_head list;
60 struct list_head order_list;
61};
62
63struct btrfs_workers {
64 /* current number of running workers */
65 int num_workers;
66
67 /* max number of workers allowed. changed by btrfs_start_workers */
68 int max_workers;
69
70 /* once a worker has this many requests or fewer, it is idle */
71 int idle_thresh;
72
73 /* force completions in the order they were queued */
74 int ordered;
75
76 /* list with all the work threads. The workers on the idle list
77 * may be actively servicing jobs, but they haven't yet hit the
78 * idle thresh limit above.
79 */
80 struct list_head worker_list;
81 struct list_head idle_list;
82
83 /*
84 * when operating in ordered mode, this maintains the list
85 * of work items waiting for completion
86 */
87 struct list_head order_list;
88
89 /* lock for finding the next worker thread to queue on */
90 spinlock_t lock;
91
92 /* extra name for this worker, used in the kthread names */
93 char *name;
94};
95
96int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
97int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
98int btrfs_stop_workers(struct btrfs_workers *workers);
99void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max);
100int btrfs_requeue_work(struct btrfs_work *work);
101#endif
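
Following the embedding model the header comment describes, a caller wraps btrfs_work in its own struct and recovers it with container_of. A hedged sketch of such a user — my_job, my_func, submit_job, and handle() are hypothetical, and only the btrfs_* names come from this header:

    struct my_job {
            int payload;
            struct btrfs_work work;     /* embedded, per the comment above */
    };

    static void my_func(struct btrfs_work *work)
    {
            /* recover the containing struct from the embedded member */
            struct my_job *job = container_of(work, struct my_job, work);

            handle(job->payload);       /* hypothetical work function */
            kfree(job);                 /* non-ordered queues don't touch
                                         * the item again after func returns */
    }

    static int submit_job(struct btrfs_workers *pool, int payload)
    {
            struct my_job *job = kzalloc(sizeof(*job), GFP_NOFS);

            if (!job)
                    return -ENOMEM;
            job->payload = payload;
            job->work.func = my_func;   /* flags already zeroed by kzalloc */
            return btrfs_queue_worker(pool, &job->work);
    }
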
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
new file mode 100644
index 000000000000..a8c9693b75ac
--- /dev/null
+++ b/fs/btrfs/btrfs_inode.h
@@ -0,0 +1,131 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#ifndef __BTRFS_I__
20#define __BTRFS_I__
21
22#include "extent_map.h"
23#include "extent_io.h"
24#include "ordered-data.h"
25
26/* in memory btrfs inode */
27struct btrfs_inode {
28 /* which subvolume this inode belongs to */
29 struct btrfs_root *root;
30
31 /* key used to find this inode on disk. This is used by the code
32 * to read in roots of subvolumes
33 */
34 struct btrfs_key location;
35
36 /* the extent_tree has caches of all the extent mappings to disk */
37 struct extent_map_tree extent_tree;
38
39 /* the io_tree does range state (DIRTY, LOCKED etc) */
40 struct extent_io_tree io_tree;
41
42 /* special utility tree used to record which mirrors have already been
43 * tried when checksums fail for a given block
44 */
45 struct extent_io_tree io_failure_tree;
46
47 /* held while inserting or deleting extents from files */
48 struct mutex extent_mutex;
49
50 /* held while logging the inode in tree-log.c */
51 struct mutex log_mutex;
52
53 /* used to order data wrt metadata */
54 struct btrfs_ordered_inode_tree ordered_tree;
55
56 /* standard acl pointers */
57 struct posix_acl *i_acl;
58 struct posix_acl *i_default_acl;
59
60 /* for keeping track of orphaned inodes */
61 struct list_head i_orphan;
62
63 /* list of all the delalloc inodes in the FS. There are times we need
64 * to write all the delalloc pages to disk, and this list is used
65 * to walk them all.
66 */
67 struct list_head delalloc_inodes;
68
69 /* full 64 bit generation number, struct vfs_inode doesn't have a big
70 * enough field for this.
71 */
72 u64 generation;
73
74 /* sequence number for NFS changes */
75 u64 sequence;
76
77 /*
78 * transid of the trans_handle that last modified this inode
79 */
80 u64 last_trans;
81 /*
82 * transid that last logged this inode
83 */
84 u64 logged_trans;
85
86 /*
87 * trans that last made a change that should be fully fsync'd. This
88 * gets reset to zero each time the inode is logged
89 */
90 u64 log_dirty_trans;
91
92 /* total number of bytes pending delalloc, used by stat to calc the
93 * real block usage of the file
94 */
95 u64 delalloc_bytes;
96
97 /*
98 * the size of the file stored in the metadata on disk. data=ordered
99 * means the in-memory i_size might be larger than the size on disk
100 * because not all the blocks are written yet.
101 */
102 u64 disk_i_size;
103
104 /* flags field from the on disk inode */
105 u32 flags;
106
107 /*
108 * if this is a directory then index_cnt is the counter for the index
109 * number for new files that are created
110 */
111 u64 index_cnt;
112
113 /* the start of block group preferred for allocations. */
114 u64 block_group;
115
116 struct inode vfs_inode;
117};
118
119static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
120{
121 return container_of(inode, struct btrfs_inode, vfs_inode);
122}
123
124static inline void btrfs_i_size_write(struct inode *inode, u64 size)
125{
126 inode->i_size = size;
127 BTRFS_I(inode)->disk_i_size = size;
128}
129
130
131#endif
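
BTRFS_I() above is pure pointer arithmetic: because vfs_inode is embedded inside struct btrfs_inode, container_of just subtracts the member's offset. An equivalent open-coded form, for illustration only:

    static inline struct btrfs_inode *BTRFS_I_open_coded(struct inode *inode)
    {
            /* container_of(inode, struct btrfs_inode, vfs_inode)
             * expands to roughly this offset subtraction */
            return (struct btrfs_inode *)((char *)inode -
                            offsetof(struct btrfs_inode, vfs_inode));
    }
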
diff --git a/fs/btrfs/compat.h b/fs/btrfs/compat.h
new file mode 100644
index 000000000000..7c4503ef6efd
--- /dev/null
+++ b/fs/btrfs/compat.h
@@ -0,0 +1,7 @@
1#ifndef _COMPAT_H_
2#define _COMPAT_H_
3
4#define btrfs_drop_nlink(inode) drop_nlink(inode)
5#define btrfs_inc_nlink(inode) inc_nlink(inode)
6
7#endif /* _COMPAT_H_ */
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
new file mode 100644
index 000000000000..ee848d8585d9
--- /dev/null
+++ b/fs/btrfs/compression.c
@@ -0,0 +1,709 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/kernel.h>
20#include <linux/bio.h>
21#include <linux/buffer_head.h>
22#include <linux/file.h>
23#include <linux/fs.h>
24#include <linux/pagemap.h>
25#include <linux/highmem.h>
26#include <linux/time.h>
27#include <linux/init.h>
28#include <linux/string.h>
29#include <linux/smp_lock.h>
30#include <linux/backing-dev.h>
31#include <linux/mpage.h>
32#include <linux/swap.h>
33#include <linux/writeback.h>
34#include <linux/bit_spinlock.h>
35#include <linux/version.h>
36#include <linux/pagevec.h>
37#include "compat.h"
38#include "ctree.h"
39#include "disk-io.h"
40#include "transaction.h"
41#include "btrfs_inode.h"
42#include "volumes.h"
43#include "ordered-data.h"
44#include "compression.h"
45#include "extent_io.h"
46#include "extent_map.h"
47
48struct compressed_bio {
49 /* number of bios pending for this compressed extent */
50 atomic_t pending_bios;
51
52 /* the pages with the compressed data on them */
53 struct page **compressed_pages;
54
55 /* inode that owns this data */
56 struct inode *inode;
57
58 /* starting offset in the inode for our pages */
59 u64 start;
60
61 /* number of bytes in the inode we're working on */
62 unsigned long len;
63
64 /* number of bytes on disk */
65 unsigned long compressed_len;
66
67 /* number of compressed pages in the array */
68 unsigned long nr_pages;
69
70 /* IO errors */
71 int errors;
72 int mirror_num;
73
74 /* for reads, this is the bio we are copying the data into */
75 struct bio *orig_bio;
76
77 /*
78 * the start of a variable length array of checksums only
79 * used by reads
80 */
81 u32 sums;
82};
83
84static inline int compressed_bio_size(struct btrfs_root *root,
85 unsigned long disk_size)
86{
87 u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
88 return sizeof(struct compressed_bio) +
89 ((disk_size + root->sectorsize - 1) / root->sectorsize) *
90 csum_size;
91}
92
93static struct bio *compressed_bio_alloc(struct block_device *bdev,
94 u64 first_byte, gfp_t gfp_flags)
95{
96 struct bio *bio;
97 int nr_vecs;
98
99 nr_vecs = bio_get_nr_vecs(bdev);
100 bio = bio_alloc(gfp_flags, nr_vecs);
101
102 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
103 while (!bio && (nr_vecs /= 2))
104 bio = bio_alloc(gfp_flags, nr_vecs);
105 }
106
107 if (bio) {
108 bio->bi_size = 0;
109 bio->bi_bdev = bdev;
110 bio->bi_sector = first_byte >> 9;
111 }
112 return bio;
113}
114
115static int check_compressed_csum(struct inode *inode,
116 struct compressed_bio *cb,
117 u64 disk_start)
118{
119 int ret;
120 struct btrfs_root *root = BTRFS_I(inode)->root;
121 struct page *page;
122 unsigned long i;
123 char *kaddr;
124 u32 csum;
125 u32 *cb_sum = &cb->sums;
126
127 if (btrfs_test_flag(inode, NODATASUM))
128 return 0;
129
130 for (i = 0; i < cb->nr_pages; i++) {
131 page = cb->compressed_pages[i];
132 csum = ~(u32)0;
133
134 kaddr = kmap_atomic(page, KM_USER0);
135 csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
136 btrfs_csum_final(csum, (char *)&csum);
137 kunmap_atomic(kaddr, KM_USER0);
138
139 if (csum != *cb_sum) {
140 printk(KERN_INFO "btrfs csum failed ino %lu "
141 "extent %llu csum %u "
142 "wanted %u mirror %d\n", inode->i_ino,
143 (unsigned long long)disk_start,
144 csum, *cb_sum, cb->mirror_num);
145 ret = -EIO;
146 goto fail;
147 }
148 cb_sum++;
149
150 }
151 ret = 0;
152fail:
153 return ret;
154}
155
156/* when we finish reading compressed pages from the disk, we
157 * decompress them and then run the bio end_io routines on the
158 * decompressed pages (in the inode address space).
159 *
160 * This allows the checksumming and other IO error handling routines
161 * to work normally
162 *
163 * The compressed pages are freed here, and it must be run
164 * in process context
165 */
166static void end_compressed_bio_read(struct bio *bio, int err)
167{
168 struct extent_io_tree *tree;
169 struct compressed_bio *cb = bio->bi_private;
170 struct inode *inode;
171 struct page *page;
172 unsigned long index;
173 int ret;
174
175 if (err)
176 cb->errors = 1;
177
178 /* if there are more bios still pending for this compressed
179 * extent, just exit
180 */
181 if (!atomic_dec_and_test(&cb->pending_bios))
182 goto out;
183
184 inode = cb->inode;
185 ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
186 if (ret)
187 goto csum_failed;
188
189 /* ok, we're the last bio for this extent, let's start
190 * the decompression.
191 */
192 tree = &BTRFS_I(inode)->io_tree;
193 ret = btrfs_zlib_decompress_biovec(cb->compressed_pages,
194 cb->start,
195 cb->orig_bio->bi_io_vec,
196 cb->orig_bio->bi_vcnt,
197 cb->compressed_len);
198csum_failed:
199 if (ret)
200 cb->errors = 1;
201
202 /* release the compressed pages */
203 index = 0;
204 for (index = 0; index < cb->nr_pages; index++) {
205 page = cb->compressed_pages[index];
206 page->mapping = NULL;
207 page_cache_release(page);
208 }
209
210 /* do io completion on the original bio */
211 if (cb->errors) {
212 bio_io_error(cb->orig_bio);
213 } else {
214 int bio_index = 0;
215 struct bio_vec *bvec = cb->orig_bio->bi_io_vec;
216
217 /*
218 * we have verified the checksum already, set page
219 * checked so the end_io handlers know about it
220 */
221 while (bio_index < cb->orig_bio->bi_vcnt) {
222 SetPageChecked(bvec->bv_page);
223 bvec++;
224 bio_index++;
225 }
226 bio_endio(cb->orig_bio, 0);
227 }
228
229 /* finally free the cb struct */
230 kfree(cb->compressed_pages);
231 kfree(cb);
232out:
233 bio_put(bio);
234}
235
236/*
237 * Clear the writeback bits on all of the file
238 * pages for a compressed write
239 */
240static noinline int end_compressed_writeback(struct inode *inode, u64 start,
241 unsigned long ram_size)
242{
243 unsigned long index = start >> PAGE_CACHE_SHIFT;
244 unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
245 struct page *pages[16];
246 unsigned long nr_pages = end_index - index + 1;
247 int i;
248 int ret;
249
250 while (nr_pages > 0) {
251 ret = find_get_pages_contig(inode->i_mapping, index,
252 min_t(unsigned long,
253 nr_pages, ARRAY_SIZE(pages)), pages);
254 if (ret == 0) {
255 nr_pages -= 1;
256 index += 1;
257 continue;
258 }
259 for (i = 0; i < ret; i++) {
260 end_page_writeback(pages[i]);
261 page_cache_release(pages[i]);
262 }
263 nr_pages -= ret;
264 index += ret;
265 }
266 /* the inode may be gone now */
267 return 0;
268}
269
270/*
271 * do the cleanup once all the compressed pages hit the disk.
272 * This will clear writeback on the file pages and free the compressed
273 * pages.
274 *
275 * This also calls the writeback end hooks for the file pages so that
276 * metadata and checksums can be updated in the file.
277 */
278static void end_compressed_bio_write(struct bio *bio, int err)
279{
280 struct extent_io_tree *tree;
281 struct compressed_bio *cb = bio->bi_private;
282 struct inode *inode;
283 struct page *page;
284 unsigned long index;
285
286 if (err)
287 cb->errors = 1;
288
289 /* if there are more bios still pending for this compressed
290 * extent, just exit
291 */
292 if (!atomic_dec_and_test(&cb->pending_bios))
293 goto out;
294
295 /* ok, we're the last bio for this extent, step one is to
296 * call back into the FS and do all the end_io operations
297 */
298 inode = cb->inode;
299 tree = &BTRFS_I(inode)->io_tree;
300 cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
301 tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
302 cb->start,
303 cb->start + cb->len - 1,
304 NULL, 1);
305 cb->compressed_pages[0]->mapping = NULL;
306
307 end_compressed_writeback(inode, cb->start, cb->len);
308 /* note, our inode could be gone now */
309
310 /*
311 * release the compressed pages, these came from alloc_page and
312 * are not attached to the inode at all
313 */
314 index = 0;
315 for (index = 0; index < cb->nr_pages; index++) {
316 page = cb->compressed_pages[index];
317 page->mapping = NULL;
318 page_cache_release(page);
319 }
320
321 /* finally free the cb struct */
322 kfree(cb->compressed_pages);
323 kfree(cb);
324out:
325 bio_put(bio);
326}
327
328/*
329 * worker function to build and submit bios for previously compressed pages.
330 * The corresponding pages in the inode should be marked for writeback
331 * and the compressed pages should have a reference on them for dropping
332 * when the IO is complete.
333 *
334 * This also checksums the file bytes and gets things ready for
335 * the end io hooks.
336 */
337int btrfs_submit_compressed_write(struct inode *inode, u64 start,
338 unsigned long len, u64 disk_start,
339 unsigned long compressed_len,
340 struct page **compressed_pages,
341 unsigned long nr_pages)
342{
343 struct bio *bio = NULL;
344 struct btrfs_root *root = BTRFS_I(inode)->root;
345 struct compressed_bio *cb;
346 unsigned long bytes_left;
347 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
348 int page_index = 0;
349 struct page *page;
350 u64 first_byte = disk_start;
351 struct block_device *bdev;
352 int ret;
353
354 WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
355 cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
356 atomic_set(&cb->pending_bios, 0);
357 cb->errors = 0;
358 cb->inode = inode;
359 cb->start = start;
360 cb->len = len;
361 cb->mirror_num = 0;
362 cb->compressed_pages = compressed_pages;
363 cb->compressed_len = compressed_len;
364 cb->orig_bio = NULL;
365 cb->nr_pages = nr_pages;
366
367 bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
368
369 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
370 bio->bi_private = cb;
371 bio->bi_end_io = end_compressed_bio_write;
372 atomic_inc(&cb->pending_bios);
373
374 /* create and submit bios for the compressed pages */
375 bytes_left = compressed_len;
376 for (page_index = 0; page_index < cb->nr_pages; page_index++) {
377 page = compressed_pages[page_index];
378 page->mapping = inode->i_mapping;
379 if (bio->bi_size)
380 ret = io_tree->ops->merge_bio_hook(page, 0,
381 PAGE_CACHE_SIZE,
382 bio, 0);
383 else
384 ret = 0;
385
386 page->mapping = NULL;
387 if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
388 PAGE_CACHE_SIZE) {
389 bio_get(bio);
390
391 /*
392 * inc the count before we submit the bio, so the
393 * end IO handler can't fire before our increment
394 * lands. Otherwise, the cb might get
395 * freed before we're done setting it up
396 */
397 atomic_inc(&cb->pending_bios);
398 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
399 BUG_ON(ret);
400
401 ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
402 BUG_ON(ret);
403
404 ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
405 BUG_ON(ret);
406
407 bio_put(bio);
408
409 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
410 bio->bi_private = cb;
411 bio->bi_end_io = end_compressed_bio_write;
412 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
413 }
414 if (bytes_left < PAGE_CACHE_SIZE) {
415 printk("bytes left %lu compress len %lu nr %lu\n",
416 bytes_left, cb->compressed_len, cb->nr_pages);
417 }
418 bytes_left -= PAGE_CACHE_SIZE;
419 first_byte += PAGE_CACHE_SIZE;
420 cond_resched();
421 }
422 bio_get(bio);
423
424 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
425 BUG_ON(ret);
426
427 ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
428 BUG_ON(ret);
429
430 ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
431 BUG_ON(ret);
432
433 bio_put(bio);
434 return 0;
435}
436
437static noinline int add_ra_bio_pages(struct inode *inode,
438 u64 compressed_end,
439 struct compressed_bio *cb)
440{
441 unsigned long end_index;
442 unsigned long page_index;
443 u64 last_offset;
444 u64 isize = i_size_read(inode);
445 int ret;
446 struct page *page;
447 unsigned long nr_pages = 0;
448 struct extent_map *em;
449 struct address_space *mapping = inode->i_mapping;
450 struct pagevec pvec;
451 struct extent_map_tree *em_tree;
452 struct extent_io_tree *tree;
453 u64 end;
454 int misses = 0;
455
456 page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
457 last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
458 em_tree = &BTRFS_I(inode)->extent_tree;
459 tree = &BTRFS_I(inode)->io_tree;
460
461 if (isize == 0)
462 return 0;
463
464 end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
465
466 pagevec_init(&pvec, 0);
467 while (last_offset < compressed_end) {
468 page_index = last_offset >> PAGE_CACHE_SHIFT;
469
470 if (page_index > end_index)
471 break;
472
473 rcu_read_lock();
474 page = radix_tree_lookup(&mapping->page_tree, page_index);
475 rcu_read_unlock();
476 if (page) {
477 misses++;
478 if (misses > 4)
479 break;
480 goto next;
481 }
482
483 page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS);
484 if (!page)
485 break;
486
487 page->index = page_index;
488 /*
489 * what we want to do here is call add_to_page_cache_lru,
490 * but that isn't exported, so we reproduce it here
491 */
492 if (add_to_page_cache(page, mapping,
493 page->index, GFP_NOFS)) {
494 page_cache_release(page);
495 goto next;
496 }
497
498 /* open coding of lru_cache_add, also not exported */
499 page_cache_get(page);
500 if (!pagevec_add(&pvec, page))
501 __pagevec_lru_add_file(&pvec);
502
503 end = last_offset + PAGE_CACHE_SIZE - 1;
504 /*
505 * at this point, we have a locked page in the page cache
506 * for these bytes in the file. But, we have to make
507 * sure they map to this compressed extent on disk.
508 */
509 set_page_extent_mapped(page);
510 lock_extent(tree, last_offset, end, GFP_NOFS);
511 spin_lock(&em_tree->lock);
512 em = lookup_extent_mapping(em_tree, last_offset,
513 PAGE_CACHE_SIZE);
514 spin_unlock(&em_tree->lock);
515
516 if (!em || last_offset < em->start ||
517 (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
518 (em->block_start >> 9) != cb->orig_bio->bi_sector) {
519 free_extent_map(em);
520 unlock_extent(tree, last_offset, end, GFP_NOFS);
521 unlock_page(page);
522 page_cache_release(page);
523 break;
524 }
525 free_extent_map(em);
526
527 if (page->index == end_index) {
528 char *userpage;
529 size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);
530
531 if (zero_offset) {
532 int zeros;
533 zeros = PAGE_CACHE_SIZE - zero_offset;
534 userpage = kmap_atomic(page, KM_USER0);
535 memset(userpage + zero_offset, 0, zeros);
536 flush_dcache_page(page);
537 kunmap_atomic(userpage, KM_USER0);
538 }
539 }
540
541 ret = bio_add_page(cb->orig_bio, page,
542 PAGE_CACHE_SIZE, 0);
543
544 if (ret == PAGE_CACHE_SIZE) {
545 nr_pages++;
546 page_cache_release(page);
547 } else {
548 unlock_extent(tree, last_offset, end, GFP_NOFS);
549 unlock_page(page);
550 page_cache_release(page);
551 break;
552 }
553next:
554 last_offset += PAGE_CACHE_SIZE;
555 }
556 if (pagevec_count(&pvec))
557 __pagevec_lru_add_file(&pvec);
558 return 0;
559}
560
561/*
562 * for a compressed read, the bio we get passed has all the inode pages
563 * in it. We don't actually do IO on those pages but allocate new ones
564 * to hold the compressed pages on disk.
565 *
566 * bio->bi_sector points to the compressed extent on disk
567 * bio->bi_io_vec points to all of the inode pages
568 * bio->bi_vcnt is a count of pages
569 *
570 * After the compressed pages are read, we copy the bytes into the
571 * bio we were passed and then call the bio end_io calls
572 */
573int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
574 int mirror_num, unsigned long bio_flags)
575{
576 struct extent_io_tree *tree;
577 struct extent_map_tree *em_tree;
578 struct compressed_bio *cb;
579 struct btrfs_root *root = BTRFS_I(inode)->root;
580 unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
581 unsigned long compressed_len;
582 unsigned long nr_pages;
583 unsigned long page_index;
584 struct page *page;
585 struct block_device *bdev;
586 struct bio *comp_bio;
587 u64 cur_disk_byte = (u64)bio->bi_sector << 9;
588 u64 em_len;
589 u64 em_start;
590 struct extent_map *em;
591 int ret;
592 u32 *sums;
593
594 tree = &BTRFS_I(inode)->io_tree;
595 em_tree = &BTRFS_I(inode)->extent_tree;
596
597 /* we need the actual starting offset of this extent in the file */
598 spin_lock(&em_tree->lock);
599 em = lookup_extent_mapping(em_tree,
600 page_offset(bio->bi_io_vec->bv_page),
601 PAGE_CACHE_SIZE);
602 spin_unlock(&em_tree->lock);
603
604 compressed_len = em->block_len;
605 cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
606 atomic_set(&cb->pending_bios, 0);
607 cb->errors = 0;
608 cb->inode = inode;
609 cb->mirror_num = mirror_num;
610 sums = &cb->sums;
611
612 cb->start = em->orig_start;
613 em_len = em->len;
614 em_start = em->start;
615
616 free_extent_map(em);
617 em = NULL;
618
619 cb->len = uncompressed_len;
620 cb->compressed_len = compressed_len;
621 cb->orig_bio = bio;
622
623 nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
624 PAGE_CACHE_SIZE;
625 cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
626 GFP_NOFS);
627 bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
628
629 for (page_index = 0; page_index < nr_pages; page_index++) {
630 cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
631 __GFP_HIGHMEM);
632 }
633 cb->nr_pages = nr_pages;
634
635 add_ra_bio_pages(inode, em_start + em_len, cb);
636
637 /* include any pages we added in add_ra_bio_pages */
638 uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
639 cb->len = uncompressed_len;
640
641 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
642 comp_bio->bi_private = cb;
643 comp_bio->bi_end_io = end_compressed_bio_read;
644 atomic_inc(&cb->pending_bios);
645
646 for (page_index = 0; page_index < nr_pages; page_index++) {
647 page = cb->compressed_pages[page_index];
648 page->mapping = inode->i_mapping;
649 page->index = em_start >> PAGE_CACHE_SHIFT;
650
651 if (comp_bio->bi_size)
652 ret = tree->ops->merge_bio_hook(page, 0,
653 PAGE_CACHE_SIZE,
654 comp_bio, 0);
655 else
656 ret = 0;
657
658 page->mapping = NULL;
659 if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
660 PAGE_CACHE_SIZE) {
661 bio_get(comp_bio);
662
663 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
664 BUG_ON(ret);
665
666 /*
667 * inc the count before we submit the bio, so the
668 * end IO handler can't fire before our increment
669 * lands. Otherwise, the cb might get
670 * freed before we're done setting it up
671 */
672 atomic_inc(&cb->pending_bios);
673
674 if (!btrfs_test_flag(inode, NODATASUM)) {
675 btrfs_lookup_bio_sums(root, inode, comp_bio,
676 sums);
677 }
678 sums += (comp_bio->bi_size + root->sectorsize - 1) /
679 root->sectorsize;
680
681 ret = btrfs_map_bio(root, READ, comp_bio,
682 mirror_num, 0);
683 BUG_ON(ret);
684
685 bio_put(comp_bio);
686
687 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
688 GFP_NOFS);
689 comp_bio->bi_private = cb;
690 comp_bio->bi_end_io = end_compressed_bio_read;
691
692 bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
693 }
694 cur_disk_byte += PAGE_CACHE_SIZE;
695 }
696 bio_get(comp_bio);
697
698 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
699 BUG_ON(ret);
700
701 if (!btrfs_test_flag(inode, NODATASUM))
702 btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
703
704 ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
705 BUG_ON(ret);
706
707 bio_put(comp_bio);
708 return 0;
709}
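
The `u32 sums;` member at the end of struct compressed_bio doubles as the start of an inline checksum array, which is why compressed_bio_size() pads the allocation with one csum per on-disk sector and why check_compressed_csum() advances cb_sum one entry per page. A worked instance, assuming 4 KiB sectors and 4-byte crc32c checksums:

    /* illustrative numbers only: a 20 KiB compressed extent needs
     * ceil(20480 / 4096) = 5 sector checksums, so the allocation is */
    size_t sz = sizeof(struct compressed_bio) +
                ((20480 + 4096 - 1) / 4096) * sizeof(u32);  /* + 5 * 4 bytes */
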
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
new file mode 100644
index 000000000000..421f5b4aa715
--- /dev/null
+++ b/fs/btrfs/compression.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#ifndef __BTRFS_COMPRESSION_
20#define __BTRFS_COMPRESSION_
21
22int btrfs_zlib_decompress(unsigned char *data_in,
23 struct page *dest_page,
24 unsigned long start_byte,
25 size_t srclen, size_t destlen);
26int btrfs_zlib_compress_pages(struct address_space *mapping,
27 u64 start, unsigned long len,
28 struct page **pages,
29 unsigned long nr_dest_pages,
30 unsigned long *out_pages,
31 unsigned long *total_in,
32 unsigned long *total_out,
33 unsigned long max_out);
34int btrfs_zlib_decompress_biovec(struct page **pages_in,
35 u64 disk_start,
36 struct bio_vec *bvec,
37 int vcnt,
38 size_t srclen);
39void btrfs_zlib_exit(void);
40int btrfs_submit_compressed_write(struct inode *inode, u64 start,
41 unsigned long len, u64 disk_start,
42 unsigned long compressed_len,
43 struct page **compressed_pages,
44 unsigned long nr_pages);
45int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
46 int mirror_num, unsigned long bio_flags);
47#endif
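
btrfs_zlib_compress_pages() reports its results through the out-parameters: as the names suggest, *out_pages is how many destination pages were filled, *total_in how many file bytes were consumed, and *total_out how many compressed bytes were produced, with max_out as a budget that lets the caller abandon data that isn't compressing. A call-site sketch under those assumptions (error handling elided; `pages` and nr_dest_pages are supplied by the caller):

    unsigned long out_pages = 0, total_in = 0, total_out = 0;
    int ret;

    /* try to compress 128 KiB starting at `start`, giving up if the
     * output wouldn't fit in the same 128 KiB budget */
    ret = btrfs_zlib_compress_pages(inode->i_mapping, start, 128 * 1024,
                                    pages, nr_dest_pages,
                                    &out_pages, &total_in, &total_out,
                                    128 * 1024);
    if (ret == 0 && total_out < total_in) {
            /* compression paid off; hand the filled pages to
             * btrfs_submit_compressed_write() */
    }
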
diff --git a/fs/btrfs/crc32c.h b/fs/btrfs/crc32c.h
new file mode 100644
index 000000000000..6e1b3de36700
--- /dev/null
+++ b/fs/btrfs/crc32c.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#ifndef __BTRFS_CRC32C__
20#define __BTRFS_CRC32C__
21#include <linux/crc32c.h>
22
23/*
24 * this file used to do more for selecting the HW version of crc32c,
25 * perhaps it will again one day.
26 */
27#define btrfs_crc32c(seed, data, length) crc32c(seed, data, length)
28#endif
29
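
check_compressed_csum() in compression.c seeds each page's checksum with ~(u32)0 and runs it through btrfs_csum_data() before finalizing; with this wrapper that bottoms out in the kernel's crc32c(). A minimal sketch of checksumming one block the same way (csum_block is a hypothetical helper, not a btrfs function):

    #include <linux/crc32c.h>

    static u32 csum_block(const void *data, size_t len)
    {
            /* same ~(u32)0 seed the compressed-read path uses */
            return btrfs_crc32c(~(u32)0, data, len);
    }
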
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
new file mode 100644
index 000000000000..9e46c0776816
--- /dev/null
+++ b/fs/btrfs/ctree.c
@@ -0,0 +1,3953 @@
1/*
2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include "ctree.h"
21#include "disk-io.h"
22#include "transaction.h"
23#include "print-tree.h"
24#include "locking.h"
25
26static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
27 *root, struct btrfs_path *path, int level);
28static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_key *ins_key,
30 struct btrfs_path *path, int data_size, int extend);
31static int push_node_left(struct btrfs_trans_handle *trans,
32 struct btrfs_root *root, struct extent_buffer *dst,
33 struct extent_buffer *src, int empty);
34static int balance_node_right(struct btrfs_trans_handle *trans,
35 struct btrfs_root *root,
36 struct extent_buffer *dst_buf,
37 struct extent_buffer *src_buf);
38static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
39 struct btrfs_path *path, int level, int slot);
40
41inline void btrfs_init_path(struct btrfs_path *p)
42{
43 memset(p, 0, sizeof(*p));
44}
45
46struct btrfs_path *btrfs_alloc_path(void)
47{
48 struct btrfs_path *path;
49 path = kmem_cache_alloc(btrfs_path_cachep, GFP_NOFS);
50 if (path) {
51 btrfs_init_path(path);
52 path->reada = 1;
53 }
54 return path;
55}
56
57/* this also releases the path */
58void btrfs_free_path(struct btrfs_path *p)
59{
60 btrfs_release_path(NULL, p);
61 kmem_cache_free(btrfs_path_cachep, p);
62}
63
64/*
65 * path release drops references on the extent buffers in the path
66 * and it drops any locks held by this path
67 *
68 * It is safe to call this on paths that hold no locks or extent buffers.
69 */
70noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
71{
72 int i;
73
74 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
75 p->slots[i] = 0;
76 if (!p->nodes[i])
77 continue;
78 if (p->locks[i]) {
79 btrfs_tree_unlock(p->nodes[i]);
80 p->locks[i] = 0;
81 }
82 free_extent_buffer(p->nodes[i]);
83 p->nodes[i] = NULL;
84 }
85}
86
87/*
88 * safely gets a reference on the root node of a tree. A lock
89 * is not taken, so a concurrent writer may put a different node
90 * at the root of the tree. See btrfs_lock_root_node for the
91 * looping required.
92 *
93 * The extent buffer returned by this has a reference taken, so
94 * it won't disappear. It may stop being the root of the tree
95 * at any time because there are no locks held.
96 */
97struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
98{
99 struct extent_buffer *eb;
100 spin_lock(&root->node_lock);
101 eb = root->node;
102 extent_buffer_get(eb);
103 spin_unlock(&root->node_lock);
104 return eb;
105}
106
107/* loop around taking references on and locking the root node of the
108 * tree until you end up with a lock on the root. A locked buffer
109 * is returned, with a reference held.
110 */
111struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
112{
113 struct extent_buffer *eb;
114
115 while (1) {
116 eb = btrfs_root_node(root);
117 btrfs_tree_lock(eb);
118
119 spin_lock(&root->node_lock);
120 if (eb == root->node) {
121 spin_unlock(&root->node_lock);
122 break;
123 }
124 spin_unlock(&root->node_lock);
125
126 btrfs_tree_unlock(eb);
127 free_extent_buffer(eb);
128 }
129 return eb;
130}
131
132/* cowonly roots (everything not a reference counted cow subvolume) just get
133 * put onto a simple dirty list. transaction.c walks this to make sure they
134 * get properly updated on disk.
135 */
136static void add_root_to_dirty_list(struct btrfs_root *root)
137{
138 if (root->track_dirty && list_empty(&root->dirty_list)) {
139 list_add(&root->dirty_list,
140 &root->fs_info->dirty_cowonly_roots);
141 }
142}
143
144/*
145 * used by snapshot creation to make a copy of a root for a tree with
146 * a given objectid. The buffer with the new root node is returned in
147 * cow_ret, and this func returns zero on success or a negative error code.
148 */
149int btrfs_copy_root(struct btrfs_trans_handle *trans,
150 struct btrfs_root *root,
151 struct extent_buffer *buf,
152 struct extent_buffer **cow_ret, u64 new_root_objectid)
153{
154 struct extent_buffer *cow;
155 u32 nritems;
156 int ret = 0;
157 int level;
158 struct btrfs_root *new_root;
159
160 new_root = kmalloc(sizeof(*new_root), GFP_NOFS);
161 if (!new_root)
162 return -ENOMEM;
163
164 memcpy(new_root, root, sizeof(*new_root));
165 new_root->root_key.objectid = new_root_objectid;
166
167 WARN_ON(root->ref_cows && trans->transid !=
168 root->fs_info->running_transaction->transid);
169 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
170
171 level = btrfs_header_level(buf);
172 nritems = btrfs_header_nritems(buf);
173
174 cow = btrfs_alloc_free_block(trans, new_root, buf->len, 0,
175 new_root_objectid, trans->transid,
176 level, buf->start, 0);
177 if (IS_ERR(cow)) {
178 kfree(new_root);
179 return PTR_ERR(cow);
180 }
181
182 copy_extent_buffer(cow, buf, 0, 0, cow->len);
183 btrfs_set_header_bytenr(cow, cow->start);
184 btrfs_set_header_generation(cow, trans->transid);
185 btrfs_set_header_owner(cow, new_root_objectid);
186 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);
187
188 write_extent_buffer(cow, root->fs_info->fsid,
189 (unsigned long)btrfs_header_fsid(cow),
190 BTRFS_FSID_SIZE);
191
192 WARN_ON(btrfs_header_generation(buf) > trans->transid);
193 ret = btrfs_inc_ref(trans, new_root, buf, cow, NULL);
194 kfree(new_root);
195
196 if (ret)
197 return ret;
198
199 btrfs_mark_buffer_dirty(cow);
200 *cow_ret = cow;
201 return 0;
202}
203
204/*
205 * does the dirty work in cow of a single block. The parent block (if
206 * supplied) is updated to point to the new cow copy. The new buffer is marked
207 * dirty and returned locked. If you modify the block it needs to be marked
208 * dirty again.
209 *
210 * search_start -- an allocation hint for the new block
211 *
212 * empty_size -- a hint that you plan on doing more cow. This is the size in
213 * bytes the allocator should try to find free next to the block it returns.
214 * This is just a hint and may be ignored by the allocator.
215 *
216 * prealloc_dest -- if you have already reserved a destination for the cow,
217 * this uses that block instead of allocating a new one.
218 * btrfs_alloc_reserved_extent is used to finish the allocation.
219 */
220static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
221 struct btrfs_root *root,
222 struct extent_buffer *buf,
223 struct extent_buffer *parent, int parent_slot,
224 struct extent_buffer **cow_ret,
225 u64 search_start, u64 empty_size,
226 u64 prealloc_dest)
227{
228 u64 parent_start;
229 struct extent_buffer *cow;
230 u32 nritems;
231 int ret = 0;
232 int level;
233 int unlock_orig = 0;
234
235 if (*cow_ret == buf)
236 unlock_orig = 1;
237
238 WARN_ON(!btrfs_tree_locked(buf));
239
240 if (parent)
241 parent_start = parent->start;
242 else
243 parent_start = 0;
244
245 WARN_ON(root->ref_cows && trans->transid !=
246 root->fs_info->running_transaction->transid);
247 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
248
249 level = btrfs_header_level(buf);
250 nritems = btrfs_header_nritems(buf);
251
252 if (prealloc_dest) {
253 struct btrfs_key ins;
254
255 ins.objectid = prealloc_dest;
256 ins.offset = buf->len;
257 ins.type = BTRFS_EXTENT_ITEM_KEY;
258
259 ret = btrfs_alloc_reserved_extent(trans, root, parent_start,
260 root->root_key.objectid,
261 trans->transid, level, &ins);
262 BUG_ON(ret);
263 cow = btrfs_init_new_buffer(trans, root, prealloc_dest,
264 buf->len);
265 } else {
266 cow = btrfs_alloc_free_block(trans, root, buf->len,
267 parent_start,
268 root->root_key.objectid,
269 trans->transid, level,
270 search_start, empty_size);
271 }
272 if (IS_ERR(cow))
273 return PTR_ERR(cow);
274
275 copy_extent_buffer(cow, buf, 0, 0, cow->len);
276 btrfs_set_header_bytenr(cow, cow->start);
277 btrfs_set_header_generation(cow, trans->transid);
278 btrfs_set_header_owner(cow, root->root_key.objectid);
279 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);
280
281 write_extent_buffer(cow, root->fs_info->fsid,
282 (unsigned long)btrfs_header_fsid(cow),
283 BTRFS_FSID_SIZE);
284
285 WARN_ON(btrfs_header_generation(buf) > trans->transid);
286 if (btrfs_header_generation(buf) != trans->transid) {
287 u32 nr_extents;
288 ret = btrfs_inc_ref(trans, root, buf, cow, &nr_extents);
289 if (ret)
290 return ret;
291
292 ret = btrfs_cache_ref(trans, root, buf, nr_extents);
293 WARN_ON(ret);
294 } else if (btrfs_header_owner(buf) == BTRFS_TREE_RELOC_OBJECTID) {
295 /*
296	 * There are only two places that can drop a reference to
297 * tree blocks owned by living reloc trees, one is here,
298 * the other place is btrfs_drop_subtree. In both places,
299 * we check reference count while tree block is locked.
300 * Furthermore, if reference count is one, it won't get
301 * increased by someone else.
302 */
303 u32 refs;
304 ret = btrfs_lookup_extent_ref(trans, root, buf->start,
305 buf->len, &refs);
306 BUG_ON(ret);
307 if (refs == 1) {
308 ret = btrfs_update_ref(trans, root, buf, cow,
309 0, nritems);
310 clean_tree_block(trans, root, buf);
311 } else {
312 ret = btrfs_inc_ref(trans, root, buf, cow, NULL);
313 }
314 BUG_ON(ret);
315 } else {
316 ret = btrfs_update_ref(trans, root, buf, cow, 0, nritems);
317 if (ret)
318 return ret;
319 clean_tree_block(trans, root, buf);
320 }
321
322 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
323 ret = btrfs_reloc_tree_cache_ref(trans, root, cow, buf->start);
324 WARN_ON(ret);
325 }
326
327 if (buf == root->node) {
328 WARN_ON(parent && parent != buf);
329
330 spin_lock(&root->node_lock);
331 root->node = cow;
332 extent_buffer_get(cow);
333 spin_unlock(&root->node_lock);
334
335 if (buf != root->commit_root) {
336 btrfs_free_extent(trans, root, buf->start,
337 buf->len, buf->start,
338 root->root_key.objectid,
339 btrfs_header_generation(buf),
340 level, 1);
341 }
342 free_extent_buffer(buf);
343 add_root_to_dirty_list(root);
344 } else {
345 btrfs_set_node_blockptr(parent, parent_slot,
346 cow->start);
347 WARN_ON(trans->transid == 0);
348 btrfs_set_node_ptr_generation(parent, parent_slot,
349 trans->transid);
350 btrfs_mark_buffer_dirty(parent);
351 WARN_ON(btrfs_header_generation(parent) != trans->transid);
352 btrfs_free_extent(trans, root, buf->start, buf->len,
353 parent_start, btrfs_header_owner(parent),
354 btrfs_header_generation(parent), level, 1);
355 }
356 if (unlock_orig)
357 btrfs_tree_unlock(buf);
358 free_extent_buffer(buf);
359 btrfs_mark_buffer_dirty(cow);
360 *cow_ret = cow;
361 return 0;
362}
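/*
 * Illustrative summary (not in the original source): the reference
 * updates above fall into three cases. Blocks written in an older
 * transaction get a full btrfs_inc_ref(); blocks owned by a live reloc
 * tree are dropped in favor of the new copy when their refcount is 1;
 * blocks already written in this transaction just move their refs over
 * to the cow via btrfs_update_ref().
 */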
363
364/*
365 * cows a single block, see __btrfs_cow_block for the real work.
366 * This version of it has extra checks so that a block isn't cow'd more than
367 * once per transaction, as long as it hasn't been written yet
368 */
369noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
370 struct btrfs_root *root, struct extent_buffer *buf,
371 struct extent_buffer *parent, int parent_slot,
372 struct extent_buffer **cow_ret, u64 prealloc_dest)
373{
374 u64 search_start;
375 int ret;
376
377 if (trans->transaction != root->fs_info->running_transaction) {
378 printk(KERN_CRIT "trans %llu running %llu\n",
379 (unsigned long long)trans->transid,
380 (unsigned long long)
381 root->fs_info->running_transaction->transid);
382 WARN_ON(1);
383 }
384 if (trans->transid != root->fs_info->generation) {
385 printk(KERN_CRIT "trans %llu running %llu\n",
386 (unsigned long long)trans->transid,
387 (unsigned long long)root->fs_info->generation);
388 WARN_ON(1);
389 }
390
391 spin_lock(&root->fs_info->hash_lock);
392 if (btrfs_header_generation(buf) == trans->transid &&
393 btrfs_header_owner(buf) == root->root_key.objectid &&
394 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
395 *cow_ret = buf;
396 spin_unlock(&root->fs_info->hash_lock);
397 WARN_ON(prealloc_dest);
398 return 0;
399 }
400 spin_unlock(&root->fs_info->hash_lock);
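	/* hint the allocator with the 1GB boundary containing the old block */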
401 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
402 ret = __btrfs_cow_block(trans, root, buf, parent,
403 parent_slot, cow_ret, search_start, 0,
404 prealloc_dest);
405 return ret;
406}
407
408/*
409 * helper function for defrag to decide if two blocks pointed to by a
410 * node are actually close by
411 */
412static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
413{
414 if (blocknr < other && other - (blocknr + blocksize) < 32768)
415 return 1;
416 if (blocknr > other && blocknr - (other + blocksize) < 32768)
417 return 1;
418 return 0;
419}
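/*
 * Illustrative sketch (not in the original source, compiled out): shows
 * the 32KB proximity rule close_blocks() implements.  The 4KB blocksize
 * is an assumption made for the example.
 */
#if 0
static void close_blocks_example(void)
{
	u32 blocksize = 4096;

	/* gap is 20KB - 4KB = 16KB, under 32KB, so the blocks are close */
	WARN_ON(close_blocks(0, 20 * 1024, blocksize) != 1);
	/* gap is 40KB - 4KB = 36KB, past the 32KB limit */
	WARN_ON(close_blocks(0, 40 * 1024, blocksize) != 0);
}
#endif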
420
421/*
422 * compare two keys in a memcmp fashion
423 */
424static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
425{
426 struct btrfs_key k1;
427
428 btrfs_disk_key_to_cpu(&k1, disk);
429
430 if (k1.objectid > k2->objectid)
431 return 1;
432 if (k1.objectid < k2->objectid)
433 return -1;
434 if (k1.type > k2->type)
435 return 1;
436 if (k1.type < k2->type)
437 return -1;
438 if (k1.offset > k2->offset)
439 return 1;
440 if (k1.offset < k2->offset)
441 return -1;
442 return 0;
443}
444
445/*
446 * same as comp_keys only with two btrfs_key's
447 */
448static int comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
449{
450 if (k1->objectid > k2->objectid)
451 return 1;
452 if (k1->objectid < k2->objectid)
453 return -1;
454 if (k1->type > k2->type)
455 return 1;
456 if (k1->type < k2->type)
457 return -1;
458 if (k1->offset > k2->offset)
459 return 1;
460 if (k1->offset < k2->offset)
461 return -1;
462 return 0;
463}
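/*
 * Illustrative sketch (not in the original source, compiled out): keys
 * compare field by field -- objectid first, then type, then offset --
 * so a lower type sorts first when the objectids match.
 */
#if 0
static void comp_cpu_keys_example(void)
{
	struct btrfs_key a = { .objectid = 5, .type = 1, .offset = 100 };
	struct btrfs_key b = { .objectid = 5, .type = 2, .offset = 0 };

	/* same objectid, a.type < b.type, so a sorts before b */
	WARN_ON(comp_cpu_keys(&a, &b) != -1);
}
#endif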
464
465/*
466 * this is used by the defrag code to go through all the
467 * leaves pointed to by a node and reallocate them so that
468 * disk order is close to key order
469 */
470int btrfs_realloc_node(struct btrfs_trans_handle *trans,
471 struct btrfs_root *root, struct extent_buffer *parent,
472 int start_slot, int cache_only, u64 *last_ret,
473 struct btrfs_key *progress)
474{
475 struct extent_buffer *cur;
476 u64 blocknr;
477 u64 gen;
478 u64 search_start = *last_ret;
479 u64 last_block = 0;
480 u64 other;
481 u32 parent_nritems;
482 int end_slot;
483 int i;
484 int err = 0;
485 int parent_level;
486 int uptodate;
487 u32 blocksize;
488 int progress_passed = 0;
489 struct btrfs_disk_key disk_key;
490
491 parent_level = btrfs_header_level(parent);
492 if (cache_only && parent_level != 1)
493 return 0;
494
495 if (trans->transaction != root->fs_info->running_transaction)
496 WARN_ON(1);
497 if (trans->transid != root->fs_info->generation)
498 WARN_ON(1);
499
500 parent_nritems = btrfs_header_nritems(parent);
501 blocksize = btrfs_level_size(root, parent_level - 1);
502 end_slot = parent_nritems;
503
504 if (parent_nritems == 1)
505 return 0;
506
507 for (i = start_slot; i < end_slot; i++) {
508 int close = 1;
509
510 if (!parent->map_token) {
511 map_extent_buffer(parent,
512 btrfs_node_key_ptr_offset(i),
513 sizeof(struct btrfs_key_ptr),
514 &parent->map_token, &parent->kaddr,
515 &parent->map_start, &parent->map_len,
516 KM_USER1);
517 }
518 btrfs_node_key(parent, &disk_key, i);
519 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
520 continue;
521
522 progress_passed = 1;
523 blocknr = btrfs_node_blockptr(parent, i);
524 gen = btrfs_node_ptr_generation(parent, i);
525 if (last_block == 0)
526 last_block = blocknr;
527
528 if (i > 0) {
529 other = btrfs_node_blockptr(parent, i - 1);
530 close = close_blocks(blocknr, other, blocksize);
531 }
532 if (!close && i < end_slot - 2) {
533 other = btrfs_node_blockptr(parent, i + 1);
534 close = close_blocks(blocknr, other, blocksize);
535 }
536 if (close) {
537 last_block = blocknr;
538 continue;
539 }
540 if (parent->map_token) {
541 unmap_extent_buffer(parent, parent->map_token,
542 KM_USER1);
543 parent->map_token = NULL;
544 }
545
546 cur = btrfs_find_tree_block(root, blocknr, blocksize);
547 if (cur)
548 uptodate = btrfs_buffer_uptodate(cur, gen);
549 else
550 uptodate = 0;
551 if (!cur || !uptodate) {
552 if (cache_only) {
553 free_extent_buffer(cur);
554 continue;
555 }
556 if (!cur) {
557 cur = read_tree_block(root, blocknr,
558 blocksize, gen);
559 } else if (!uptodate) {
560 btrfs_read_buffer(cur, gen);
561 }
562 }
563 if (search_start == 0)
564 search_start = last_block;
565
566 btrfs_tree_lock(cur);
567 err = __btrfs_cow_block(trans, root, cur, parent, i,
568 &cur, search_start,
569 min(16 * blocksize,
570 (end_slot - i) * blocksize), 0);
571 if (err) {
572 btrfs_tree_unlock(cur);
573 free_extent_buffer(cur);
574 break;
575 }
576 search_start = cur->start;
577 last_block = cur->start;
578 *last_ret = search_start;
579 btrfs_tree_unlock(cur);
580 free_extent_buffer(cur);
581 }
582 if (parent->map_token) {
583 unmap_extent_buffer(parent, parent->map_token,
584 KM_USER1);
585 parent->map_token = NULL;
586 }
587 return err;
588}
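/*
 * Illustrative note (not in the original source): the empty_size hint
 * passed to __btrfs_cow_block() above is min(16 * blocksize,
 * (end_slot - i) * blocksize), asking for up to 16 blocks of slack but
 * never more than the remaining slots could use, so later leaves in the
 * same defrag pass can land next to this one.
 */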
589
590/*
591 * The leaf data grows from end-to-front in the node.
592 * this returns the address of the start of the last item,
593 * which is the stop of the leaf data stack
594 */
595static inline unsigned int leaf_data_end(struct btrfs_root *root,
596 struct extent_buffer *leaf)
597{
598 u32 nr = btrfs_header_nritems(leaf);
599 if (nr == 0)
600 return BTRFS_LEAF_DATA_SIZE(root);
601 return btrfs_item_offset_nr(leaf, nr - 1);
602}
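/*
 * Illustrative note (not in the original source): a leaf is laid out as
 *
 *	[header][item 0][item 1]...  free space  ...[data 1][data 0]
 *
 * item structs grow from the front, their data grows back from the end,
 * and leaf_data_end() returns the offset where the last item's data
 * begins, i.e. the low edge of the data stack.
 */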
603
604/*
605 * extra debugging checks to make sure all the keys in a node are
606 * well formed and in the proper order
607 */
608static int check_node(struct btrfs_root *root, struct btrfs_path *path,
609 int level)
610{
611 struct extent_buffer *parent = NULL;
612 struct extent_buffer *node = path->nodes[level];
613 struct btrfs_disk_key parent_key;
614 struct btrfs_disk_key node_key;
615 int parent_slot;
616 int slot;
617 struct btrfs_key cpukey;
618 u32 nritems = btrfs_header_nritems(node);
619
620 if (path->nodes[level + 1])
621 parent = path->nodes[level + 1];
622
623 slot = path->slots[level];
624 BUG_ON(nritems == 0);
625 if (parent) {
626 parent_slot = path->slots[level + 1];
627 btrfs_node_key(parent, &parent_key, parent_slot);
628 btrfs_node_key(node, &node_key, 0);
629 BUG_ON(memcmp(&parent_key, &node_key,
630 sizeof(struct btrfs_disk_key)));
631 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
632 btrfs_header_bytenr(node));
633 }
634 BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
635 if (slot != 0) {
636 btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
637 btrfs_node_key(node, &node_key, slot);
638 BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
639 }
640 if (slot < nritems - 1) {
641 btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
642 btrfs_node_key(node, &node_key, slot);
643 BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
644 }
645 return 0;
646}
647
648/*
649 * extra checking to make sure all the items in a leaf are
650 * well formed and in the proper order
651 */
652static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
653 int level)
654{
655 struct extent_buffer *leaf = path->nodes[level];
656 struct extent_buffer *parent = NULL;
657 int parent_slot;
658 struct btrfs_key cpukey;
659 struct btrfs_disk_key parent_key;
660 struct btrfs_disk_key leaf_key;
661 int slot = path->slots[0];
662
663 u32 nritems = btrfs_header_nritems(leaf);
664
665 if (path->nodes[level + 1])
666 parent = path->nodes[level + 1];
667
668 if (nritems == 0)
669 return 0;
670
671 if (parent) {
672 parent_slot = path->slots[level + 1];
673 btrfs_node_key(parent, &parent_key, parent_slot);
674 btrfs_item_key(leaf, &leaf_key, 0);
675
676 BUG_ON(memcmp(&parent_key, &leaf_key,
677 sizeof(struct btrfs_disk_key)));
678 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
679 btrfs_header_bytenr(leaf));
680 }
681 if (slot != 0 && slot < nritems - 1) {
682 btrfs_item_key(leaf, &leaf_key, slot);
683 btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
684 if (comp_keys(&leaf_key, &cpukey) <= 0) {
685 btrfs_print_leaf(root, leaf);
686 printk(KERN_CRIT "slot %d offset bad key\n", slot);
687 BUG_ON(1);
688 }
689 if (btrfs_item_offset_nr(leaf, slot - 1) !=
690 btrfs_item_end_nr(leaf, slot)) {
691 btrfs_print_leaf(root, leaf);
692 printk(KERN_CRIT "slot %d offset bad\n", slot);
693 BUG_ON(1);
694 }
695 }
696 if (slot < nritems - 1) {
697 btrfs_item_key(leaf, &leaf_key, slot);
698 btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
699 BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
700 if (btrfs_item_offset_nr(leaf, slot) !=
701 btrfs_item_end_nr(leaf, slot + 1)) {
702 btrfs_print_leaf(root, leaf);
703 printk(KERN_CRIT "slot %d offset bad\n", slot);
704 BUG_ON(1);
705 }
706 }
707 BUG_ON(btrfs_item_offset_nr(leaf, 0) +
708 btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
709 return 0;
710}
711
712static noinline int check_block(struct btrfs_root *root,
713 struct btrfs_path *path, int level)
714{
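	/*
	 * the sanity checks below are intentionally disabled; the early
	 * return keeps check_leaf()/check_node() compiled but unused
	 */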
715 return 0;
716 if (level == 0)
717 return check_leaf(root, path, level);
718 return check_node(root, path, level);
719}
720
721/*
722 * search for key in the extent_buffer. The items start at offset p,
723 * and they are item_size apart. There are 'max' items in p.
724 *
725 * the slot in the array is returned via slot, and it points to
726 * the place where you would insert key if it is not found in
727 * the array.
728 *
729 * slot may point to max if the key is bigger than all of the keys
730 */
731static noinline int generic_bin_search(struct extent_buffer *eb,
732 unsigned long p,
733 int item_size, struct btrfs_key *key,
734 int max, int *slot)
735{
736 int low = 0;
737 int high = max;
738 int mid;
739 int ret;
740 struct btrfs_disk_key *tmp = NULL;
741 struct btrfs_disk_key unaligned;
742 unsigned long offset;
743 char *map_token = NULL;
744 char *kaddr = NULL;
745 unsigned long map_start = 0;
746 unsigned long map_len = 0;
747 int err;
748
749 while (low < high) {
750 mid = (low + high) / 2;
751 offset = p + mid * item_size;
752
753 if (!map_token || offset < map_start ||
754 (offset + sizeof(struct btrfs_disk_key)) >
755 map_start + map_len) {
756 if (map_token) {
757 unmap_extent_buffer(eb, map_token, KM_USER0);
758 map_token = NULL;
759 }
760
761 err = map_private_extent_buffer(eb, offset,
762 sizeof(struct btrfs_disk_key),
763 &map_token, &kaddr,
764 &map_start, &map_len, KM_USER0);
765
766 if (!err) {
767 tmp = (struct btrfs_disk_key *)(kaddr + offset -
768 map_start);
769 } else {
770 read_extent_buffer(eb, &unaligned,
771 offset, sizeof(unaligned));
772 tmp = &unaligned;
773 }
774
775 } else {
776 tmp = (struct btrfs_disk_key *)(kaddr + offset -
777 map_start);
778 }
779 ret = comp_keys(tmp, key);
780
781 if (ret < 0)
782 low = mid + 1;
783 else if (ret > 0)
784 high = mid;
785 else {
786 *slot = mid;
787 if (map_token)
788 unmap_extent_buffer(eb, map_token, KM_USER0);
789 return 0;
790 }
791 }
792 *slot = low;
793 if (map_token)
794 unmap_extent_buffer(eb, map_token, KM_USER0);
795 return 1;
796}
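/*
 * Illustrative note (not in the original source): on a miss the return
 * value is 1 and *slot is the insertion point.  Searching for 7 among
 * sorted keys {2, 5, 9} ends with *slot == 2, since 7 belongs between
 * 5 and 9; searching for 5 returns 0 with *slot == 1.
 */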
797
798/*
799 * simple bin_search frontend that does the right thing for
800 * leaves vs nodes
801 */
802static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
803 int level, int *slot)
804{
805 if (level == 0) {
806 return generic_bin_search(eb,
807 offsetof(struct btrfs_leaf, items),
808 sizeof(struct btrfs_item),
809 key, btrfs_header_nritems(eb),
810 slot);
811 } else {
812 return generic_bin_search(eb,
813 offsetof(struct btrfs_node, ptrs),
814 sizeof(struct btrfs_key_ptr),
815 key, btrfs_header_nritems(eb),
816 slot);
817 }
818 return -1;
819}
820
821/* given a node and slot number, this reads the block it points to. The
822 * extent buffer is returned with a reference taken (but unlocked).
823 * NULL is returned on error.
824 */
825static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
826 struct extent_buffer *parent, int slot)
827{
828 int level = btrfs_header_level(parent);
829 if (slot < 0)
830 return NULL;
831 if (slot >= btrfs_header_nritems(parent))
832 return NULL;
833
834 BUG_ON(level == 0);
835
836 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
837 btrfs_level_size(root, level - 1),
838 btrfs_node_ptr_generation(parent, slot));
839}
840
841/*
842 * node level balancing, used to make sure nodes are in proper order for
843 * item deletion. We balance from the top down, so we have to make sure
844 * that a deletion won't leave a node completely empty later on.
845 */
846static noinline int balance_level(struct btrfs_trans_handle *trans,
847 struct btrfs_root *root,
848 struct btrfs_path *path, int level)
849{
850 struct extent_buffer *right = NULL;
851 struct extent_buffer *mid;
852 struct extent_buffer *left = NULL;
853 struct extent_buffer *parent = NULL;
854 int ret = 0;
855 int wret;
856 int pslot;
857 int orig_slot = path->slots[level];
858 int err_on_enospc = 0;
859 u64 orig_ptr;
860
861 if (level == 0)
862 return 0;
863
864 mid = path->nodes[level];
865 WARN_ON(!path->locks[level]);
866 WARN_ON(btrfs_header_generation(mid) != trans->transid);
867
868 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
869
870 if (level < BTRFS_MAX_LEVEL - 1)
871 parent = path->nodes[level + 1];
872 pslot = path->slots[level + 1];
873
874 /*
875 * deal with the case where there is only one pointer in the root
876 * by promoting the node below to a root
877 */
878 if (!parent) {
879 struct extent_buffer *child;
880
881 if (btrfs_header_nritems(mid) != 1)
882 return 0;
883
884 /* promote the child to a root */
885 child = read_node_slot(root, mid, 0);
886 BUG_ON(!child);
887 btrfs_tree_lock(child);
888 ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 0);
889 BUG_ON(ret);
890
891 spin_lock(&root->node_lock);
892 root->node = child;
893 spin_unlock(&root->node_lock);
894
895 ret = btrfs_update_extent_ref(trans, root, child->start,
896 mid->start, child->start,
897 root->root_key.objectid,
898 trans->transid, level - 1);
899 BUG_ON(ret);
900
901 add_root_to_dirty_list(root);
902 btrfs_tree_unlock(child);
903 path->locks[level] = 0;
904 path->nodes[level] = NULL;
905 clean_tree_block(trans, root, mid);
906 btrfs_tree_unlock(mid);
907 /* once for the path */
908 free_extent_buffer(mid);
909 ret = btrfs_free_extent(trans, root, mid->start, mid->len,
910 mid->start, root->root_key.objectid,
911 btrfs_header_generation(mid),
912 level, 1);
913 /* once for the root ptr */
914 free_extent_buffer(mid);
915 return ret;
916 }
917 if (btrfs_header_nritems(mid) >
918 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
919 return 0;
920
921 if (btrfs_header_nritems(mid) < 2)
922 err_on_enospc = 1;
923
924 left = read_node_slot(root, parent, pslot - 1);
925 if (left) {
926 btrfs_tree_lock(left);
927 wret = btrfs_cow_block(trans, root, left,
928 parent, pslot - 1, &left, 0);
929 if (wret) {
930 ret = wret;
931 goto enospc;
932 }
933 }
934 right = read_node_slot(root, parent, pslot + 1);
935 if (right) {
936 btrfs_tree_lock(right);
937 wret = btrfs_cow_block(trans, root, right,
938 parent, pslot + 1, &right, 0);
939 if (wret) {
940 ret = wret;
941 goto enospc;
942 }
943 }
944
945 /* first, try to make some room in the middle buffer */
946 if (left) {
947 orig_slot += btrfs_header_nritems(left);
948 wret = push_node_left(trans, root, left, mid, 1);
949 if (wret < 0)
950 ret = wret;
951 if (btrfs_header_nritems(mid) < 2)
952 err_on_enospc = 1;
953 }
954
955 /*
956 * then try to empty the right most buffer into the middle
957 */
958 if (right) {
959 wret = push_node_left(trans, root, mid, right, 1);
960 if (wret < 0 && wret != -ENOSPC)
961 ret = wret;
962 if (btrfs_header_nritems(right) == 0) {
963 u64 bytenr = right->start;
964 u64 generation = btrfs_header_generation(parent);
965 u32 blocksize = right->len;
966
967 clean_tree_block(trans, root, right);
968 btrfs_tree_unlock(right);
969 free_extent_buffer(right);
970 right = NULL;
971 wret = del_ptr(trans, root, path, level + 1, pslot +
972 1);
973 if (wret)
974 ret = wret;
975 wret = btrfs_free_extent(trans, root, bytenr,
976 blocksize, parent->start,
977 btrfs_header_owner(parent),
978 generation, level, 1);
979 if (wret)
980 ret = wret;
981 } else {
982 struct btrfs_disk_key right_key;
983 btrfs_node_key(right, &right_key, 0);
984 btrfs_set_node_key(parent, &right_key, pslot + 1);
985 btrfs_mark_buffer_dirty(parent);
986 }
987 }
988 if (btrfs_header_nritems(mid) == 1) {
989 /*
990 * we're not allowed to leave a node with one item in the
991 * tree during a delete. A deletion from lower in the tree
992 * could try to delete the only pointer in this node.
993 * So, pull some keys from the left.
994 * There has to be a left pointer at this point because
995 * otherwise we would have pulled some pointers from the
996 * right
997 */
998 BUG_ON(!left);
999 wret = balance_node_right(trans, root, mid, left);
1000 if (wret < 0) {
1001 ret = wret;
1002 goto enospc;
1003 }
1004 if (wret == 1) {
1005 wret = push_node_left(trans, root, left, mid, 1);
1006 if (wret < 0)
1007 ret = wret;
1008 }
1009 BUG_ON(wret == 1);
1010 }
1011 if (btrfs_header_nritems(mid) == 0) {
1012 /* we've managed to empty the middle node, drop it */
1013 u64 root_gen = btrfs_header_generation(parent);
1014 u64 bytenr = mid->start;
1015 u32 blocksize = mid->len;
1016
1017 clean_tree_block(trans, root, mid);
1018 btrfs_tree_unlock(mid);
1019 free_extent_buffer(mid);
1020 mid = NULL;
1021 wret = del_ptr(trans, root, path, level + 1, pslot);
1022 if (wret)
1023 ret = wret;
1024 wret = btrfs_free_extent(trans, root, bytenr, blocksize,
1025 parent->start,
1026 btrfs_header_owner(parent),
1027 root_gen, level, 1);
1028 if (wret)
1029 ret = wret;
1030 } else {
1031 /* update the parent key to reflect our changes */
1032 struct btrfs_disk_key mid_key;
1033 btrfs_node_key(mid, &mid_key, 0);
1034 btrfs_set_node_key(parent, &mid_key, pslot);
1035 btrfs_mark_buffer_dirty(parent);
1036 }
1037
1038 /* update the path */
1039 if (left) {
1040 if (btrfs_header_nritems(left) > orig_slot) {
1041 extent_buffer_get(left);
1042 /* left was locked after cow */
1043 path->nodes[level] = left;
1044 path->slots[level + 1] -= 1;
1045 path->slots[level] = orig_slot;
1046 if (mid) {
1047 btrfs_tree_unlock(mid);
1048 free_extent_buffer(mid);
1049 }
1050 } else {
1051 orig_slot -= btrfs_header_nritems(left);
1052 path->slots[level] = orig_slot;
1053 }
1054 }
1055 /* double check we haven't messed things up */
1056 check_block(root, path, level);
1057 if (orig_ptr !=
1058 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1059 BUG();
1060enospc:
1061 if (right) {
1062 btrfs_tree_unlock(right);
1063 free_extent_buffer(right);
1064 }
1065 if (left) {
1066 if (path->nodes[level] != left)
1067 btrfs_tree_unlock(left);
1068 free_extent_buffer(left);
1069 }
1070 return ret;
1071}
1072
1073/* Node balancing for insertion. Here we only split or push nodes around
1074 * when they are completely full. This is also done top down, so we
1075 * have to be pessimistic.
1076 */
1077static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1078 struct btrfs_root *root,
1079 struct btrfs_path *path, int level)
1080{
1081 struct extent_buffer *right = NULL;
1082 struct extent_buffer *mid;
1083 struct extent_buffer *left = NULL;
1084 struct extent_buffer *parent = NULL;
1085 int ret = 0;
1086 int wret;
1087 int pslot;
1088 int orig_slot = path->slots[level];
1089 u64 orig_ptr;
1090
1091 if (level == 0)
1092 return 1;
1093
1094 mid = path->nodes[level];
1095 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1096 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1097
1098 if (level < BTRFS_MAX_LEVEL - 1)
1099 parent = path->nodes[level + 1];
1100 pslot = path->slots[level + 1];
1101
1102 if (!parent)
1103 return 1;
1104
1105 left = read_node_slot(root, parent, pslot - 1);
1106
1107 /* first, try to make some room in the middle buffer */
1108 if (left) {
1109 u32 left_nr;
1110
1111 btrfs_tree_lock(left);
1112 left_nr = btrfs_header_nritems(left);
1113 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1114 wret = 1;
1115 } else {
1116 ret = btrfs_cow_block(trans, root, left, parent,
1117 pslot - 1, &left, 0);
1118 if (ret)
1119 wret = 1;
1120 else {
1121 wret = push_node_left(trans, root,
1122 left, mid, 0);
1123 }
1124 }
1125 if (wret < 0)
1126 ret = wret;
1127 if (wret == 0) {
1128 struct btrfs_disk_key disk_key;
1129 orig_slot += left_nr;
1130 btrfs_node_key(mid, &disk_key, 0);
1131 btrfs_set_node_key(parent, &disk_key, pslot);
1132 btrfs_mark_buffer_dirty(parent);
1133 if (btrfs_header_nritems(left) > orig_slot) {
1134 path->nodes[level] = left;
1135 path->slots[level + 1] -= 1;
1136 path->slots[level] = orig_slot;
1137 btrfs_tree_unlock(mid);
1138 free_extent_buffer(mid);
1139 } else {
1140 orig_slot -=
1141 btrfs_header_nritems(left);
1142 path->slots[level] = orig_slot;
1143 btrfs_tree_unlock(left);
1144 free_extent_buffer(left);
1145 }
1146 return 0;
1147 }
1148 btrfs_tree_unlock(left);
1149 free_extent_buffer(left);
1150 }
1151 right = read_node_slot(root, parent, pslot + 1);
1152
1153 /*
1154 * then try to empty the right most buffer into the middle
1155 */
1156 if (right) {
1157 u32 right_nr;
1158 btrfs_tree_lock(right);
1159 right_nr = btrfs_header_nritems(right);
1160 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1161 wret = 1;
1162 } else {
1163 ret = btrfs_cow_block(trans, root, right,
1164 parent, pslot + 1,
1165 &right, 0);
1166 if (ret)
1167 wret = 1;
1168 else {
1169 wret = balance_node_right(trans, root,
1170 right, mid);
1171 }
1172 }
1173 if (wret < 0)
1174 ret = wret;
1175 if (wret == 0) {
1176 struct btrfs_disk_key disk_key;
1177
1178 btrfs_node_key(right, &disk_key, 0);
1179 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1180 btrfs_mark_buffer_dirty(parent);
1181
1182 if (btrfs_header_nritems(mid) <= orig_slot) {
1183 path->nodes[level] = right;
1184 path->slots[level + 1] += 1;
1185 path->slots[level] = orig_slot -
1186 btrfs_header_nritems(mid);
1187 btrfs_tree_unlock(mid);
1188 free_extent_buffer(mid);
1189 } else {
1190 btrfs_tree_unlock(right);
1191 free_extent_buffer(right);
1192 }
1193 return 0;
1194 }
1195 btrfs_tree_unlock(right);
1196 free_extent_buffer(right);
1197 }
1198 return 1;
1199}
1200
1201/*
1202 * readahead one full node of leaves, finding things that are close
1203 * to the block in 'slot', and triggering readahead on them.
1204 */
1205static noinline void reada_for_search(struct btrfs_root *root,
1206 struct btrfs_path *path,
1207 int level, int slot, u64 objectid)
1208{
1209 struct extent_buffer *node;
1210 struct btrfs_disk_key disk_key;
1211 u32 nritems;
1212 u64 search;
1213 u64 lowest_read;
1214 u64 highest_read;
1215 u64 nread = 0;
1216 int direction = path->reada;
1217 struct extent_buffer *eb;
1218 u32 nr;
1219 u32 blocksize;
1220 u32 nscan = 0;
1221
1222 if (level != 1)
1223 return;
1224
1225 if (!path->nodes[level])
1226 return;
1227
1228 node = path->nodes[level];
1229
1230 search = btrfs_node_blockptr(node, slot);
1231 blocksize = btrfs_level_size(root, level - 1);
1232 eb = btrfs_find_tree_block(root, search, blocksize);
1233 if (eb) {
1234 free_extent_buffer(eb);
1235 return;
1236 }
1237
1238 highest_read = search;
1239 lowest_read = search;
1240
1241 nritems = btrfs_header_nritems(node);
1242 nr = slot;
1243 while (1) {
1244 if (direction < 0) {
1245 if (nr == 0)
1246 break;
1247 nr--;
1248 } else if (direction > 0) {
1249 nr++;
1250 if (nr >= nritems)
1251 break;
1252 }
1253 if (path->reada < 0 && objectid) {
1254 btrfs_node_key(node, &disk_key, nr);
1255 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1256 break;
1257 }
1258 search = btrfs_node_blockptr(node, nr);
1259 if ((search >= lowest_read && search <= highest_read) ||
1260 (search < lowest_read && lowest_read - search <= 16384) ||
1261 (search > highest_read && search - highest_read <= 16384)) {
1262 readahead_tree_block(root, search, blocksize,
1263 btrfs_node_ptr_generation(node, nr));
1264 nread += blocksize;
1265 }
1266 nscan++;
1267 if (path->reada < 2 && (nread > (64 * 1024) || nscan > 32))
1268 break;
1269
1270 if (nread > (256 * 1024) || nscan > 128)
1271 break;
1272
1273 if (search < lowest_read)
1274 lowest_read = search;
1275 if (search > highest_read)
1276 highest_read = search;
1277 }
1278}
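/*
 * Illustrative note (not in the original source): readahead stops early
 * for non-aggressive paths (path->reada < 2) after 64KB or 32 blocks,
 * and unconditionally after 256KB or 128 blocks, so a single search
 * never queues an unbounded amount of IO.
 */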
1279
1280/*
1281 * when we walk down the tree, it is usually safe to unlock the higher layers
1282 * in the tree. The exceptions are when our path goes through slot 0, because
1283 * operations on the tree might require changing key pointers higher up in the
1284 * tree.
1285 *
1286 * callers might also have set path->keep_locks, which tells this code to keep
1287 * the lock if the path points to the last slot in the block. This is part of
1288 * walking through the tree, and selecting the next slot in the higher block.
1289 *
1290 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
1291 * if lowest_unlock is 1, level 0 won't be unlocked
1292 */
1293static noinline void unlock_up(struct btrfs_path *path, int level,
1294 int lowest_unlock)
1295{
1296 int i;
1297 int skip_level = level;
1298 int no_skips = 0;
1299 struct extent_buffer *t;
1300
1301 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1302 if (!path->nodes[i])
1303 break;
1304 if (!path->locks[i])
1305 break;
1306 if (!no_skips && path->slots[i] == 0) {
1307 skip_level = i + 1;
1308 continue;
1309 }
1310 if (!no_skips && path->keep_locks) {
1311 u32 nritems;
1312 t = path->nodes[i];
1313 nritems = btrfs_header_nritems(t);
1314 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1315 skip_level = i + 1;
1316 continue;
1317 }
1318 }
1319 if (skip_level < i && i >= lowest_unlock)
1320 no_skips = 1;
1321
1322 t = path->nodes[i];
1323 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
1324 btrfs_tree_unlock(t);
1325 path->locks[i] = 0;
1326 }
1327 }
1328}
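/*
 * Illustrative note (not in the original source): passing through slot 0
 * at level i keeps the lock at level i + 1, because an operation at the
 * front of a block may have to rewrite the parent's key for it.
 */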
1329
1330/*
1331 * look for key in the tree. path is filled in with nodes along the way
1332 * if key is found, we return zero and you can find the item in the leaf
1333 * level of the path (level 0)
1334 *
1335 * If the key isn't found, the path points to the slot where it should
1336 * be inserted, and 1 is returned. If there are other errors during the
1337 * search a negative error number is returned.
1338 *
1339 * if ins_len > 0, nodes and leaves will be split as we walk down the
1340 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
1341 * possible)
1342 */
1343int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1344 *root, struct btrfs_key *key, struct btrfs_path *p, int
1345 ins_len, int cow)
1346{
1347 struct extent_buffer *b;
1348 struct extent_buffer *tmp;
1349 int slot;
1350 int ret;
1351 int level;
1352 int should_reada = p->reada;
1353 int lowest_unlock = 1;
1354 int blocksize;
1355 u8 lowest_level = 0;
1356 u64 blocknr;
1357 u64 gen;
1358 struct btrfs_key prealloc_block;
1359
1360 lowest_level = p->lowest_level;
1361 WARN_ON(lowest_level && ins_len > 0);
1362 WARN_ON(p->nodes[0] != NULL);
1363
1364 if (ins_len < 0)
1365 lowest_unlock = 2;
1366
1367 prealloc_block.objectid = 0;
1368
1369again:
1370 if (p->skip_locking)
1371 b = btrfs_root_node(root);
1372 else
1373 b = btrfs_lock_root_node(root);
1374
1375 while (b) {
1376 level = btrfs_header_level(b);
1377
1378 /*
1379 * setup the path here so we can release it under lock
1380 * contention with the cow code
1381 */
1382 p->nodes[level] = b;
1383 if (!p->skip_locking)
1384 p->locks[level] = 1;
1385
1386 if (cow) {
1387 int wret;
1388
1389			/* check whether a cow of this block can be skipped */
1390 spin_lock(&root->fs_info->hash_lock);
1391 if (btrfs_header_generation(b) == trans->transid &&
1392 btrfs_header_owner(b) == root->root_key.objectid &&
1393 !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) {
1394 spin_unlock(&root->fs_info->hash_lock);
1395 goto cow_done;
1396 }
1397 spin_unlock(&root->fs_info->hash_lock);
1398
1399 /* ok, we have to cow, is our old prealloc the right
1400 * size?
1401 */
1402 if (prealloc_block.objectid &&
1403 prealloc_block.offset != b->len) {
1404 btrfs_free_reserved_extent(root,
1405 prealloc_block.objectid,
1406 prealloc_block.offset);
1407 prealloc_block.objectid = 0;
1408 }
1409
1410 /*
1411 * for higher level blocks, try not to allocate blocks
1412 * with the block and the parent locks held.
1413 */
1414 if (level > 1 && !prealloc_block.objectid &&
1415 btrfs_path_lock_waiting(p, level)) {
1416 u32 size = b->len;
1417 u64 hint = b->start;
1418
1419 btrfs_release_path(root, p);
1420 ret = btrfs_reserve_extent(trans, root,
1421 size, size, 0,
1422 hint, (u64)-1,
1423 &prealloc_block, 0);
1424 BUG_ON(ret);
1425 goto again;
1426 }
1427
1428 wret = btrfs_cow_block(trans, root, b,
1429 p->nodes[level + 1],
1430 p->slots[level + 1],
1431 &b, prealloc_block.objectid);
1432 prealloc_block.objectid = 0;
1433 if (wret) {
1434 free_extent_buffer(b);
1435 ret = wret;
1436 goto done;
1437 }
1438 }
1439cow_done:
1440 BUG_ON(!cow && ins_len);
1441 if (level != btrfs_header_level(b))
1442 WARN_ON(1);
1443 level = btrfs_header_level(b);
1444
1445 p->nodes[level] = b;
1446 if (!p->skip_locking)
1447 p->locks[level] = 1;
1448
1449 ret = check_block(root, p, level);
1450 if (ret) {
1451 ret = -1;
1452 goto done;
1453 }
1454
1455 ret = bin_search(b, key, level, &slot);
1456 if (level != 0) {
1457 if (ret && slot > 0)
1458 slot -= 1;
1459 p->slots[level] = slot;
1460 if ((p->search_for_split || ins_len > 0) &&
1461 btrfs_header_nritems(b) >=
1462 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1463 int sret = split_node(trans, root, p, level);
1464 BUG_ON(sret > 0);
1465 if (sret) {
1466 ret = sret;
1467 goto done;
1468 }
1469 b = p->nodes[level];
1470 slot = p->slots[level];
1471 } else if (ins_len < 0) {
1472 int sret = balance_level(trans, root, p,
1473 level);
1474 if (sret) {
1475 ret = sret;
1476 goto done;
1477 }
1478 b = p->nodes[level];
1479 if (!b) {
1480 btrfs_release_path(NULL, p);
1481 goto again;
1482 }
1483 slot = p->slots[level];
1484 BUG_ON(btrfs_header_nritems(b) == 1);
1485 }
1486 unlock_up(p, level, lowest_unlock);
1487
1488 /* this is only true while dropping a snapshot */
1489 if (level == lowest_level) {
1490 ret = 0;
1491 goto done;
1492 }
1493
1494 blocknr = btrfs_node_blockptr(b, slot);
1495 gen = btrfs_node_ptr_generation(b, slot);
1496 blocksize = btrfs_level_size(root, level - 1);
1497
1498 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
1499 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
1500 b = tmp;
1501 } else {
1502 /*
1503 * reduce lock contention at high levels
1504 * of the btree by dropping locks before
1505 * we read.
1506 */
1507 if (level > 1) {
1508 btrfs_release_path(NULL, p);
1509 if (tmp)
1510 free_extent_buffer(tmp);
1511 if (should_reada)
1512 reada_for_search(root, p,
1513 level, slot,
1514 key->objectid);
1515
1516 tmp = read_tree_block(root, blocknr,
1517 blocksize, gen);
1518 if (tmp)
1519 free_extent_buffer(tmp);
1520 goto again;
1521 } else {
1522 if (tmp)
1523 free_extent_buffer(tmp);
1524 if (should_reada)
1525 reada_for_search(root, p,
1526 level, slot,
1527 key->objectid);
1528 b = read_node_slot(root, b, slot);
1529 }
1530 }
1531 if (!p->skip_locking)
1532 btrfs_tree_lock(b);
1533 } else {
1534 p->slots[level] = slot;
1535 if (ins_len > 0 &&
1536 btrfs_leaf_free_space(root, b) < ins_len) {
1537 int sret = split_leaf(trans, root, key,
1538 p, ins_len, ret == 0);
1539 BUG_ON(sret > 0);
1540 if (sret) {
1541 ret = sret;
1542 goto done;
1543 }
1544 }
1545 if (!p->search_for_split)
1546 unlock_up(p, level, lowest_unlock);
1547 goto done;
1548 }
1549 }
1550 ret = 1;
1551done:
1552 if (prealloc_block.objectid) {
1553 btrfs_free_reserved_extent(root,
1554 prealloc_block.objectid,
1555 prealloc_block.offset);
1556 }
1557
1558 return ret;
1559}
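/*
 * Illustrative sketch (not in the original source, compiled out): a
 * plain read-only lookup.  Passing a NULL trans, ins_len of 0 and cow
 * of 0 means no block is split, merged or copied on the way down.
 */
#if 0
static int search_slot_example(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/*
	 * ret == 0: exact match at path->nodes[0], path->slots[0]
	 * ret == 1: not found, the slot is where it would be inserted
	 * ret < 0: error
	 */
	btrfs_free_path(path);
	return ret;
}
#endif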
1560
1561int btrfs_merge_path(struct btrfs_trans_handle *trans,
1562 struct btrfs_root *root,
1563 struct btrfs_key *node_keys,
1564 u64 *nodes, int lowest_level)
1565{
1566 struct extent_buffer *eb;
1567 struct extent_buffer *parent;
1568 struct btrfs_key key;
1569 u64 bytenr;
1570 u64 generation;
1571 u32 blocksize;
1572 int level;
1573 int slot;
1574 int key_match;
1575 int ret;
1576
1577 eb = btrfs_lock_root_node(root);
1578 ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb, 0);
1579 BUG_ON(ret);
1580
1581 parent = eb;
1582 while (1) {
1583 level = btrfs_header_level(parent);
1584 if (level == 0 || level <= lowest_level)
1585 break;
1586
1587 ret = bin_search(parent, &node_keys[lowest_level], level,
1588 &slot);
1589 if (ret && slot > 0)
1590 slot--;
1591
1592 bytenr = btrfs_node_blockptr(parent, slot);
1593 if (nodes[level - 1] == bytenr)
1594 break;
1595
1596 blocksize = btrfs_level_size(root, level - 1);
1597 generation = btrfs_node_ptr_generation(parent, slot);
1598 btrfs_node_key_to_cpu(eb, &key, slot);
1599 key_match = !memcmp(&key, &node_keys[level - 1], sizeof(key));
1600
1601 if (generation == trans->transid) {
1602 eb = read_tree_block(root, bytenr, blocksize,
1603 generation);
1604 btrfs_tree_lock(eb);
1605 }
1606
1607 /*
1608 * if node keys match and node pointer hasn't been modified
1609 * in the running transaction, we can merge the path. for
1610 * blocks owned by reloc trees, the node pointer check is
1611 * skipped; these blocks are fully controlled by the space
1612 * balance code and no one else can modify them.
1613 */
1614 if (!nodes[level - 1] || !key_match ||
1615 (generation == trans->transid &&
1616 btrfs_header_owner(eb) != BTRFS_TREE_RELOC_OBJECTID)) {
1617 if (level == 1 || level == lowest_level + 1) {
1618 if (generation == trans->transid) {
1619 btrfs_tree_unlock(eb);
1620 free_extent_buffer(eb);
1621 }
1622 break;
1623 }
1624
1625 if (generation != trans->transid) {
1626 eb = read_tree_block(root, bytenr, blocksize,
1627 generation);
1628 btrfs_tree_lock(eb);
1629 }
1630
1631 ret = btrfs_cow_block(trans, root, eb, parent, slot,
1632 &eb, 0);
1633 BUG_ON(ret);
1634
1635 if (root->root_key.objectid ==
1636 BTRFS_TREE_RELOC_OBJECTID) {
1637 if (!nodes[level - 1]) {
1638 nodes[level - 1] = eb->start;
1639 memcpy(&node_keys[level - 1], &key,
1640 sizeof(node_keys[0]));
1641 } else {
1642 WARN_ON(1);
1643 }
1644 }
1645
1646 btrfs_tree_unlock(parent);
1647 free_extent_buffer(parent);
1648 parent = eb;
1649 continue;
1650 }
1651
1652 btrfs_set_node_blockptr(parent, slot, nodes[level - 1]);
1653 btrfs_set_node_ptr_generation(parent, slot, trans->transid);
1654 btrfs_mark_buffer_dirty(parent);
1655
1656 ret = btrfs_inc_extent_ref(trans, root,
1657 nodes[level - 1],
1658 blocksize, parent->start,
1659 btrfs_header_owner(parent),
1660 btrfs_header_generation(parent),
1661 level - 1);
1662 BUG_ON(ret);
1663
1664 /*
1665 * If the block was created in the running transaction,
1666 * it's possible this is the last reference to it, so we
1667 * should drop the subtree.
1668 */
1669 if (generation == trans->transid) {
1670 ret = btrfs_drop_subtree(trans, root, eb, parent);
1671 BUG_ON(ret);
1672 btrfs_tree_unlock(eb);
1673 free_extent_buffer(eb);
1674 } else {
1675 ret = btrfs_free_extent(trans, root, bytenr,
1676 blocksize, parent->start,
1677 btrfs_header_owner(parent),
1678 btrfs_header_generation(parent),
1679 level - 1, 1);
1680 BUG_ON(ret);
1681 }
1682 break;
1683 }
1684 btrfs_tree_unlock(parent);
1685 free_extent_buffer(parent);
1686 return 0;
1687}
1688
1689/*
1690 * adjust the pointers going up the tree, starting at level
1691 * making sure the right key of each node points to 'key'.
1692 * This is used after shifting pointers to the left, so it stops
1693 * fixing up pointers when a given leaf/node is not in slot 0 of the
1694 * higher levels
1695 *
1696 * If this fails to write a tree block, it returns -1, but continues
1697 * fixing up the blocks in ram so the tree is consistent.
1698 */
1699static int fixup_low_keys(struct btrfs_trans_handle *trans,
1700 struct btrfs_root *root, struct btrfs_path *path,
1701 struct btrfs_disk_key *key, int level)
1702{
1703 int i;
1704 int ret = 0;
1705 struct extent_buffer *t;
1706
1707 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1708 int tslot = path->slots[i];
1709 if (!path->nodes[i])
1710 break;
1711 t = path->nodes[i];
1712 btrfs_set_node_key(t, key, tslot);
1713 btrfs_mark_buffer_dirty(path->nodes[i]);
1714 if (tslot != 0)
1715 break;
1716 }
1717 return ret;
1718}
1719
1720/*
1721 * update item key.
1722 *
1723 * This function isn't completely safe. It's the caller's responsibility
1724 * to ensure that the new key won't break the key order.
1725 */
1726int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1727 struct btrfs_root *root, struct btrfs_path *path,
1728 struct btrfs_key *new_key)
1729{
1730 struct btrfs_disk_key disk_key;
1731 struct extent_buffer *eb;
1732 int slot;
1733
1734 eb = path->nodes[0];
1735 slot = path->slots[0];
1736 if (slot > 0) {
1737 btrfs_item_key(eb, &disk_key, slot - 1);
1738 if (comp_keys(&disk_key, new_key) >= 0)
1739 return -1;
1740 }
1741 if (slot < btrfs_header_nritems(eb) - 1) {
1742 btrfs_item_key(eb, &disk_key, slot + 1);
1743 if (comp_keys(&disk_key, new_key) <= 0)
1744 return -1;
1745 }
1746
1747 btrfs_cpu_key_to_disk(&disk_key, new_key);
1748 btrfs_set_item_key(eb, &disk_key, slot);
1749 btrfs_mark_buffer_dirty(eb);
1750 if (slot == 0)
1751 fixup_low_keys(trans, root, path, &disk_key, 1);
1752 return 0;
1753}
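/*
 * Illustrative note (not in the original source): the "safe" only means
 * the new key is checked against its immediate neighbours.  If a leaf
 * holds keys (5,0,0) and (9,0,0), the first may be rewritten to (7,0,0)
 * but not to (9,0,0) or anything beyond it.
 */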
1754
1755/*
1756 * try to push data from one node into the next node left in the
1757 * tree.
1758 *
1759 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1760 * error, and > 0 if there was no room in the left hand block.
1761 */
1762static int push_node_left(struct btrfs_trans_handle *trans,
1763 struct btrfs_root *root, struct extent_buffer *dst,
1764 struct extent_buffer *src, int empty)
1765{
1766 int push_items = 0;
1767 int src_nritems;
1768 int dst_nritems;
1769 int ret = 0;
1770
1771 src_nritems = btrfs_header_nritems(src);
1772 dst_nritems = btrfs_header_nritems(dst);
1773 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1774 WARN_ON(btrfs_header_generation(src) != trans->transid);
1775 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1776
1777 if (!empty && src_nritems <= 8)
1778 return 1;
1779
1780 if (push_items <= 0)
1781 return 1;
1782
1783 if (empty) {
1784 push_items = min(src_nritems, push_items);
1785 if (push_items < src_nritems) {
1786 /* leave at least 8 pointers in the node if
1787 * we aren't going to empty it
1788 */
1789 if (src_nritems - push_items < 8) {
1790 if (push_items <= 8)
1791 return 1;
1792 push_items -= 8;
1793 }
1794 }
1795 } else
1796 push_items = min(src_nritems - 8, push_items);
1797
1798 copy_extent_buffer(dst, src,
1799 btrfs_node_key_ptr_offset(dst_nritems),
1800 btrfs_node_key_ptr_offset(0),
1801 push_items * sizeof(struct btrfs_key_ptr));
1802
1803 if (push_items < src_nritems) {
1804 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
1805 btrfs_node_key_ptr_offset(push_items),
1806 (src_nritems - push_items) *
1807 sizeof(struct btrfs_key_ptr));
1808 }
1809 btrfs_set_header_nritems(src, src_nritems - push_items);
1810 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1811 btrfs_mark_buffer_dirty(src);
1812 btrfs_mark_buffer_dirty(dst);
1813
1814 ret = btrfs_update_ref(trans, root, src, dst, dst_nritems, push_items);
1815 BUG_ON(ret);
1816
1817 return ret;
1818}
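/*
 * Illustrative note (not in the original source): with empty == 0 and a
 * source node holding 20 pointers, push_node_left() moves at most
 * 20 - 8 = 12 of them, always leaving 8 behind so the node stays usable.
 */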
1819
1820/*
1821 * try to push data from one node into the next node right in the
1822 * tree.
1823 *
1824 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
1825 * error, and > 0 if there was no room in the right hand block.
1826 *
1827 * this will only push up to 1/2 the contents of the left node over
1828 */
1829static int balance_node_right(struct btrfs_trans_handle *trans,
1830 struct btrfs_root *root,
1831 struct extent_buffer *dst,
1832 struct extent_buffer *src)
1833{
1834 int push_items = 0;
1835 int max_push;
1836 int src_nritems;
1837 int dst_nritems;
1838 int ret = 0;
1839
1840 WARN_ON(btrfs_header_generation(src) != trans->transid);
1841 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1842
1843 src_nritems = btrfs_header_nritems(src);
1844 dst_nritems = btrfs_header_nritems(dst);
1845 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1846 if (push_items <= 0)
1847 return 1;
1848
1849 if (src_nritems < 4)
1850 return 1;
1851
1852 max_push = src_nritems / 2 + 1;
1853 /* don't try to empty the node */
1854 if (max_push >= src_nritems)
1855 return 1;
1856
1857 if (max_push < push_items)
1858 push_items = max_push;
1859
1860 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
1861 btrfs_node_key_ptr_offset(0),
1862 (dst_nritems) *
1863 sizeof(struct btrfs_key_ptr));
1864
1865 copy_extent_buffer(dst, src,
1866 btrfs_node_key_ptr_offset(0),
1867 btrfs_node_key_ptr_offset(src_nritems - push_items),
1868 push_items * sizeof(struct btrfs_key_ptr));
1869
1870 btrfs_set_header_nritems(src, src_nritems - push_items);
1871 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1872
1873 btrfs_mark_buffer_dirty(src);
1874 btrfs_mark_buffer_dirty(dst);
1875
1876 ret = btrfs_update_ref(trans, root, src, dst, 0, push_items);
1877 BUG_ON(ret);
1878
1879 return ret;
1880}
1881
1882/*
1883 * helper function to insert a new root level in the tree.
1884 * A new node is allocated, and a single item is inserted to
1885 * point to the existing root
1886 *
1887 * returns zero on success or < 0 on failure.
1888 */
1889static noinline int insert_new_root(struct btrfs_trans_handle *trans,
1890 struct btrfs_root *root,
1891 struct btrfs_path *path, int level)
1892{
1893 u64 lower_gen;
1894 struct extent_buffer *lower;
1895 struct extent_buffer *c;
1896 struct extent_buffer *old;
1897 struct btrfs_disk_key lower_key;
1898 int ret;
1899
1900 BUG_ON(path->nodes[level]);
1901 BUG_ON(path->nodes[level-1] != root->node);
1902
1903 lower = path->nodes[level-1];
1904 if (level == 1)
1905 btrfs_item_key(lower, &lower_key, 0);
1906 else
1907 btrfs_node_key(lower, &lower_key, 0);
1908
1909 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
1910 root->root_key.objectid, trans->transid,
1911 level, root->node->start, 0);
1912 if (IS_ERR(c))
1913 return PTR_ERR(c);
1914
1915 memset_extent_buffer(c, 0, 0, root->nodesize);
1916 btrfs_set_header_nritems(c, 1);
1917 btrfs_set_header_level(c, level);
1918 btrfs_set_header_bytenr(c, c->start);
1919 btrfs_set_header_generation(c, trans->transid);
1920 btrfs_set_header_owner(c, root->root_key.objectid);
1921
1922 write_extent_buffer(c, root->fs_info->fsid,
1923 (unsigned long)btrfs_header_fsid(c),
1924 BTRFS_FSID_SIZE);
1925
1926 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
1927 (unsigned long)btrfs_header_chunk_tree_uuid(c),
1928 BTRFS_UUID_SIZE);
1929
1930 btrfs_set_node_key(c, &lower_key, 0);
1931 btrfs_set_node_blockptr(c, 0, lower->start);
1932 lower_gen = btrfs_header_generation(lower);
1933 WARN_ON(lower_gen != trans->transid);
1934
1935 btrfs_set_node_ptr_generation(c, 0, lower_gen);
1936
1937 btrfs_mark_buffer_dirty(c);
1938
1939 spin_lock(&root->node_lock);
1940 old = root->node;
1941 root->node = c;
1942 spin_unlock(&root->node_lock);
1943
1944 ret = btrfs_update_extent_ref(trans, root, lower->start,
1945 lower->start, c->start,
1946 root->root_key.objectid,
1947 trans->transid, level - 1);
1948 BUG_ON(ret);
1949
1950 /* the super has an extra ref to root->node */
1951 free_extent_buffer(old);
1952
1953 add_root_to_dirty_list(root);
1954 extent_buffer_get(c);
1955 path->nodes[level] = c;
1956 path->locks[level] = 1;
1957 path->slots[level] = 0;
1958 return 0;
1959}
1960
1961/*
1962 * worker function to insert a single pointer in a node.
1963 * the node should have enough room for the pointer already
1964 *
1965 * slot and level indicate where you want the key to go, and
1966 * blocknr is the block the key points to.
1967 *
1968 * returns zero on success and < 0 on any error
1969 */
1970static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
1971 *root, struct btrfs_path *path, struct btrfs_disk_key
1972 *key, u64 bytenr, int slot, int level)
1973{
1974 struct extent_buffer *lower;
1975 int nritems;
1976
1977 BUG_ON(!path->nodes[level]);
1978 lower = path->nodes[level];
1979 nritems = btrfs_header_nritems(lower);
1980 if (slot > nritems)
1981 BUG();
1982 if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
1983 BUG();
1984 if (slot != nritems) {
1985 memmove_extent_buffer(lower,
1986 btrfs_node_key_ptr_offset(slot + 1),
1987 btrfs_node_key_ptr_offset(slot),
1988 (nritems - slot) * sizeof(struct btrfs_key_ptr));
1989 }
1990 btrfs_set_node_key(lower, key, slot);
1991 btrfs_set_node_blockptr(lower, slot, bytenr);
1992 WARN_ON(trans->transid == 0);
1993 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
1994 btrfs_set_header_nritems(lower, nritems + 1);
1995 btrfs_mark_buffer_dirty(lower);
1996 return 0;
1997}
1998
1999/*
2000 * split the node at the specified level in path in two.
2001 * The path is corrected to point to the appropriate node after the split
2002 *
2003 * Before splitting this tries to make some room in the node by pushing
2004 * left and right, if either one works, it returns right away.
2005 *
2006 * returns 0 on success and < 0 on failure
2007 */
2008static noinline int split_node(struct btrfs_trans_handle *trans,
2009 struct btrfs_root *root,
2010 struct btrfs_path *path, int level)
2011{
2012 struct extent_buffer *c;
2013 struct extent_buffer *split;
2014 struct btrfs_disk_key disk_key;
2015 int mid;
2016 int ret;
2017 int wret;
2018 u32 c_nritems;
2019
2020 c = path->nodes[level];
2021 WARN_ON(btrfs_header_generation(c) != trans->transid);
2022 if (c == root->node) {
2023		/* trying to split the root, let's make a new one */
2024 ret = insert_new_root(trans, root, path, level + 1);
2025 if (ret)
2026 return ret;
2027 } else {
2028 ret = push_nodes_for_insert(trans, root, path, level);
2029 c = path->nodes[level];
2030 if (!ret && btrfs_header_nritems(c) <
2031 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
2032 return 0;
2033 if (ret < 0)
2034 return ret;
2035 }
2036
2037 c_nritems = btrfs_header_nritems(c);
2038
2039 split = btrfs_alloc_free_block(trans, root, root->nodesize,
2040 path->nodes[level + 1]->start,
2041 root->root_key.objectid,
2042 trans->transid, level, c->start, 0);
2043 if (IS_ERR(split))
2044 return PTR_ERR(split);
2045
2046 btrfs_set_header_flags(split, btrfs_header_flags(c));
2047 btrfs_set_header_level(split, btrfs_header_level(c));
2048 btrfs_set_header_bytenr(split, split->start);
2049 btrfs_set_header_generation(split, trans->transid);
2050 btrfs_set_header_owner(split, root->root_key.objectid);
2051 btrfs_set_header_flags(split, 0);
2052 write_extent_buffer(split, root->fs_info->fsid,
2053 (unsigned long)btrfs_header_fsid(split),
2054 BTRFS_FSID_SIZE);
2055 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
2056 (unsigned long)btrfs_header_chunk_tree_uuid(split),
2057 BTRFS_UUID_SIZE);
2058
2059 mid = (c_nritems + 1) / 2;
2060
2061 copy_extent_buffer(split, c,
2062 btrfs_node_key_ptr_offset(0),
2063 btrfs_node_key_ptr_offset(mid),
2064 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2065 btrfs_set_header_nritems(split, c_nritems - mid);
2066 btrfs_set_header_nritems(c, mid);
2067 ret = 0;
2068
2069 btrfs_mark_buffer_dirty(c);
2070 btrfs_mark_buffer_dirty(split);
2071
2072 btrfs_node_key(split, &disk_key, 0);
2073 wret = insert_ptr(trans, root, path, &disk_key, split->start,
2074 path->slots[level + 1] + 1,
2075 level + 1);
2076 if (wret)
2077 ret = wret;
2078
2079 ret = btrfs_update_ref(trans, root, c, split, 0, c_nritems - mid);
2080 BUG_ON(ret);
2081
2082 if (path->slots[level] >= mid) {
2083 path->slots[level] -= mid;
2084 btrfs_tree_unlock(c);
2085 free_extent_buffer(c);
2086 path->nodes[level] = split;
2087 path->slots[level + 1] += 1;
2088 } else {
2089 btrfs_tree_unlock(split);
2090 free_extent_buffer(split);
2091 }
2092 return ret;
2093}
2094
2095/*
2096 * how many bytes are required to store the items in a leaf. start
2097 * and nr indicate which items in the leaf to check. This totals up the
2098 * space used both by the item structs and the item data
2099 */
2100static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2101{
2102 int data_len;
2103 int nritems = btrfs_header_nritems(l);
2104 int end = min(nritems, start + nr) - 1;
2105
2106 if (!nr)
2107 return 0;
2108 data_len = btrfs_item_end_nr(l, start);
2109 data_len = data_len - btrfs_item_offset_nr(l, end);
2110 data_len += sizeof(struct btrfs_item) * nr;
2111 WARN_ON(data_len < 0);
2112 return data_len;
2113}
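/*
 * Illustrative sketch (not in the original source): for items 2 and 3
 * of a leaf, leaf_space_used() spans from the end of item 2's data down
 * to the offset of item 3's, then adds two item headers.  If item 2's
 * data ends at 3000 and item 3's starts at 2800, that is 200 data bytes
 * plus 2 * sizeof(struct btrfs_item).
 */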
2114
2115/*
2116 * The space between the end of the leaf items and
2117 * the start of the leaf data. IOW, how much room
2118 * the leaf has left for both items and data
2119 */
2120noinline int btrfs_leaf_free_space(struct btrfs_root *root,
2121 struct extent_buffer *leaf)
2122{
2123 int nritems = btrfs_header_nritems(leaf);
2124 int ret;
2125 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
2126 if (ret < 0) {
2127 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
2128 "used %d nritems %d\n",
2129 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
2130 leaf_space_used(leaf, 0, nritems), nritems);
2131 }
2132 return ret;
2133}
2134
2135/*
2136 * push some data in the path leaf to the right, trying to free up at
2137 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2138 *
2139 * returns 1 if the push failed because the other node didn't have enough
2140 * room, 0 if everything worked out and < 0 if there were major errors.
2141 */
2142static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2143 *root, struct btrfs_path *path, int data_size,
2144 int empty)
2145{
2146 struct extent_buffer *left = path->nodes[0];
2147 struct extent_buffer *right;
2148 struct extent_buffer *upper;
2149 struct btrfs_disk_key disk_key;
2150 int slot;
2151 u32 i;
2152 int free_space;
2153 int push_space = 0;
2154 int push_items = 0;
2155 struct btrfs_item *item;
2156 u32 left_nritems;
2157 u32 nr;
2158 u32 right_nritems;
2159 u32 data_end;
2160 u32 this_item_size;
2161 int ret;
2162
2163 slot = path->slots[1];
2164 if (!path->nodes[1])
2165 return 1;
2166
2167 upper = path->nodes[1];
2168 if (slot >= btrfs_header_nritems(upper) - 1)
2169 return 1;
2170
2171 WARN_ON(!btrfs_tree_locked(path->nodes[1]));
2172
2173 right = read_node_slot(root, upper, slot + 1);
2174 btrfs_tree_lock(right);
2175 free_space = btrfs_leaf_free_space(root, right);
2176 if (free_space < data_size)
2177 goto out_unlock;
2178
2179 /* cow and double check */
2180 ret = btrfs_cow_block(trans, root, right, upper,
2181 slot + 1, &right, 0);
2182 if (ret)
2183 goto out_unlock;
2184
2185 free_space = btrfs_leaf_free_space(root, right);
2186 if (free_space < data_size)
2187 goto out_unlock;
2188
2189 left_nritems = btrfs_header_nritems(left);
2190 if (left_nritems == 0)
2191 goto out_unlock;
2192
2193 if (empty)
2194 nr = 0;
2195 else
2196 nr = 1;
2197
2198 if (path->slots[0] >= left_nritems)
2199 push_space += data_size;
2200
2201 i = left_nritems - 1;
2202 while (i >= nr) {
2203 item = btrfs_item_nr(left, i);
2204
2205 if (!empty && push_items > 0) {
2206 if (path->slots[0] > i)
2207 break;
2208 if (path->slots[0] == i) {
2209 int space = btrfs_leaf_free_space(root, left);
2210 if (space + push_space * 2 > free_space)
2211 break;
2212 }
2213 }
2214
2215 if (path->slots[0] == i)
2216 push_space += data_size;
2217
2218 if (!left->map_token) {
2219 map_extent_buffer(left, (unsigned long)item,
2220 sizeof(struct btrfs_item),
2221 &left->map_token, &left->kaddr,
2222 &left->map_start, &left->map_len,
2223 KM_USER1);
2224 }
2225
2226 this_item_size = btrfs_item_size(left, item);
2227 if (this_item_size + sizeof(*item) + push_space > free_space)
2228 break;
2229
2230 push_items++;
2231 push_space += this_item_size + sizeof(*item);
2232 if (i == 0)
2233 break;
2234 i--;
2235 }
2236 if (left->map_token) {
2237 unmap_extent_buffer(left, left->map_token, KM_USER1);
2238 left->map_token = NULL;
2239 }
2240
2241 if (push_items == 0)
2242 goto out_unlock;
2243
2244 if (!empty && push_items == left_nritems)
2245 WARN_ON(1);
2246
2247 /* push left to right */
2248 right_nritems = btrfs_header_nritems(right);
2249
2250 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
2251 push_space -= leaf_data_end(root, left);
2252
2253 /* make room in the right data area */
2254 data_end = leaf_data_end(root, right);
2255 memmove_extent_buffer(right,
2256 btrfs_leaf_data(right) + data_end - push_space,
2257 btrfs_leaf_data(right) + data_end,
2258 BTRFS_LEAF_DATA_SIZE(root) - data_end);
2259
2260 /* copy from the left data area */
2261 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
2262 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2263 btrfs_leaf_data(left) + leaf_data_end(root, left),
2264 push_space);
2265
2266 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2267 btrfs_item_nr_offset(0),
2268 right_nritems * sizeof(struct btrfs_item));
2269
2270 /* copy the items from left to right */
2271 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2272 btrfs_item_nr_offset(left_nritems - push_items),
2273 push_items * sizeof(struct btrfs_item));
2274
2275 /* update the item pointers */
2276 right_nritems += push_items;
2277 btrfs_set_header_nritems(right, right_nritems);
2278 push_space = BTRFS_LEAF_DATA_SIZE(root);
2279 for (i = 0; i < right_nritems; i++) {
2280 item = btrfs_item_nr(right, i);
2281 if (!right->map_token) {
2282 map_extent_buffer(right, (unsigned long)item,
2283 sizeof(struct btrfs_item),
2284 &right->map_token, &right->kaddr,
2285 &right->map_start, &right->map_len,
2286 KM_USER1);
2287 }
2288 push_space -= btrfs_item_size(right, item);
2289 btrfs_set_item_offset(right, item, push_space);
2290 }
2291
2292 if (right->map_token) {
2293 unmap_extent_buffer(right, right->map_token, KM_USER1);
2294 right->map_token = NULL;
2295 }
2296 left_nritems -= push_items;
2297 btrfs_set_header_nritems(left, left_nritems);
2298
2299 if (left_nritems)
2300 btrfs_mark_buffer_dirty(left);
2301 btrfs_mark_buffer_dirty(right);
2302
2303 ret = btrfs_update_ref(trans, root, left, right, 0, push_items);
2304 BUG_ON(ret);
2305
2306 btrfs_item_key(right, &disk_key, 0);
2307 btrfs_set_node_key(upper, &disk_key, slot + 1);
2308 btrfs_mark_buffer_dirty(upper);
2309
2310 /* then fixup the leaf pointer in the path */
2311 if (path->slots[0] >= left_nritems) {
2312 path->slots[0] -= left_nritems;
2313 if (btrfs_header_nritems(path->nodes[0]) == 0)
2314 clean_tree_block(trans, root, path->nodes[0]);
2315 btrfs_tree_unlock(path->nodes[0]);
2316 free_extent_buffer(path->nodes[0]);
2317 path->nodes[0] = right;
2318 path->slots[1] += 1;
2319 } else {
2320 btrfs_tree_unlock(right);
2321 free_extent_buffer(right);
2322 }
2323 return 0;
2324
2325out_unlock:
2326 btrfs_tree_unlock(right);
2327 free_extent_buffer(right);
2328 return 1;
2329}
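
The offset fixup loop near the end of push_leaf_right recomputes every item offset in the destination leaf from the back of the data area. A standalone sketch of that pass (array-based; the downward-growing data layout is the only assumption):

#include <assert.h>

/* offsets count from the start of the data area; payloads pack against
 * its end, so item i's payload sits immediately below item i-1's */
static void fixup_offsets(unsigned *offset, const unsigned *size,
			  int nritems, unsigned data_area_size)
{
	unsigned pos = data_area_size;
	int i;

	for (i = 0; i < nritems; i++) {
		pos -= size[i];
		offset[i] = pos;
	}
}

int main(void)
{
	unsigned size[3] = { 100, 50, 25 };
	unsigned offset[3];

	fixup_offsets(offset, size, 3, 3995);
	assert(offset[0] == 3895 && offset[1] == 3845 && offset[2] == 3820);
	return 0;
}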
2330
2331/*
2332 * push some data in the path leaf to the left, trying to free up at
2333 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2334 */
2335static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2336 *root, struct btrfs_path *path, int data_size,
2337 int empty)
2338{
2339 struct btrfs_disk_key disk_key;
2340 struct extent_buffer *right = path->nodes[0];
2341 struct extent_buffer *left;
2342 int slot;
2343 int i;
2344 int free_space;
2345 int push_space = 0;
2346 int push_items = 0;
2347 struct btrfs_item *item;
2348 u32 old_left_nritems;
2349 u32 right_nritems;
2350 u32 nr;
2351 int ret = 0;
2352 int wret;
2353 u32 this_item_size;
2354 u32 old_left_item_size;
2355
2356 slot = path->slots[1];
2357 if (slot == 0)
2358 return 1;
2359 if (!path->nodes[1])
2360 return 1;
2361
2362 right_nritems = btrfs_header_nritems(right);
2363 if (right_nritems == 0)
2364 return 1;
2365
2366 WARN_ON(!btrfs_tree_locked(path->nodes[1]));
2367
2368 left = read_node_slot(root, path->nodes[1], slot - 1);
2369 btrfs_tree_lock(left);
2370 free_space = btrfs_leaf_free_space(root, left);
2371 if (free_space < data_size) {
2372 ret = 1;
2373 goto out;
2374 }
2375
2376 /* cow and double check */
2377 ret = btrfs_cow_block(trans, root, left,
2378 path->nodes[1], slot - 1, &left, 0);
2379 if (ret) {
2380 /* we hit -ENOSPC, but it isn't fatal here */
2381 ret = 1;
2382 goto out;
2383 }
2384
2385 free_space = btrfs_leaf_free_space(root, left);
2386 if (free_space < data_size) {
2387 ret = 1;
2388 goto out;
2389 }
2390
2391 if (empty)
2392 nr = right_nritems;
2393 else
2394 nr = right_nritems - 1;
2395
2396 for (i = 0; i < nr; i++) {
2397 item = btrfs_item_nr(right, i);
2398 if (!right->map_token) {
2399 map_extent_buffer(right, (unsigned long)item,
2400 sizeof(struct btrfs_item),
2401 &right->map_token, &right->kaddr,
2402 &right->map_start, &right->map_len,
2403 KM_USER1);
2404 }
2405
2406 if (!empty && push_items > 0) {
2407 if (path->slots[0] < i)
2408 break;
2409 if (path->slots[0] == i) {
2410 int space = btrfs_leaf_free_space(root, right);
2411 if (space + push_space * 2 > free_space)
2412 break;
2413 }
2414 }
2415
2416 if (path->slots[0] == i)
2417 push_space += data_size;
2418
2419 this_item_size = btrfs_item_size(right, item);
2420 if (this_item_size + sizeof(*item) + push_space > free_space)
2421 break;
2422
2423 push_items++;
2424 push_space += this_item_size + sizeof(*item);
2425 }
2426
2427 if (right->map_token) {
2428 unmap_extent_buffer(right, right->map_token, KM_USER1);
2429 right->map_token = NULL;
2430 }
2431
2432 if (push_items == 0) {
2433 ret = 1;
2434 goto out;
2435 }
2436 if (!empty && push_items == btrfs_header_nritems(right))
2437 WARN_ON(1);
2438
2439 /* push data from right to left */
2440 copy_extent_buffer(left, right,
2441 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2442 btrfs_item_nr_offset(0),
2443 push_items * sizeof(struct btrfs_item));
2444
2445 push_space = BTRFS_LEAF_DATA_SIZE(root) -
2446 btrfs_item_offset_nr(right, push_items - 1);
2447
2448 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
2449 leaf_data_end(root, left) - push_space,
2450 btrfs_leaf_data(right) +
2451 btrfs_item_offset_nr(right, push_items - 1),
2452 push_space);
2453 old_left_nritems = btrfs_header_nritems(left);
2454 BUG_ON(old_left_nritems <= 0);
2455
2456 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2457 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2458 u32 ioff;
2459
2460 item = btrfs_item_nr(left, i);
2461 if (!left->map_token) {
2462 map_extent_buffer(left, (unsigned long)item,
2463 sizeof(struct btrfs_item),
2464 &left->map_token, &left->kaddr,
2465 &left->map_start, &left->map_len,
2466 KM_USER1);
2467 }
2468
2469 ioff = btrfs_item_offset(left, item);
2470 btrfs_set_item_offset(left, item,
2471 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
2472 }
2473 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2474 if (left->map_token) {
2475 unmap_extent_buffer(left, left->map_token, KM_USER1);
2476 left->map_token = NULL;
2477 }
2478
2479 /* fixup right node */
2480 if (push_items > right_nritems) {
2481 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2482 right_nritems);
2483 WARN_ON(1);
2484 }
2485
2486 if (push_items < right_nritems) {
2487 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2488 leaf_data_end(root, right);
2489 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2490 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2491 btrfs_leaf_data(right) +
2492 leaf_data_end(root, right), push_space);
2493
2494 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2495 btrfs_item_nr_offset(push_items),
2496 (btrfs_header_nritems(right) - push_items) *
2497 sizeof(struct btrfs_item));
2498 }
2499 right_nritems -= push_items;
2500 btrfs_set_header_nritems(right, right_nritems);
2501 push_space = BTRFS_LEAF_DATA_SIZE(root);
2502 for (i = 0; i < right_nritems; i++) {
2503 item = btrfs_item_nr(right, i);
2504
2505 if (!right->map_token) {
2506 map_extent_buffer(right, (unsigned long)item,
2507 sizeof(struct btrfs_item),
2508 &right->map_token, &right->kaddr,
2509 &right->map_start, &right->map_len,
2510 KM_USER1);
2511 }
2512
2513 push_space = push_space - btrfs_item_size(right, item);
2514 btrfs_set_item_offset(right, item, push_space);
2515 }
2516 if (right->map_token) {
2517 unmap_extent_buffer(right, right->map_token, KM_USER1);
2518 right->map_token = NULL;
2519 }
2520
2521 btrfs_mark_buffer_dirty(left);
2522 if (right_nritems)
2523 btrfs_mark_buffer_dirty(right);
2524
2525 ret = btrfs_update_ref(trans, root, right, left,
2526 old_left_nritems, push_items);
2527 BUG_ON(ret);
2528
2529 btrfs_item_key(right, &disk_key, 0);
2530 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
2531 if (wret)
2532 ret = wret;
2533
2534 /* then fixup the leaf pointer in the path */
2535 if (path->slots[0] < push_items) {
2536 path->slots[0] += old_left_nritems;
2537 if (btrfs_header_nritems(path->nodes[0]) == 0)
2538 clean_tree_block(trans, root, path->nodes[0]);
2539 btrfs_tree_unlock(path->nodes[0]);
2540 free_extent_buffer(path->nodes[0]);
2541 path->nodes[0] = left;
2542 path->slots[1] -= 1;
2543 } else {
2544 btrfs_tree_unlock(left);
2545 free_extent_buffer(left);
2546 path->slots[0] -= push_items;
2547 }
2548 BUG_ON(path->slots[0] < 0);
2549 return ret;
2550out:
2551 btrfs_tree_unlock(left);
2552 free_extent_buffer(left);
2553 return ret;
2554}
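
The ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size) adjustment above rebases each copied offset so that data which ended flush against the end of the right leaf's data area now ends flush against the left leaf's existing data. A worked check with assumed numbers:

#include <assert.h>

int main(void)
{
	unsigned leaf_data_size = 3995;	/* assumed data area size */
	unsigned old_left_item_size = 3000;	/* lowest offset already used in left */
	unsigned right_off = 3900;	/* item offset inside the right leaf */

	/* same rebasing as the fixup loop above */
	unsigned rebased = right_off - (leaf_data_size - old_left_item_size);

	assert(rebased == 2905);	/* lands just below the left leaf's data */
	return 0;
}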
2555
2556/*
2557 * split the path's leaf in two, making sure there is at least data_size
2558 * available for the resulting leaf level of the path.
2559 *
2560 * returns 0 if all went well and < 0 on failure.
2561 */
2562static noinline int split_leaf(struct btrfs_trans_handle *trans,
2563 struct btrfs_root *root,
2564 struct btrfs_key *ins_key,
2565 struct btrfs_path *path, int data_size,
2566 int extend)
2567{
2568 struct extent_buffer *l;
2569 u32 nritems;
2570 int mid;
2571 int slot;
2572 struct extent_buffer *right;
2573 int data_copy_size;
2574 int rt_data_off;
2575 int i;
2576 int ret = 0;
2577 int wret;
2578 int double_split;
2579 int num_doubles = 0;
2580 struct btrfs_disk_key disk_key;
2581
2582 /* first try to make some room by pushing left and right */
2583 if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
2584 wret = push_leaf_right(trans, root, path, data_size, 0);
2585 if (wret < 0)
2586 return wret;
2587 if (wret) {
2588 wret = push_leaf_left(trans, root, path, data_size, 0);
2589 if (wret < 0)
2590 return wret;
2591 }
2592 l = path->nodes[0];
2593
2594 /* did the pushes work? */
2595 if (btrfs_leaf_free_space(root, l) >= data_size)
2596 return 0;
2597 }
2598
2599 if (!path->nodes[1]) {
2600 ret = insert_new_root(trans, root, path, 1);
2601 if (ret)
2602 return ret;
2603 }
2604again:
2605 double_split = 0;
2606 l = path->nodes[0];
2607 slot = path->slots[0];
2608 nritems = btrfs_header_nritems(l);
2609 mid = (nritems + 1) / 2;
2610
2611 right = btrfs_alloc_free_block(trans, root, root->leafsize,
2612 path->nodes[1]->start,
2613 root->root_key.objectid,
2614 trans->transid, 0, l->start, 0);
2615 if (IS_ERR(right)) {
2616 BUG_ON(1);
2617 return PTR_ERR(right);
2618 }
2619
2620 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
2621 btrfs_set_header_bytenr(right, right->start);
2622 btrfs_set_header_generation(right, trans->transid);
2623 btrfs_set_header_owner(right, root->root_key.objectid);
2624 btrfs_set_header_level(right, 0);
2625 write_extent_buffer(right, root->fs_info->fsid,
2626 (unsigned long)btrfs_header_fsid(right),
2627 BTRFS_FSID_SIZE);
2628
2629 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2630 (unsigned long)btrfs_header_chunk_tree_uuid(right),
2631 BTRFS_UUID_SIZE);
2632 if (mid <= slot) {
2633 if (nritems == 1 ||
2634 leaf_space_used(l, mid, nritems - mid) + data_size >
2635 BTRFS_LEAF_DATA_SIZE(root)) {
2636 if (slot >= nritems) {
2637 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2638 btrfs_set_header_nritems(right, 0);
2639 wret = insert_ptr(trans, root, path,
2640 &disk_key, right->start,
2641 path->slots[1] + 1, 1);
2642 if (wret)
2643 ret = wret;
2644
2645 btrfs_tree_unlock(path->nodes[0]);
2646 free_extent_buffer(path->nodes[0]);
2647 path->nodes[0] = right;
2648 path->slots[0] = 0;
2649 path->slots[1] += 1;
2650 btrfs_mark_buffer_dirty(right);
2651 return ret;
2652 }
2653 mid = slot;
2654 if (mid != nritems &&
2655 leaf_space_used(l, mid, nritems - mid) +
2656 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2657 double_split = 1;
2658 }
2659 }
2660 } else {
2661 if (leaf_space_used(l, 0, mid) + data_size >
2662 BTRFS_LEAF_DATA_SIZE(root)) {
2663 if (!extend && data_size && slot == 0) {
2664 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2665 btrfs_set_header_nritems(right, 0);
2666 wret = insert_ptr(trans, root, path,
2667 &disk_key,
2668 right->start,
2669 path->slots[1], 1);
2670 if (wret)
2671 ret = wret;
2672 btrfs_tree_unlock(path->nodes[0]);
2673 free_extent_buffer(path->nodes[0]);
2674 path->nodes[0] = right;
2675 path->slots[0] = 0;
2676 if (path->slots[1] == 0) {
2677 wret = fixup_low_keys(trans, root,
2678 path, &disk_key, 1);
2679 if (wret)
2680 ret = wret;
2681 }
2682 btrfs_mark_buffer_dirty(right);
2683 return ret;
2684 } else if ((extend || !data_size) && slot == 0) {
2685 mid = 1;
2686 } else {
2687 mid = slot;
2688 if (mid != nritems &&
2689 leaf_space_used(l, mid, nritems - mid) +
2690 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2691 double_split = 1;
2692 }
2693 }
2694 }
2695 }
2696 nritems = nritems - mid;
2697 btrfs_set_header_nritems(right, nritems);
2698 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2699
2700 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2701 btrfs_item_nr_offset(mid),
2702 nritems * sizeof(struct btrfs_item));
2703
2704 copy_extent_buffer(right, l,
2705 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2706 data_copy_size, btrfs_leaf_data(l) +
2707 leaf_data_end(root, l), data_copy_size);
2708
2709 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2710 btrfs_item_end_nr(l, mid);
2711
2712 for (i = 0; i < nritems; i++) {
2713 struct btrfs_item *item = btrfs_item_nr(right, i);
2714 u32 ioff;
2715
2716 if (!right->map_token) {
2717 map_extent_buffer(right, (unsigned long)item,
2718 sizeof(struct btrfs_item),
2719 &right->map_token, &right->kaddr,
2720 &right->map_start, &right->map_len,
2721 KM_USER1);
2722 }
2723
2724 ioff = btrfs_item_offset(right, item);
2725 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2726 }
2727
2728 if (right->map_token) {
2729 unmap_extent_buffer(right, right->map_token, KM_USER1);
2730 right->map_token = NULL;
2731 }
2732
2733 btrfs_set_header_nritems(l, mid);
2734 ret = 0;
2735 btrfs_item_key(right, &disk_key, 0);
2736 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2737 path->slots[1] + 1, 1);
2738 if (wret)
2739 ret = wret;
2740
2741 btrfs_mark_buffer_dirty(right);
2742 btrfs_mark_buffer_dirty(l);
2743 BUG_ON(path->slots[0] != slot);
2744
2745 ret = btrfs_update_ref(trans, root, l, right, 0, nritems);
2746 BUG_ON(ret);
2747
2748 if (mid <= slot) {
2749 btrfs_tree_unlock(path->nodes[0]);
2750 free_extent_buffer(path->nodes[0]);
2751 path->nodes[0] = right;
2752 path->slots[0] -= mid;
2753 path->slots[1] += 1;
2754 } else {
2755 btrfs_tree_unlock(right);
2756 free_extent_buffer(right);
2757 }
2758
2759 BUG_ON(path->slots[0] < 0);
2760
2761 if (double_split) {
2762 BUG_ON(num_doubles != 0);
2763 num_doubles++;
2764 goto again;
2765 }
2766 return ret;
2767}
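
The split-point policy in split_leaf starts from the middle item and slides to the insertion slot when the half that will receive the new item still cannot hold data_size more bytes; if even that fails, double_split forces a second pass. A deliberately simplified sketch of the common branch (toy space_used costs; the special slot == 0 and slot >= nritems cases are omitted):

/* returns the first item that moves to the new right leaf,
 * or -1 when a second split will be needed */
static int pick_split_point(int nritems, int slot, int data_size,
			    int leaf_data_size,
			    int (*space_used)(int from, int nr))
{
	int mid = (nritems + 1) / 2;

	if (mid <= slot &&
	    space_used(mid, nritems - mid) + data_size > leaf_data_size)
		mid = slot;	/* split exactly at the insertion point */
	if (mid != nritems &&
	    space_used(mid, nritems - mid) + data_size > leaf_data_size)
		return -1;	/* still too full: the double_split case */
	return mid;
}

static int toy_space_used(int from, int nr)
{
	return nr * 125;	/* pretend every item+payload costs 125 bytes */
}

int main(void)
{
	/* 20 items, inserting at slot 15 with 600 bytes: mid stays at 10 */
	return pick_split_point(20, 15, 600, 3995, toy_space_used) != 10;
}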
2768
2769/*
2770 * This function splits a single item into two items,
2771 * giving 'new_key' to the new item and splitting the
2772 * old one at split_offset (from the start of the item).
2773 *
2774 * The path may be released by this operation. After
2775 * the split, the path is pointing to the old item. The
2776 * new item is going to be in the same node as the old one.
2777 *
2778 * Note, the item being split must be small enough to live alone on
2779 * a tree block with room for one extra struct btrfs_item
2780 *
2781 * This allows us to split the item in place, keeping a lock on the
2782 * leaf the entire time.
2783 */
2784int btrfs_split_item(struct btrfs_trans_handle *trans,
2785 struct btrfs_root *root,
2786 struct btrfs_path *path,
2787 struct btrfs_key *new_key,
2788 unsigned long split_offset)
2789{
2790 u32 item_size;
2791 struct extent_buffer *leaf;
2792 struct btrfs_key orig_key;
2793 struct btrfs_item *item;
2794 struct btrfs_item *new_item;
2795 int ret = 0;
2796 int slot;
2797 u32 nritems;
2798 u32 orig_offset;
2799 struct btrfs_disk_key disk_key;
2800 char *buf;
2801
2802 leaf = path->nodes[0];
2803 btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
2804 if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
2805 goto split;
2806
2807 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2808 btrfs_release_path(root, path);
2809
2810 path->search_for_split = 1;
2811 path->keep_locks = 1;
2812
2813 ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
2814 path->search_for_split = 0;
2815
2816 /* if our item isn't there or got smaller, return now */
2817 if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
2818 path->slots[0])) {
2819 path->keep_locks = 0;
2820 return -EAGAIN;
2821 }
2822
2823 ret = split_leaf(trans, root, &orig_key, path,
2824 sizeof(struct btrfs_item), 1);
2825 path->keep_locks = 0;
2826 BUG_ON(ret);
2827
2828 leaf = path->nodes[0];
2829 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
2830
2831split:
2832 item = btrfs_item_nr(leaf, path->slots[0]);
2833 orig_offset = btrfs_item_offset(leaf, item);
2834 item_size = btrfs_item_size(leaf, item);
2835
2836
2837 buf = kmalloc(item_size, GFP_NOFS);
2838 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
2839 path->slots[0]), item_size);
2840 slot = path->slots[0] + 1;
2841 leaf = path->nodes[0];
2842
2843 nritems = btrfs_header_nritems(leaf);
2844
2845 if (slot != nritems) {
2846 /* shift the items */
2847 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
2848 btrfs_item_nr_offset(slot),
2849 (nritems - slot) * sizeof(struct btrfs_item));
2850
2851 }
2852
2853 btrfs_cpu_key_to_disk(&disk_key, new_key);
2854 btrfs_set_item_key(leaf, &disk_key, slot);
2855
2856 new_item = btrfs_item_nr(leaf, slot);
2857
2858 btrfs_set_item_offset(leaf, new_item, orig_offset);
2859 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
2860
2861 btrfs_set_item_offset(leaf, item,
2862 orig_offset + item_size - split_offset);
2863 btrfs_set_item_size(leaf, item, split_offset);
2864
2865 btrfs_set_header_nritems(leaf, nritems + 1);
2866
2867 /* write the data for the start of the original item */
2868 write_extent_buffer(leaf, buf,
2869 btrfs_item_ptr_offset(leaf, path->slots[0]),
2870 split_offset);
2871
2872 /* write the data for the new item */
2873 write_extent_buffer(leaf, buf + split_offset,
2874 btrfs_item_ptr_offset(leaf, slot),
2875 item_size - split_offset);
2876 btrfs_mark_buffer_dirty(leaf);
2877
2878 ret = 0;
2879 if (btrfs_leaf_free_space(root, leaf) < 0) {
2880 btrfs_print_leaf(root, leaf);
2881 BUG();
2882 }
2883 kfree(buf);
2884 return ret;
2885}
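
The offset juggling in btrfs_split_item is easy to misread because item data grows downward: the new item (the tail of the payload) keeps the original, lower offset, while the old item (the head) moves up past it. A worked check with assumed numbers:

#include <assert.h>

int main(void)
{
	unsigned orig_offset = 3000, item_size = 200, split_offset = 120;

	/* new item: tail of the payload at the original offset */
	unsigned new_off = orig_offset;
	unsigned new_size = item_size - split_offset;	/* 80 */

	/* old item: head of the payload, shifted up past the tail */
	unsigned old_off = orig_offset + item_size - split_offset;	/* 3080 */
	unsigned old_size = split_offset;	/* 120 */

	/* the two halves still tile the original [3000, 3200) range */
	assert(new_off + new_size == old_off);
	assert(old_off + old_size == orig_offset + item_size);
	return 0;
}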
2886
2887/*
2888 * make the item pointed to by the path smaller. new_size indicates
2889 * how small to make it, and from_end tells us if we just chop bytes
2890 * off the end of the item or if we shift the item to chop bytes off
2891 * the front.
2892 */
2893int btrfs_truncate_item(struct btrfs_trans_handle *trans,
2894 struct btrfs_root *root,
2895 struct btrfs_path *path,
2896 u32 new_size, int from_end)
2897{
2898 int ret = 0;
2899 int slot;
2900 int slot_orig;
2901 struct extent_buffer *leaf;
2902 struct btrfs_item *item;
2903 u32 nritems;
2904 unsigned int data_end;
2905 unsigned int old_data_start;
2906 unsigned int old_size;
2907 unsigned int size_diff;
2908 int i;
2909
2910 slot_orig = path->slots[0];
2911 leaf = path->nodes[0];
2912 slot = path->slots[0];
2913
2914 old_size = btrfs_item_size_nr(leaf, slot);
2915 if (old_size == new_size)
2916 return 0;
2917
2918 nritems = btrfs_header_nritems(leaf);
2919 data_end = leaf_data_end(root, leaf);
2920
2921 old_data_start = btrfs_item_offset_nr(leaf, slot);
2922
2923 size_diff = old_size - new_size;
2924
2925 BUG_ON(slot < 0);
2926 BUG_ON(slot >= nritems);
2927
2928 /*
2929 * item0..itemN ... dataN.offset..dataN.size .. data0.size
2930 */
2931 /* first correct the data pointers */
2932 for (i = slot; i < nritems; i++) {
2933 u32 ioff;
2934 item = btrfs_item_nr(leaf, i);
2935
2936 if (!leaf->map_token) {
2937 map_extent_buffer(leaf, (unsigned long)item,
2938 sizeof(struct btrfs_item),
2939 &leaf->map_token, &leaf->kaddr,
2940 &leaf->map_start, &leaf->map_len,
2941 KM_USER1);
2942 }
2943
2944 ioff = btrfs_item_offset(leaf, item);
2945 btrfs_set_item_offset(leaf, item, ioff + size_diff);
2946 }
2947
2948 if (leaf->map_token) {
2949 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
2950 leaf->map_token = NULL;
2951 }
2952
2953 /* shift the data */
2954 if (from_end) {
2955 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2956 data_end + size_diff, btrfs_leaf_data(leaf) +
2957 data_end, old_data_start + new_size - data_end);
2958 } else {
2959 struct btrfs_disk_key disk_key;
2960 u64 offset;
2961
2962 btrfs_item_key(leaf, &disk_key, slot);
2963
2964 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
2965 unsigned long ptr;
2966 struct btrfs_file_extent_item *fi;
2967
2968 fi = btrfs_item_ptr(leaf, slot,
2969 struct btrfs_file_extent_item);
2970 fi = (struct btrfs_file_extent_item *)(
2971 (unsigned long)fi - size_diff);
2972
2973 if (btrfs_file_extent_type(leaf, fi) ==
2974 BTRFS_FILE_EXTENT_INLINE) {
2975 ptr = btrfs_item_ptr_offset(leaf, slot);
2976 memmove_extent_buffer(leaf, ptr,
2977 (unsigned long)fi,
2978 offsetof(struct btrfs_file_extent_item,
2979 disk_bytenr));
2980 }
2981 }
2982
2983 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2984 data_end + size_diff, btrfs_leaf_data(leaf) +
2985 data_end, old_data_start - data_end);
2986
2987 offset = btrfs_disk_key_offset(&disk_key);
2988 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
2989 btrfs_set_item_key(leaf, &disk_key, slot);
2990 if (slot == 0)
2991 fixup_low_keys(trans, root, path, &disk_key, 1);
2992 }
2993
2994 item = btrfs_item_nr(leaf, slot);
2995 btrfs_set_item_size(leaf, item, new_size);
2996 btrfs_mark_buffer_dirty(leaf);
2997
2998 ret = 0;
2999 if (btrfs_leaf_free_space(root, leaf) < 0) {
3000 btrfs_print_leaf(root, leaf);
3001 BUG();
3002 }
3003 return ret;
3004}
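
When truncating from the front (from_end == 0), the key's offset must advance by the number of bytes removed so it still names the first byte the item covers; that is what the btrfs_set_disk_key_offset() call above does. A minimal sketch of the bookkeeping (the key values are only illustrative):

#include <assert.h>

struct key { unsigned long long objectid, offset; unsigned char type; };

static void truncate_front(struct key *k, unsigned *size, unsigned new_size)
{
	unsigned size_diff = *size - new_size;

	k->offset += size_diff;	/* the data now starts size_diff bytes later */
	*size = new_size;
}

int main(void)
{
	struct key k = { 256, 4096, 108 };	/* assumed extent-style key */
	unsigned size = 100;

	truncate_front(&k, &size, 60);
	assert(k.offset == 4136 && size == 60);
	return 0;
}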
3005
3006/*
3007 * make the item pointed to by the path bigger, data_size is the new size.
3008 */
3009int btrfs_extend_item(struct btrfs_trans_handle *trans,
3010 struct btrfs_root *root, struct btrfs_path *path,
3011 u32 data_size)
3012{
3013 int ret = 0;
3014 int slot;
3015 int slot_orig;
3016 struct extent_buffer *leaf;
3017 struct btrfs_item *item;
3018 u32 nritems;
3019 unsigned int data_end;
3020 unsigned int old_data;
3021 unsigned int old_size;
3022 int i;
3023
3024 slot_orig = path->slots[0];
3025 leaf = path->nodes[0];
3026
3027 nritems = btrfs_header_nritems(leaf);
3028 data_end = leaf_data_end(root, leaf);
3029
3030 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3031 btrfs_print_leaf(root, leaf);
3032 BUG();
3033 }
3034 slot = path->slots[0];
3035 old_data = btrfs_item_end_nr(leaf, slot);
3036
3037 BUG_ON(slot < 0);
3038 if (slot >= nritems) {
3039 btrfs_print_leaf(root, leaf);
3040 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3041 slot, nritems);
3042 BUG_ON(1);
3043 }
3044
3045 /*
3046 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3047 */
3048 /* first correct the data pointers */
3049 for (i = slot; i < nritems; i++) {
3050 u32 ioff;
3051 item = btrfs_item_nr(leaf, i);
3052
3053 if (!leaf->map_token) {
3054 map_extent_buffer(leaf, (unsigned long)item,
3055 sizeof(struct btrfs_item),
3056 &leaf->map_token, &leaf->kaddr,
3057 &leaf->map_start, &leaf->map_len,
3058 KM_USER1);
3059 }
3060 ioff = btrfs_item_offset(leaf, item);
3061 btrfs_set_item_offset(leaf, item, ioff - data_size);
3062 }
3063
3064 if (leaf->map_token) {
3065 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3066 leaf->map_token = NULL;
3067 }
3068
3069 /* shift the data */
3070 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3071 data_end - data_size, btrfs_leaf_data(leaf) +
3072 data_end, old_data - data_end);
3073
3074 data_end = old_data;
3075 old_size = btrfs_item_size_nr(leaf, slot);
3076 item = btrfs_item_nr(leaf, slot);
3077 btrfs_set_item_size(leaf, item, old_size + data_size);
3078 btrfs_mark_buffer_dirty(leaf);
3079
3080 ret = 0;
3081 if (btrfs_leaf_free_space(root, leaf) < 0) {
3082 btrfs_print_leaf(root, leaf);
3083 BUG();
3084 }
3085 return ret;
3086}
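
btrfs_extend_item is the mirror image of truncation: every offset from the grown slot onward drops by data_size, and the data between data_end and the item is shifted toward the front to open the gap. Continuing with assumed toy offsets:

#include <assert.h>

int main(void)
{
	unsigned offset[3] = { 3895, 3845, 3820 };	/* assumed offsets */
	unsigned data_size = 20;	/* bytes added to item 1 */
	int i;

	/* items at and after the grown slot slide toward the front */
	for (i = 1; i < 3; i++)
		offset[i] -= data_size;

	assert(offset[1] == 3825 && offset[2] == 3800);
	return 0;
}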
3087
3088/*
3089 * Given a key and some data, insert items into the tree.
3090 * This does all the path init required, making room in the tree if needed.
3091 * Returns the number of keys that were inserted.
3092 */
3093int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3094 struct btrfs_root *root,
3095 struct btrfs_path *path,
3096 struct btrfs_key *cpu_key, u32 *data_size,
3097 int nr)
3098{
3099 struct extent_buffer *leaf;
3100 struct btrfs_item *item;
3101 int ret = 0;
3102 int slot;
3103 int i;
3104 u32 nritems;
3105 u32 total_data = 0;
3106 u32 total_size = 0;
3107 unsigned int data_end;
3108 struct btrfs_disk_key disk_key;
3109 struct btrfs_key found_key;
3110
3111 for (i = 0; i < nr; i++) {
3112 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3113 BTRFS_LEAF_DATA_SIZE(root)) {
3114 nr = i;
3115 break;
3116 }
3117 total_data += data_size[i];
3118 total_size += data_size[i] + sizeof(struct btrfs_item);
3119 }
3120 BUG_ON(nr == 0);
3121
3122 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3123 if (ret == 0)
3124 return -EEXIST;
3125 if (ret < 0)
3126 goto out;
3127
3128 leaf = path->nodes[0];
3129
3130 nritems = btrfs_header_nritems(leaf);
3131 data_end = leaf_data_end(root, leaf);
3132
3133 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3134 for (i = nr - 1; i >= 0; i--) {
3135 total_data -= data_size[i];
3136 total_size -= data_size[i] + sizeof(struct btrfs_item);
3137 if (total_size < btrfs_leaf_free_space(root, leaf))
3138 break;
3139 }
3140 nr = i;
3141 }
3142
3143 slot = path->slots[0];
3144 BUG_ON(slot < 0);
3145
3146 if (slot != nritems) {
3147 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3148
3149 item = btrfs_item_nr(leaf, slot);
3150 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3151
3152 /* figure out how many keys we can insert in here */
3153 total_data = data_size[0];
3154 for (i = 1; i < nr; i++) {
3155 if (comp_cpu_keys(&found_key, cpu_key + i) <= 0)
3156 break;
3157 total_data += data_size[i];
3158 }
3159 nr = i;
3160
3161 if (old_data < data_end) {
3162 btrfs_print_leaf(root, leaf);
3163 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3164 slot, old_data, data_end);
3165 BUG_ON(1);
3166 }
3167 /*
3168 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3169 */
3170 /* first correct the data pointers */
3171 WARN_ON(leaf->map_token);
3172 for (i = slot; i < nritems; i++) {
3173 u32 ioff;
3174
3175 item = btrfs_item_nr(leaf, i);
3176 if (!leaf->map_token) {
3177 map_extent_buffer(leaf, (unsigned long)item,
3178 sizeof(struct btrfs_item),
3179 &leaf->map_token, &leaf->kaddr,
3180 &leaf->map_start, &leaf->map_len,
3181 KM_USER1);
3182 }
3183
3184 ioff = btrfs_item_offset(leaf, item);
3185 btrfs_set_item_offset(leaf, item, ioff - total_data);
3186 }
3187 if (leaf->map_token) {
3188 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3189 leaf->map_token = NULL;
3190 }
3191
3192 /* shift the items */
3193 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3194 btrfs_item_nr_offset(slot),
3195 (nritems - slot) * sizeof(struct btrfs_item));
3196
3197 /* shift the data */
3198 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3199 data_end - total_data, btrfs_leaf_data(leaf) +
3200 data_end, old_data - data_end);
3201 data_end = old_data;
3202 } else {
3203 /*
3204 * this sucks but it has to be done: if we are inserting at
3205 * the end of the leaf, only insert 1 of the items, since we
3206 * have no way of knowing what's on the next leaf and we'd have
3207 * to drop our current locks to figure it out
3208 */
3209 nr = 1;
3210 }
3211
3212 /* setup the item for the new data */
3213 for (i = 0; i < nr; i++) {
3214 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3215 btrfs_set_item_key(leaf, &disk_key, slot + i);
3216 item = btrfs_item_nr(leaf, slot + i);
3217 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3218 data_end -= data_size[i];
3219 btrfs_set_item_size(leaf, item, data_size[i]);
3220 }
3221 btrfs_set_header_nritems(leaf, nritems + nr);
3222 btrfs_mark_buffer_dirty(leaf);
3223
3224 ret = 0;
3225 if (slot == 0) {
3226 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3227 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3228 }
3229
3230 if (btrfs_leaf_free_space(root, leaf) < 0) {
3231 btrfs_print_leaf(root, leaf);
3232 BUG();
3233 }
3234out:
3235 if (!ret)
3236 ret = nr;
3237 return ret;
3238}
3239
3240/*
3241 * Given a key and some data, insert items into the tree.
3242 * This does all the path init required, making room in the tree if needed.
3243 */
3244int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3245 struct btrfs_root *root,
3246 struct btrfs_path *path,
3247 struct btrfs_key *cpu_key, u32 *data_size,
3248 int nr)
3249{
3250 struct extent_buffer *leaf;
3251 struct btrfs_item *item;
3252 int ret = 0;
3253 int slot;
3254 int slot_orig;
3255 int i;
3256 u32 nritems;
3257 u32 total_size = 0;
3258 u32 total_data = 0;
3259 unsigned int data_end;
3260 struct btrfs_disk_key disk_key;
3261
3262 for (i = 0; i < nr; i++)
3263 total_data += data_size[i];
3264
3265 total_size = total_data + (nr * sizeof(struct btrfs_item));
3266 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3267 if (ret == 0)
3268 return -EEXIST;
3269 if (ret < 0)
3270 goto out;
3271
3272 slot_orig = path->slots[0];
3273 leaf = path->nodes[0];
3274
3275 nritems = btrfs_header_nritems(leaf);
3276 data_end = leaf_data_end(root, leaf);
3277
3278 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3279 btrfs_print_leaf(root, leaf);
3280 printk(KERN_CRIT "not enough freespace need %u have %d\n",
3281 total_size, btrfs_leaf_free_space(root, leaf));
3282 BUG();
3283 }
3284
3285 slot = path->slots[0];
3286 BUG_ON(slot < 0);
3287
3288 if (slot != nritems) {
3289 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3290
3291 if (old_data < data_end) {
3292 btrfs_print_leaf(root, leaf);
3293 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3294 slot, old_data, data_end);
3295 BUG_ON(1);
3296 }
3297 /*
3298 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3299 */
3300 /* first correct the data pointers */
3301 WARN_ON(leaf->map_token);
3302 for (i = slot; i < nritems; i++) {
3303 u32 ioff;
3304
3305 item = btrfs_item_nr(leaf, i);
3306 if (!leaf->map_token) {
3307 map_extent_buffer(leaf, (unsigned long)item,
3308 sizeof(struct btrfs_item),
3309 &leaf->map_token, &leaf->kaddr,
3310 &leaf->map_start, &leaf->map_len,
3311 KM_USER1);
3312 }
3313
3314 ioff = btrfs_item_offset(leaf, item);
3315 btrfs_set_item_offset(leaf, item, ioff - total_data);
3316 }
3317 if (leaf->map_token) {
3318 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3319 leaf->map_token = NULL;
3320 }
3321
3322 /* shift the items */
3323 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3324 btrfs_item_nr_offset(slot),
3325 (nritems - slot) * sizeof(struct btrfs_item));
3326
3327 /* shift the data */
3328 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3329 data_end - total_data, btrfs_leaf_data(leaf) +
3330 data_end, old_data - data_end);
3331 data_end = old_data;
3332 }
3333
3334 /* setup the item for the new data */
3335 for (i = 0; i < nr; i++) {
3336 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3337 btrfs_set_item_key(leaf, &disk_key, slot + i);
3338 item = btrfs_item_nr(leaf, slot + i);
3339 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3340 data_end -= data_size[i];
3341 btrfs_set_item_size(leaf, item, data_size[i]);
3342 }
3343 btrfs_set_header_nritems(leaf, nritems + nr);
3344 btrfs_mark_buffer_dirty(leaf);
3345
3346 ret = 0;
3347 if (slot == 0) {
3348 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3349 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3350 }
3351
3352 if (btrfs_leaf_free_space(root, leaf) < 0) {
3353 btrfs_print_leaf(root, leaf);
3354 BUG();
3355 }
3356out:
3357 return ret;
3358}
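
The setup loop above walks data_end downward once per new item: item i's offset becomes data_end - data_size[i], and data_end then moves there for the next item. A sketch over the same toy layout:

#include <assert.h>

int main(void)
{
	unsigned data_end = 3995;	/* assumed empty-leaf data_end */
	unsigned data_size[2] = { 100, 50 };
	unsigned offset[2];
	int i;

	for (i = 0; i < 2; i++) {
		offset[i] = data_end - data_size[i];
		data_end -= data_size[i];
	}
	assert(offset[0] == 3895 && offset[1] == 3845);
	return 0;
}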
3359
3360/*
3361 * Given a key and some data, insert an item into the tree.
3362 * This does all the path init required, making room in the tree if needed.
3363 */
3364int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3365 *root, struct btrfs_key *cpu_key, void *data, u32
3366 data_size)
3367{
3368 int ret = 0;
3369 struct btrfs_path *path;
3370 struct extent_buffer *leaf;
3371 unsigned long ptr;
3372
3373 path = btrfs_alloc_path();
3374 BUG_ON(!path);
3375 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3376 if (!ret) {
3377 leaf = path->nodes[0];
3378 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3379 write_extent_buffer(leaf, data, ptr, data_size);
3380 btrfs_mark_buffer_dirty(leaf);
3381 }
3382 btrfs_free_path(path);
3383 return ret;
3384}
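
A minimal usage sketch of this wrapper, assuming an already-open transaction handle and root; the key and payload are purely illustrative:

	struct btrfs_key ins_key = {
		.objectid = BTRFS_FIRST_FREE_OBJECTID,
		.type = 0,
		.offset = 0,
	};
	char payload[8] = "example";
	int err;

	err = btrfs_insert_item(trans, root, &ins_key, payload,
				sizeof(payload));
	if (err)	/* -EEXIST if the key already exists, < 0 otherwise */
		return err;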
3385
3386/*
3387 * delete the pointer from a given node.
3388 *
3389 * the tree should have been previously balanced so the deletion does not
3390 * empty a node.
3391 */
3392static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3393 struct btrfs_path *path, int level, int slot)
3394{
3395 struct extent_buffer *parent = path->nodes[level];
3396 u32 nritems;
3397 int ret = 0;
3398 int wret;
3399
3400 nritems = btrfs_header_nritems(parent);
3401 if (slot != nritems - 1) {
3402 memmove_extent_buffer(parent,
3403 btrfs_node_key_ptr_offset(slot),
3404 btrfs_node_key_ptr_offset(slot + 1),
3405 sizeof(struct btrfs_key_ptr) *
3406 (nritems - slot - 1));
3407 }
3408 nritems--;
3409 btrfs_set_header_nritems(parent, nritems);
3410 if (nritems == 0 && parent == root->node) {
3411 BUG_ON(btrfs_header_level(root->node) != 1);
3412 /* just turn the root into a leaf and break */
3413 btrfs_set_header_level(root->node, 0);
3414 } else if (slot == 0) {
3415 struct btrfs_disk_key disk_key;
3416
3417 btrfs_node_key(parent, &disk_key, 0);
3418 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
3419 if (wret)
3420 ret = wret;
3421 }
3422 btrfs_mark_buffer_dirty(parent);
3423 return ret;
3424}
3425
3426/*
3427 * a helper function to delete the leaf pointed to by path->slots[1] and
3428 * path->nodes[1]. bytenr is the node block pointer, but since the callers
3429 * already know it, it is faster to have them pass it down than to
3430 * read it out of the node again.
3431 *
3432 * This deletes the pointer in path->nodes[1] and frees the leaf
3433 * block extent. zero is returned if it all worked out, < 0 otherwise.
3434 *
3435 * The path must have already been setup for deleting the leaf, including
3436 * all the proper balancing. path->nodes[1] must be locked.
3437 */
3438noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3439 struct btrfs_root *root,
3440 struct btrfs_path *path, u64 bytenr)
3441{
3442 int ret;
3443 u64 root_gen = btrfs_header_generation(path->nodes[1]);
3444
3445 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3446 if (ret)
3447 return ret;
3448
3449 ret = btrfs_free_extent(trans, root, bytenr,
3450 btrfs_level_size(root, 0),
3451 path->nodes[1]->start,
3452 btrfs_header_owner(path->nodes[1]),
3453 root_gen, 0, 1);
3454 return ret;
3455}
3456/*
3457 * delete the item at the leaf level in path. If that empties
3458 * the leaf, remove it from the tree
3459 */
3460int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3461 struct btrfs_path *path, int slot, int nr)
3462{
3463 struct extent_buffer *leaf;
3464 struct btrfs_item *item;
3465 int last_off;
3466 int dsize = 0;
3467 int ret = 0;
3468 int wret;
3469 int i;
3470 u32 nritems;
3471
3472 leaf = path->nodes[0];
3473 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
3474
3475 for (i = 0; i < nr; i++)
3476 dsize += btrfs_item_size_nr(leaf, slot + i);
3477
3478 nritems = btrfs_header_nritems(leaf);
3479
3480 if (slot + nr != nritems) {
3481 int data_end = leaf_data_end(root, leaf);
3482
3483 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3484 data_end + dsize,
3485 btrfs_leaf_data(leaf) + data_end,
3486 last_off - data_end);
3487
3488 for (i = slot + nr; i < nritems; i++) {
3489 u32 ioff;
3490
3491 item = btrfs_item_nr(leaf, i);
3492 if (!leaf->map_token) {
3493 map_extent_buffer(leaf, (unsigned long)item,
3494 sizeof(struct btrfs_item),
3495 &leaf->map_token, &leaf->kaddr,
3496 &leaf->map_start, &leaf->map_len,
3497 KM_USER1);
3498 }
3499 ioff = btrfs_item_offset(leaf, item);
3500 btrfs_set_item_offset(leaf, item, ioff + dsize);
3501 }
3502
3503 if (leaf->map_token) {
3504 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3505 leaf->map_token = NULL;
3506 }
3507
3508 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
3509 btrfs_item_nr_offset(slot + nr),
3510 sizeof(struct btrfs_item) *
3511 (nritems - slot - nr));
3512 }
3513 btrfs_set_header_nritems(leaf, nritems - nr);
3514 nritems -= nr;
3515
3516 /* delete the leaf if we've emptied it */
3517 if (nritems == 0) {
3518 if (leaf == root->node) {
3519 btrfs_set_header_level(leaf, 0);
3520 } else {
3521 ret = btrfs_del_leaf(trans, root, path, leaf->start);
3522 BUG_ON(ret);
3523 }
3524 } else {
3525 int used = leaf_space_used(leaf, 0, nritems);
3526 if (slot == 0) {
3527 struct btrfs_disk_key disk_key;
3528
3529 btrfs_item_key(leaf, &disk_key, 0);
3530 wret = fixup_low_keys(trans, root, path,
3531 &disk_key, 1);
3532 if (wret)
3533 ret = wret;
3534 }
3535
3536 /* delete the leaf if it is mostly empty */
3537 if (used < BTRFS_LEAF_DATA_SIZE(root) / 4) {
3538 /* push_leaf_left fixes the path.
3539 * make sure the path still points to our leaf
3540 * for possible call to del_ptr below
3541 */
3542 slot = path->slots[1];
3543 extent_buffer_get(leaf);
3544
3545 wret = push_leaf_left(trans, root, path, 1, 1);
3546 if (wret < 0 && wret != -ENOSPC)
3547 ret = wret;
3548
3549 if (path->nodes[0] == leaf &&
3550 btrfs_header_nritems(leaf)) {
3551 wret = push_leaf_right(trans, root, path, 1, 1);
3552 if (wret < 0 && wret != -ENOSPC)
3553 ret = wret;
3554 }
3555
3556 if (btrfs_header_nritems(leaf) == 0) {
3557 path->slots[1] = slot;
3558 ret = btrfs_del_leaf(trans, root, path,
3559 leaf->start);
3560 BUG_ON(ret);
3561 free_extent_buffer(leaf);
3562 } else {
3563 /* if we're still in the path, make sure
3564 * we're dirty. Otherwise, one of the
3565 * push_leaf functions must have already
3566 * dirtied this buffer
3567 */
3568 if (path->nodes[0] == leaf)
3569 btrfs_mark_buffer_dirty(leaf);
3570 free_extent_buffer(leaf);
3571 }
3572 } else {
3573 btrfs_mark_buffer_dirty(leaf);
3574 }
3575 }
3576 return ret;
3577}
3578
3579/*
3580 * search the tree again to find a leaf with lesser keys
3581 * returns 0 if it found something or 1 if there are no lesser leaves.
3582 * returns < 0 on io errors.
3583 *
3584 * This may release the path, and so you may lose any locks held at the
3585 * time you call it.
3586 */
3587int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
3588{
3589 struct btrfs_key key;
3590 struct btrfs_disk_key found_key;
3591 int ret;
3592
3593 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
3594
3595 if (key.offset > 0)
3596 key.offset--;
3597 else if (key.type > 0)
3598 key.type--;
3599 else if (key.objectid > 0)
3600 key.objectid--;
3601 else
3602 return 1;
3603
3604 btrfs_release_path(root, path);
3605 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3606 if (ret < 0)
3607 return ret;
3608 btrfs_item_key(path->nodes[0], &found_key, 0);
3609 ret = comp_keys(&found_key, &key);
3610 if (ret < 0)
3611 return 0;
3612 return 1;
3613}
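
The decrement chain above does not compute the exact predecessor key (it never saturates the lower fields); it only needs something strictly smaller, because the follow-up search lands on the last key <= the target. A userspace sketch of the same stepping order, offset first, then type, then objectid:

#include <assert.h>

struct key { unsigned long long objectid, offset; unsigned char type; };

/* returns 1 when the key is already the smallest possible one */
static int key_step_down(struct key *k)
{
	if (k->offset > 0)
		k->offset--;
	else if (k->type > 0)
		k->type--;
	else if (k->objectid > 0)
		k->objectid--;
	else
		return 1;
	return 0;
}

int main(void)
{
	struct key k = { 5, 0, 1 };	/* objectid 5, offset 0, type 1 */

	assert(key_step_down(&k) == 0 && k.type == 0);
	return 0;
}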
3614
3615/*
3616 * A helper function to walk down the tree starting at min_key, and looking
3617 * for nodes or leaves that are either in cache or have a minimum
3618 * transaction id. This is used by the btree defrag code, and tree logging
3619 *
3620 * This does not cow, but it does stuff the starting key it finds back
3621 * into min_key, so you can call btrfs_search_slot with cow=1 on the
3622 * key and get a writable path.
3623 *
3624 * This does lock as it descends, and path->keep_locks should be set
3625 * to 1 by the caller.
3626 *
3627 * This honors path->lowest_level to prevent descent past a given level
3628 * of the tree.
3629 *
3630 * min_trans indicates the oldest transaction that you are interested
3631 * in walking through. Any nodes or leaves older than min_trans are
3632 * skipped over (without reading them).
3633 *
3634 * returns zero if something useful was found, < 0 on error and 1 if there
3635 * was nothing in the tree that matched the search criteria.
3636 */
3637int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
3638 struct btrfs_key *max_key,
3639 struct btrfs_path *path, int cache_only,
3640 u64 min_trans)
3641{
3642 struct extent_buffer *cur;
3643 struct btrfs_key found_key;
3644 int slot;
3645 int sret;
3646 u32 nritems;
3647 int level;
3648 int ret = 1;
3649
3650 WARN_ON(!path->keep_locks);
3651again:
3652 cur = btrfs_lock_root_node(root);
3653 level = btrfs_header_level(cur);
3654 WARN_ON(path->nodes[level]);
3655 path->nodes[level] = cur;
3656 path->locks[level] = 1;
3657
3658 if (btrfs_header_generation(cur) < min_trans) {
3659 ret = 1;
3660 goto out;
3661 }
3662 while (1) {
3663 nritems = btrfs_header_nritems(cur);
3664 level = btrfs_header_level(cur);
3665 sret = bin_search(cur, min_key, level, &slot);
3666
3667 /* at the lowest level, we're done, setup the path and exit */
3668 if (level == path->lowest_level) {
3669 if (slot >= nritems)
3670 goto find_next_key;
3671 ret = 0;
3672 path->slots[level] = slot;
3673 btrfs_item_key_to_cpu(cur, &found_key, slot);
3674 goto out;
3675 }
3676 if (sret && slot > 0)
3677 slot--;
3678 /*
3679 * check this node pointer against the cache_only and
3680 * min_trans parameters. If it isn't in cache or is too
3681 * old, skip to the next one.
3682 */
3683 while (slot < nritems) {
3684 u64 blockptr;
3685 u64 gen;
3686 struct extent_buffer *tmp;
3687 struct btrfs_disk_key disk_key;
3688
3689 blockptr = btrfs_node_blockptr(cur, slot);
3690 gen = btrfs_node_ptr_generation(cur, slot);
3691 if (gen < min_trans) {
3692 slot++;
3693 continue;
3694 }
3695 if (!cache_only)
3696 break;
3697
3698 if (max_key) {
3699 btrfs_node_key(cur, &disk_key, slot);
3700 if (comp_keys(&disk_key, max_key) >= 0) {
3701 ret = 1;
3702 goto out;
3703 }
3704 }
3705
3706 tmp = btrfs_find_tree_block(root, blockptr,
3707 btrfs_level_size(root, level - 1));
3708
3709 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
3710 free_extent_buffer(tmp);
3711 break;
3712 }
3713 if (tmp)
3714 free_extent_buffer(tmp);
3715 slot++;
3716 }
3717find_next_key:
3718 /*
3719 * we didn't find a candidate key in this node, walk forward
3720 * and find another one
3721 */
3722 if (slot >= nritems) {
3723 path->slots[level] = slot;
3724 sret = btrfs_find_next_key(root, path, min_key, level,
3725 cache_only, min_trans);
3726 if (sret == 0) {
3727 btrfs_release_path(root, path);
3728 goto again;
3729 } else {
3730 goto out;
3731 }
3732 }
3733 /* save our key for returning back */
3734 btrfs_node_key_to_cpu(cur, &found_key, slot);
3735 path->slots[level] = slot;
3736 if (level == path->lowest_level) {
3737 ret = 0;
3738 unlock_up(path, level, 1);
3739 goto out;
3740 }
3741 cur = read_node_slot(root, cur, slot);
3742
3743 btrfs_tree_lock(cur);
3744 path->locks[level - 1] = 1;
3745 path->nodes[level - 1] = cur;
3746 unlock_up(path, level, 1);
3747 }
3748out:
3749 if (ret == 0)
3750 memcpy(min_key, &found_key, sizeof(found_key));
3751 return ret;
3752}
3753
3754/*
3755 * this is similar to btrfs_next_leaf, but does not try to preserve
3756 * and fixup the path. It looks for and returns the next key in the
3757 * tree based on the current path and the cache_only and min_trans
3758 * parameters.
3759 *
3760 * 0 is returned if another key is found, < 0 if there are any errors
3761 * and 1 is returned if there are no higher keys in the tree
3762 *
3763 * path->keep_locks should be set to 1 on the search made before
3764 * calling this function.
3765 */
3766int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
3767 struct btrfs_key *key, int lowest_level,
3768 int cache_only, u64 min_trans)
3769{
3770 int level = lowest_level;
3771 int slot;
3772 struct extent_buffer *c;
3773
3774 WARN_ON(!path->keep_locks);
3775 while (level < BTRFS_MAX_LEVEL) {
3776 if (!path->nodes[level])
3777 return 1;
3778
3779 slot = path->slots[level] + 1;
3780 c = path->nodes[level];
3781next:
3782 if (slot >= btrfs_header_nritems(c)) {
3783 level++;
3784 if (level == BTRFS_MAX_LEVEL)
3785 return 1;
3786 continue;
3787 }
3788 if (level == 0)
3789 btrfs_item_key_to_cpu(c, key, slot);
3790 else {
3791 u64 blockptr = btrfs_node_blockptr(c, slot);
3792 u64 gen = btrfs_node_ptr_generation(c, slot);
3793
3794 if (cache_only) {
3795 struct extent_buffer *cur;
3796 cur = btrfs_find_tree_block(root, blockptr,
3797 btrfs_level_size(root, level - 1));
3798 if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
3799 slot++;
3800 if (cur)
3801 free_extent_buffer(cur);
3802 goto next;
3803 }
3804 free_extent_buffer(cur);
3805 }
3806 if (gen < min_trans) {
3807 slot++;
3808 goto next;
3809 }
3810 btrfs_node_key_to_cpu(c, key, slot);
3811 }
3812 return 0;
3813 }
3814 return 1;
3815}
3816
3817/*
3818 * search the tree again to find a leaf with greater keys
3819 * returns 0 if it found something or 1 if there are no greater leaves.
3820 * returns < 0 on io errors.
3821 */
3822int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
3823{
3824 int slot;
3825 int level = 1;
3826 struct extent_buffer *c;
3827 struct extent_buffer *next = NULL;
3828 struct btrfs_key key;
3829 u32 nritems;
3830 int ret;
3831
3832 nritems = btrfs_header_nritems(path->nodes[0]);
3833 if (nritems == 0)
3834 return 1;
3835
3836 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
3837
3838 btrfs_release_path(root, path);
3839 path->keep_locks = 1;
3840 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3841 path->keep_locks = 0;
3842
3843 if (ret < 0)
3844 return ret;
3845
3846 nritems = btrfs_header_nritems(path->nodes[0]);
3847 /*
3848 * by releasing the path above we dropped all our locks. A balance
3849 * could have added more items next to the key that used to be
3850 * at the very end of the block. So, check again here and
3851 * advance the path if there are now more items available.
3852 */
3853 if (nritems > 0 && path->slots[0] < nritems - 1) {
3854 path->slots[0]++;
3855 goto done;
3856 }
3857
3858 while (level < BTRFS_MAX_LEVEL) {
3859 if (!path->nodes[level])
3860 return 1;
3861
3862 slot = path->slots[level] + 1;
3863 c = path->nodes[level];
3864 if (slot >= btrfs_header_nritems(c)) {
3865 level++;
3866 if (level == BTRFS_MAX_LEVEL)
3867 return 1;
3868 continue;
3869 }
3870
3871 if (next) {
3872 btrfs_tree_unlock(next);
3873 free_extent_buffer(next);
3874 }
3875
3876 if (level == 1 && (path->locks[1] || path->skip_locking) &&
3877 path->reada)
3878 reada_for_search(root, path, level, slot, 0);
3879
3880 next = read_node_slot(root, c, slot);
3881 if (!path->skip_locking) {
3882 WARN_ON(!btrfs_tree_locked(c));
3883 btrfs_tree_lock(next);
3884 }
3885 break;
3886 }
3887 path->slots[level] = slot;
3888 while (1) {
3889 level--;
3890 c = path->nodes[level];
3891 if (path->locks[level])
3892 btrfs_tree_unlock(c);
3893 free_extent_buffer(c);
3894 path->nodes[level] = next;
3895 path->slots[level] = 0;
3896 if (!path->skip_locking)
3897 path->locks[level] = 1;
3898 if (!level)
3899 break;
3900 if (level == 1 && path->locks[1] && path->reada)
3901 reada_for_search(root, path, level, slot, 0);
3902 next = read_node_slot(root, next, 0);
3903 if (!path->skip_locking) {
3904 WARN_ON(!btrfs_tree_locked(path->nodes[level]));
3905 btrfs_tree_lock(next);
3906 }
3907 }
3908done:
3909 unlock_up(path, 0, 1);
3910 return 0;
3911}
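
Paired with btrfs_search_slot, this is the standard forward-iteration pattern over a tree. A hedged sketch (assumes a root, an allocated path and a starting key, and omits error handling and cleanup):

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (ret >= 0) {
		struct extent_buffer *leaf = path->nodes[0];

		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				break;	/* 1: no more leaves, < 0: io error */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* ... examine the item at path->slots[0] ... */
		path->slots[0]++;
	}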
3912
3913/*
3914 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
3915 * searching until it gets past min_objectid or finds an item of 'type'
3916 *
3917 * returns 0 if something is found, 1 if nothing was found and < 0 on error
3918 */
3919int btrfs_previous_item(struct btrfs_root *root,
3920 struct btrfs_path *path, u64 min_objectid,
3921 int type)
3922{
3923 struct btrfs_key found_key;
3924 struct extent_buffer *leaf;
3925 u32 nritems;
3926 int ret;
3927
3928 while (1) {
3929 if (path->slots[0] == 0) {
3930 ret = btrfs_prev_leaf(root, path);
3931 if (ret != 0)
3932 return ret;
3933 } else {
3934 path->slots[0]--;
3935 }
3936 leaf = path->nodes[0];
3937 nritems = btrfs_header_nritems(leaf);
3938 if (nritems == 0)
3939 return 1;
3940 if (path->slots[0] == nritems)
3941 path->slots[0]--;
3942
3943 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3944 if (found_key.type == type)
3945 return 0;
3946 if (found_key.objectid < min_objectid)
3947 break;
3948 if (found_key.objectid == min_objectid &&
3949 found_key.type < type)
3950 break;
3951 }
3952 return 1;
3953}
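
A usage sketch of the backwards walk (assumes a root and a path positioned by a prior search; BTRFS_INODE_ITEM_KEY stands in for whatever item type is being sought):

	ret = btrfs_previous_item(root, path, min_objectid,
				  BTRFS_INODE_ITEM_KEY);
	if (ret == 0)
		/* path now points at the previous item of that type */
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);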
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
new file mode 100644
index 000000000000..eee060f88113
--- /dev/null
+++ b/fs/btrfs/ctree.h
@@ -0,0 +1,2129 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __BTRFS_CTREE__
20#define __BTRFS_CTREE__
21
22#include <linux/version.h>
23#include <linux/mm.h>
24#include <linux/highmem.h>
25#include <linux/fs.h>
26#include <linux/completion.h>
27#include <linux/backing-dev.h>
28#include <linux/wait.h>
29#include <asm/kmap_types.h>
30#include "extent_io.h"
31#include "extent_map.h"
32#include "async-thread.h"
33
34struct btrfs_trans_handle;
35struct btrfs_transaction;
36extern struct kmem_cache *btrfs_trans_handle_cachep;
37extern struct kmem_cache *btrfs_transaction_cachep;
38extern struct kmem_cache *btrfs_bit_radix_cachep;
39extern struct kmem_cache *btrfs_path_cachep;
40struct btrfs_ordered_sum;
41
42#define BTRFS_MAGIC "_BHRfS_M"
43
44#define BTRFS_ACL_NOT_CACHED ((void *)-1)
45
46#ifdef CONFIG_LOCKDEP
47# define BTRFS_MAX_LEVEL 7
48#else
49# define BTRFS_MAX_LEVEL 8
50#endif
51
52/* holds pointers to all of the tree roots */
53#define BTRFS_ROOT_TREE_OBJECTID 1ULL
54
55/* stores information about which extents are in use, and reference counts */
56#define BTRFS_EXTENT_TREE_OBJECTID 2ULL
57
58/*
59 * chunk tree stores translations from logical -> physical block numbering
60 * the super block points to the chunk tree
61 */
62#define BTRFS_CHUNK_TREE_OBJECTID 3ULL
63
64/*
65 * stores information about which areas of a given device are in use.
66 * one per device. The tree of tree roots points to the device tree
67 */
68#define BTRFS_DEV_TREE_OBJECTID 4ULL
69
70/* one per subvolume, storing files and directories */
71#define BTRFS_FS_TREE_OBJECTID 5ULL
72
73/* directory objectid inside the root tree */
74#define BTRFS_ROOT_TREE_DIR_OBJECTID 6ULL
75
76/* holds checksums of all the data extents */
77#define BTRFS_CSUM_TREE_OBJECTID 7ULL
78
79/* orphan objectid for tracking unlinked/truncated files */
80#define BTRFS_ORPHAN_OBJECTID -5ULL
81
82/* does write ahead logging to speed up fsyncs */
83#define BTRFS_TREE_LOG_OBJECTID -6ULL
84#define BTRFS_TREE_LOG_FIXUP_OBJECTID -7ULL
85
86/* for space balancing */
87#define BTRFS_TREE_RELOC_OBJECTID -8ULL
88#define BTRFS_DATA_RELOC_TREE_OBJECTID -9ULL
89
90/*
91 * extent checksums all have this objectid
92 * this allows them to share the logging tree
93 * for fsyncs
94 */
95#define BTRFS_EXTENT_CSUM_OBJECTID -10ULL
96
97/* dummy objectid represents multiple objectids */
98#define BTRFS_MULTIPLE_OBJECTIDS -255ULL
99
100/*
101 * All files have objectids in this range.
102 */
103#define BTRFS_FIRST_FREE_OBJECTID 256ULL
104#define BTRFS_LAST_FREE_OBJECTID -256ULL
105#define BTRFS_FIRST_CHUNK_TREE_OBJECTID 256ULL
106
107
108/*
109 * the device items go into the chunk tree. The key is in the form
110 * [ 1 BTRFS_DEV_ITEM_KEY device_id ]
111 */
112#define BTRFS_DEV_ITEMS_OBJECTID 1ULL
113
114/*
115 * we can actually store much bigger names, but let's not confuse the rest
116 * of Linux
117 */
118#define BTRFS_NAME_LEN 255
119
120/* 32 bytes in various csum fields */
121#define BTRFS_CSUM_SIZE 32
122
123/* csum types */
124#define BTRFS_CSUM_TYPE_CRC32 0
125
126/* four bytes for CRC32 */
127static int btrfs_csum_sizes[] = { 4, 0 };
128
129#define BTRFS_EMPTY_DIR_SIZE 0
130
131#define BTRFS_FT_UNKNOWN 0
132#define BTRFS_FT_REG_FILE 1
133#define BTRFS_FT_DIR 2
134#define BTRFS_FT_CHRDEV 3
135#define BTRFS_FT_BLKDEV 4
136#define BTRFS_FT_FIFO 5
137#define BTRFS_FT_SOCK 6
138#define BTRFS_FT_SYMLINK 7
139#define BTRFS_FT_XATTR 8
140#define BTRFS_FT_MAX 9
141
142/*
143 * the key defines the order in the tree, and so it also defines (optimal)
144 * block layout. objectid corresponds to the inode number. The type
145 * field tells us things about the object, and is a kind of stream selector.
146 * so for a given inode, keys with a type of 1 might refer to the inode
147 * data, a type of 2 may point to file data in the btree and a type of 3
148 * may point to extents.
149 *
150 * offset is the starting byte offset for this key in the stream.
151 *
152 * btrfs_disk_key is in disk byte order. struct btrfs_key is always
153 * in cpu native order. Otherwise they are identical and their sizes
154 * should be the same (ie both packed)
155 */
156struct btrfs_disk_key {
157 __le64 objectid;
158 u8 type;
159 __le64 offset;
160} __attribute__ ((__packed__));
161
162struct btrfs_key {
163 u64 objectid;
164 u8 type;
165 u64 offset;
166} __attribute__ ((__packed__));
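
A sketch of the conversion the comment above implies (the real btrfs_disk_key_to_cpu helper lives elsewhere in these headers; le64_to_cpu is the kernel's little-endian accessor):

static inline void disk_key_to_cpu_sketch(struct btrfs_key *cpu,
					  const struct btrfs_disk_key *disk)
{
	cpu->objectid = le64_to_cpu(disk->objectid);
	cpu->type = disk->type;	/* a single byte has no byte order to fix */
	cpu->offset = le64_to_cpu(disk->offset);
}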
167
168struct btrfs_mapping_tree {
169 struct extent_map_tree map_tree;
170};
171
172#define BTRFS_UUID_SIZE 16
173struct btrfs_dev_item {
174 /* the internal btrfs device id */
175 __le64 devid;
176
177 /* size of the device */
178 __le64 total_bytes;
179
180 /* bytes used */
181 __le64 bytes_used;
182
183 /* optimal io alignment for this device */
184 __le32 io_align;
185
186 /* optimal io width for this device */
187 __le32 io_width;
188
189 /* minimal io size for this device */
190 __le32 sector_size;
191
192 /* type and info about this device */
193 __le64 type;
194
195 /* expected generation for this device */
196 __le64 generation;
197
198 /*
199 * starting byte of this partition on the device,
200 * to allow for stripe alignment in the future
201 */
202 __le64 start_offset;
203
204 /* grouping information for allocation decisions */
205 __le32 dev_group;
206
207 /* seek speed 0-100 where 100 is fastest */
208 u8 seek_speed;
209
210 /* bandwidth 0-100 where 100 is fastest */
211 u8 bandwidth;
212
213 /* btrfs generated uuid for this device */
214 u8 uuid[BTRFS_UUID_SIZE];
215
216 /* uuid of FS who owns this device */
217 u8 fsid[BTRFS_UUID_SIZE];
218} __attribute__ ((__packed__));
219
220struct btrfs_stripe {
221 __le64 devid;
222 __le64 offset;
223 u8 dev_uuid[BTRFS_UUID_SIZE];
224} __attribute__ ((__packed__));
225
226struct btrfs_chunk {
227 /* size of this chunk in bytes */
228 __le64 length;
229
230 /* objectid of the root referencing this chunk */
231 __le64 owner;
232
233 __le64 stripe_len;
234 __le64 type;
235
236 /* optimal io alignment for this chunk */
237 __le32 io_align;
238
239 /* optimal io width for this chunk */
240 __le32 io_width;
241
242 /* minimal io size for this chunk */
243 __le32 sector_size;
244
245 /* 2^16 stripes is quite a lot, a second limit is the size of a single
246 * item in the btree
247 */
248 __le16 num_stripes;
249
250 /* sub stripes only matter for raid10 */
251 __le16 sub_stripes;
252 struct btrfs_stripe stripe;
253 /* additional stripes go here */
254} __attribute__ ((__packed__));
255
256static inline unsigned long btrfs_chunk_item_size(int num_stripes)
257{
258 BUG_ON(num_stripes == 0);
259 return sizeof(struct btrfs_chunk) +
260 sizeof(struct btrfs_stripe) * (num_stripes - 1);
261}
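
Because one struct btrfs_stripe is embedded in struct btrfs_chunk, only num_stripes - 1 extra stripes are appended. With the packed layouts above (80-byte chunk including its embedded stripe, 32-byte stripe), a 3-stripe chunk item takes 80 + 2 * 32 = 144 bytes:

#include <assert.h>

#define STRIPE_SIZE	32	/* packed struct btrfs_stripe */
#define CHUNK_SIZE	80	/* packed struct btrfs_chunk, one stripe embedded */

static unsigned long chunk_item_size(int num_stripes)
{
	/* the first stripe is already inside struct btrfs_chunk */
	return CHUNK_SIZE + STRIPE_SIZE * (num_stripes - 1);
}

int main(void)
{
	assert(chunk_item_size(3) == 144);
	return 0;
}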
262
263#define BTRFS_FSID_SIZE 16
264#define BTRFS_HEADER_FLAG_WRITTEN (1 << 0)
265
266/*
267 * every tree block (leaf or node) starts with this header.
268 */
269struct btrfs_header {
270 /* these first four must match the super block */
271 u8 csum[BTRFS_CSUM_SIZE];
272 u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
273 __le64 bytenr; /* which block this node is supposed to live in */
274 __le64 flags;
275
276 /* allowed to be different from the super from here on down */
277 u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
278 __le64 generation;
279 __le64 owner;
280 __le32 nritems;
281 u8 level;
282} __attribute__ ((__packed__));
283
284#define BTRFS_NODEPTRS_PER_BLOCK(r) (((r)->nodesize - \
285 sizeof(struct btrfs_header)) / \
286 sizeof(struct btrfs_key_ptr))
287#define __BTRFS_LEAF_DATA_SIZE(bs) ((bs) - sizeof(struct btrfs_header))
288#define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->leafsize))
289#define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
290 sizeof(struct btrfs_item) - \
291 sizeof(struct btrfs_file_extent_item))
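
/*
 * worked example (assuming BTRFS_CSUM_SIZE is the 32 bytes defined
 * earlier in this file): sizeof(struct btrfs_header) is 101 and
 * sizeof(struct btrfs_key_ptr) is 33, so a 4K node holds
 * (4096 - 101) / 33 = 121 key pointers and a 4K leaf has
 * 4096 - 101 = 3995 bytes for items plus data.
 */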
292
293#define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32)
294
295/*
296 * this is a very generous portion of the super block, giving us
297 * room to translate 14 chunks with 3 stripes each.
298 */
299#define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048
300#define BTRFS_LABEL_SIZE 256
301
302/*
303 * the super block basically lists the main trees of the FS.
304 * it currently lacks any block count etc
305 */
306struct btrfs_super_block {
307 u8 csum[BTRFS_CSUM_SIZE];
308 /* the first 4 fields must match struct btrfs_header */
309 u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
310 __le64 bytenr; /* this block number */
311 __le64 flags;
312
313 /* allowed to be different from the btrfs_header from here on down */
314 __le64 magic;
315 __le64 generation;
316 __le64 root;
317 __le64 chunk_root;
318 __le64 log_root;
319
320 /* this will help find the new super based on the log root */
321 __le64 log_root_transid;
322 __le64 total_bytes;
323 __le64 bytes_used;
324 __le64 root_dir_objectid;
325 __le64 num_devices;
326 __le32 sectorsize;
327 __le32 nodesize;
328 __le32 leafsize;
329 __le32 stripesize;
330 __le32 sys_chunk_array_size;
331 __le64 chunk_root_generation;
332 __le64 compat_flags;
333 __le64 compat_ro_flags;
334 __le64 incompat_flags;
335 __le16 csum_type;
336 u8 root_level;
337 u8 chunk_root_level;
338 u8 log_root_level;
339 struct btrfs_dev_item dev_item;
340
341 char label[BTRFS_LABEL_SIZE];
342
343 /* future expansion */
344 __le64 reserved[32];
345 u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
346} __attribute__ ((__packed__));
347
348/*
349 * Compat flags that we support. If any incompat flags other than the
350 * ones specified below are set, we will fail to mount
351 */
352#define BTRFS_FEATURE_COMPAT_SUPP 0x0
353#define BTRFS_FEATURE_COMPAT_RO_SUPP 0x0
354#define BTRFS_FEATURE_INCOMPAT_SUPP 0x0
355
356/*
357 * A leaf is full of items. offset and size tell us where to find
358 * the item in the leaf (relative to the start of the data area)
359 */
360struct btrfs_item {
361 struct btrfs_disk_key key;
362 __le32 offset;
363 __le32 size;
364} __attribute__ ((__packed__));
365
366/*
367 * leaves have an item area and a data area:
368 * [item0, item1....itemN] [free space] [dataN...data1, data0]
369 *
370 * The data is separate from the items to get the keys closer together
371 * during searches.
372 */
373struct btrfs_leaf {
374 struct btrfs_header header;
375 struct btrfs_item items[];
376} __attribute__ ((__packed__));
377
378/*
379 * all non-leaf blocks are nodes, they hold only keys and pointers to
380 * other blocks
381 */
382struct btrfs_key_ptr {
383 struct btrfs_disk_key key;
384 __le64 blockptr;
385 __le64 generation;
386} __attribute__ ((__packed__));
387
388struct btrfs_node {
389 struct btrfs_header header;
390 struct btrfs_key_ptr ptrs[];
391} __attribute__ ((__packed__));
392
393/*
394 * btrfs_paths remember the path taken from the root down to the leaf.
395 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
396 * to any other levels that are present.
397 *
398 * The slots array records the index of the item or block pointer
399 * used while walking the tree.
400 */
401struct btrfs_path {
402 struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
403 int slots[BTRFS_MAX_LEVEL];
404 /* if there is real range locking, this locks field will change */
405 int locks[BTRFS_MAX_LEVEL];
406 int reada;
407 /* keep some upper locks as we walk down */
408 int keep_locks;
409 int skip_locking;
410 int lowest_level;
411
412 /*
413 * set by btrfs_split_item, tells search_slot to keep all locks
414 * and to force calls to keep space in the nodes
415 */
416 int search_for_split;
417};
418
419/*
420 * items in the extent btree are used to record the objectid of the
421 * owner of the block and the number of references
422 */
423struct btrfs_extent_item {
424 __le32 refs;
425} __attribute__ ((__packed__));
426
427struct btrfs_extent_ref {
428 __le64 root;
429 __le64 generation;
430 __le64 objectid;
431 __le32 num_refs;
432} __attribute__ ((__packed__));
433
434/* dev extents record the space on individual devices that has been
435 * allocated to a chunk. The owner field points back to the chunk
436 * allocation mapping tree that allocated the extent. The chunk tree
437 * uuid field is a way to double check the owner */
438struct btrfs_dev_extent {
439 __le64 chunk_tree;
440 __le64 chunk_objectid;
441 __le64 chunk_offset;
442 __le64 length;
443 u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
444} __attribute__ ((__packed__));
445
446struct btrfs_inode_ref {
447 __le64 index;
448 __le16 name_len;
449 /* name goes here */
450} __attribute__ ((__packed__));
451
452struct btrfs_timespec {
453 __le64 sec;
454 __le32 nsec;
455} __attribute__ ((__packed__));
456
457typedef enum {
458 BTRFS_COMPRESS_NONE = 0,
459 BTRFS_COMPRESS_ZLIB = 1,
460 BTRFS_COMPRESS_LAST = 2,
461} btrfs_compression_type;
462
463/* we don't understand any encryption methods right now */
464typedef enum {
465 BTRFS_ENCRYPTION_NONE = 0,
466 BTRFS_ENCRYPTION_LAST = 1,
467} btrfs_encryption_type;
468
469struct btrfs_inode_item {
470 /* nfs style generation number */
471 __le64 generation;
472 /* transid that last touched this inode */
473 __le64 transid;
474 __le64 size;
475 __le64 nbytes;
476 __le64 block_group;
477 __le32 nlink;
478 __le32 uid;
479 __le32 gid;
480 __le32 mode;
481 __le64 rdev;
482 __le64 flags;
483
484 /* modification sequence number for NFS */
485 __le64 sequence;
486
487 /*
488 * a little future expansion, for more than this we can
489 * just grow the inode item and version it
490 */
491 __le64 reserved[4];
492 struct btrfs_timespec atime;
493 struct btrfs_timespec ctime;
494 struct btrfs_timespec mtime;
495 struct btrfs_timespec otime;
496} __attribute__ ((__packed__));
497
498struct btrfs_dir_log_item {
499 __le64 end;
500} __attribute__ ((__packed__));
501
502struct btrfs_dir_item {
503 struct btrfs_disk_key location;
504 __le64 transid;
505 __le16 data_len;
506 __le16 name_len;
507 u8 type;
508} __attribute__ ((__packed__));
509
510struct btrfs_root_item {
511 struct btrfs_inode_item inode;
512 __le64 generation;
513 __le64 root_dirid;
514 __le64 bytenr;
515 __le64 byte_limit;
516 __le64 bytes_used;
517 __le64 last_snapshot;
518 __le64 flags;
519 __le32 refs;
520 struct btrfs_disk_key drop_progress;
521 u8 drop_level;
522 u8 level;
523} __attribute__ ((__packed__));
524
525/*
526 * this is used for both forward and backward root refs
527 */
528struct btrfs_root_ref {
529 __le64 dirid;
530 __le64 sequence;
531 __le16 name_len;
532} __attribute__ ((__packed__));
533
534#define BTRFS_FILE_EXTENT_INLINE 0
535#define BTRFS_FILE_EXTENT_REG 1
536#define BTRFS_FILE_EXTENT_PREALLOC 2
537
538struct btrfs_file_extent_item {
539 /*
540 * transaction id that created this extent
541 */
542 __le64 generation;
543 /*
544 * max number of bytes to hold this extent in ram
545 * when we split a compressed extent we can't know how big
546 * each of the resulting pieces will be. So, this is
547 * an upper limit on the size of the extent in ram instead of
548 * an exact limit.
549 */
550 __le64 ram_bytes;
551
552 /*
553 * 32 bits for the various ways we might encode the data,
554 * including compression and encryption. If any of these
555 * are set to something a given disk format doesn't understand
556 * it is treated like an incompat flag for reading and writing,
557 * but not for stat.
558 */
559 u8 compression;
560 u8 encryption;
561 __le16 other_encoding; /* spare for later use */
562
563 /* are we inline data or a real extent? */
564 u8 type;
565
566 /*
567 * disk space consumed by the extent, checksum blocks are included
568 * in these numbers
569 */
570 __le64 disk_bytenr;
571 __le64 disk_num_bytes;
572 /*
573 * the logical offset in file blocks (no csums)
574 * this extent record is for. This allows a file extent to point
575 * into the middle of an existing extent on disk, sharing it
576 * between two snapshots (useful if some bytes in the middle of the
577 * extent have changed)
578 */
579 __le64 offset;
580 /*
581 * the logical number of file blocks (no csums included). This
582 * always reflects the size uncompressed and without encoding.
583 */
584 __le64 num_bytes;
585
586} __attribute__ ((__packed__));
587
588struct btrfs_csum_item {
589 u8 csum;
590} __attribute__ ((__packed__));
591
592/* different types of block groups (and chunks) */
593#define BTRFS_BLOCK_GROUP_DATA (1 << 0)
594#define BTRFS_BLOCK_GROUP_SYSTEM (1 << 1)
595#define BTRFS_BLOCK_GROUP_METADATA (1 << 2)
596#define BTRFS_BLOCK_GROUP_RAID0 (1 << 3)
597#define BTRFS_BLOCK_GROUP_RAID1 (1 << 4)
598#define BTRFS_BLOCK_GROUP_DUP (1 << 5)
599#define BTRFS_BLOCK_GROUP_RAID10 (1 << 6)
600
601struct btrfs_block_group_item {
602 __le64 used;
603 __le64 chunk_objectid;
604 __le64 flags;
605} __attribute__ ((__packed__));
606
607struct btrfs_space_info {
608 u64 flags;
609 u64 total_bytes;
610 u64 bytes_used;
611 u64 bytes_pinned;
612 u64 bytes_reserved;
613 u64 bytes_readonly;
614 int full;
615 int force_alloc;
616 struct list_head list;
617
618 /* for block groups in our same type */
619 struct list_head block_groups;
620 spinlock_t lock;
621 struct rw_semaphore groups_sem;
622};
623
624struct btrfs_free_space {
625 struct rb_node bytes_index;
626 struct rb_node offset_index;
627 u64 offset;
628 u64 bytes;
629};
630
631struct btrfs_block_group_cache {
632 struct btrfs_key key;
633 struct btrfs_block_group_item item;
634 spinlock_t lock;
635 struct mutex alloc_mutex;
636 struct mutex cache_mutex;
637 u64 pinned;
638 u64 reserved;
639 u64 flags;
640 int cached;
641 int ro;
642 int dirty;
643
644 struct btrfs_space_info *space_info;
645
646 /* free space cache stuff */
647 struct rb_root free_space_bytes;
648 struct rb_root free_space_offset;
649
650 /* block group cache stuff */
651 struct rb_node cache_node;
652
653 /* for block groups in the same raid type */
654 struct list_head list;
655
656 /* usage count */
657 atomic_t count;
658};
659
660struct btrfs_leaf_ref_tree {
661 struct rb_root root;
662 struct list_head list;
663 spinlock_t lock;
664};
665
666struct btrfs_device;
667struct btrfs_fs_devices;
668struct btrfs_fs_info {
669 u8 fsid[BTRFS_FSID_SIZE];
670 u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
671 struct btrfs_root *extent_root;
672 struct btrfs_root *tree_root;
673 struct btrfs_root *chunk_root;
674 struct btrfs_root *dev_root;
675 struct btrfs_root *fs_root;
676 struct btrfs_root *csum_root;
677
678 /* the log root tree is a directory of all the other log roots */
679 struct btrfs_root *log_root_tree;
680 struct radix_tree_root fs_roots_radix;
681
682 /* block group cache stuff */
683 spinlock_t block_group_cache_lock;
684 struct rb_root block_group_cache_tree;
685
686 struct extent_io_tree pinned_extents;
687 struct extent_io_tree pending_del;
688 struct extent_io_tree extent_ins;
689
690 /* logical->physical extent mapping */
691 struct btrfs_mapping_tree mapping_tree;
692
693 u64 generation;
694 u64 last_trans_committed;
695 u64 last_trans_new_blockgroup;
696 u64 open_ioctl_trans;
697 unsigned long mount_opt;
698 u64 max_extent;
699 u64 max_inline;
700 u64 alloc_start;
701 struct btrfs_transaction *running_transaction;
702 wait_queue_head_t transaction_throttle;
703 wait_queue_head_t transaction_wait;
704
705 wait_queue_head_t async_submit_wait;
706 wait_queue_head_t tree_log_wait;
707
708 struct btrfs_super_block super_copy;
709 struct btrfs_super_block super_for_commit;
710 struct block_device *__bdev;
711 struct super_block *sb;
712 struct inode *btree_inode;
713 struct backing_dev_info bdi;
714 spinlock_t hash_lock;
715 struct mutex trans_mutex;
716 struct mutex tree_log_mutex;
717 struct mutex transaction_kthread_mutex;
718 struct mutex cleaner_mutex;
719 struct mutex extent_ins_mutex;
720 struct mutex pinned_mutex;
721 struct mutex chunk_mutex;
722 struct mutex drop_mutex;
723 struct mutex volume_mutex;
724 struct mutex tree_reloc_mutex;
725 struct list_head trans_list;
726 struct list_head hashers;
727 struct list_head dead_roots;
728
729 atomic_t nr_async_submits;
730 atomic_t async_submit_draining;
731 atomic_t nr_async_bios;
732 atomic_t async_delalloc_pages;
733 atomic_t tree_log_writers;
734 atomic_t tree_log_commit;
735 unsigned long tree_log_batch;
736 u64 tree_log_transid;
737
738 /*
739 * this is used by the balancing code to wait for all the pending
740 * ordered extents
741 */
742 spinlock_t ordered_extent_lock;
743 struct list_head ordered_extents;
744 struct list_head delalloc_inodes;
745
746 /*
747 * there is a pool of worker threads for checksumming during writes
748 * and a pool for checksumming after reads. This is because readers
749 * can run with FS locks held, and the writers may be waiting for
750 * those locks. We don't want ordering in the pending list to cause
751 * deadlocks, and so the two are serviced separately.
752 *
753 * A third pool does submit_bio to avoid deadlocking with the other
754 * two
755 */
756 struct btrfs_workers workers;
757 struct btrfs_workers delalloc_workers;
758 struct btrfs_workers endio_workers;
759 struct btrfs_workers endio_meta_workers;
760 struct btrfs_workers endio_meta_write_workers;
761 struct btrfs_workers endio_write_workers;
762 struct btrfs_workers submit_workers;
763 /*
764 * fixup workers take dirty pages that didn't properly go through
765 * the cow mechanism and make them safe to write. It happens
766 * for the sys_munmap function call path
767 */
768 struct btrfs_workers fixup_workers;
769 struct task_struct *transaction_kthread;
770 struct task_struct *cleaner_kthread;
771 int thread_pool_size;
772
773 /* tree relocation relocated fields */
774 struct list_head dead_reloc_roots;
775 struct btrfs_leaf_ref_tree reloc_ref_tree;
776 struct btrfs_leaf_ref_tree shared_ref_tree;
777
778 struct kobject super_kobj;
779 struct completion kobj_unregister;
780 int do_barriers;
781 int closing;
782 int log_root_recovering;
783 atomic_t throttles;
784 atomic_t throttle_gen;
785
786 u64 total_pinned;
787 struct list_head dirty_cowonly_roots;
788
789 struct btrfs_fs_devices *fs_devices;
790 struct list_head space_info;
791 spinlock_t delalloc_lock;
792 spinlock_t new_trans_lock;
793 u64 delalloc_bytes;
794 u64 last_alloc;
795 u64 last_data_alloc;
796
797 spinlock_t ref_cache_lock;
798 u64 total_ref_cache_size;
799
800 u64 avail_data_alloc_bits;
801 u64 avail_metadata_alloc_bits;
802 u64 avail_system_alloc_bits;
803 u64 data_alloc_profile;
804 u64 metadata_alloc_profile;
805 u64 system_alloc_profile;
806
807 void *bdev_holder;
808};
809
810/*
811 * in ram representation of the tree. extent_root is used for all
812 * allocations, and the extent tree is rooted at extent_root.
813 */
814struct btrfs_dirty_root;
815struct btrfs_root {
816 struct extent_buffer *node;
817
818 /* the node lock is held while changing the node pointer */
819 spinlock_t node_lock;
820
821 struct extent_buffer *commit_root;
822 struct btrfs_leaf_ref_tree *ref_tree;
823 struct btrfs_leaf_ref_tree ref_tree_struct;
824 struct btrfs_dirty_root *dirty_root;
825 struct btrfs_root *log_root;
826 struct btrfs_root *reloc_root;
827
828 struct btrfs_root_item root_item;
829 struct btrfs_key root_key;
830 struct btrfs_fs_info *fs_info;
831 struct extent_io_tree dirty_log_pages;
832
833 struct kobject root_kobj;
834 struct completion kobj_unregister;
835 struct mutex objectid_mutex;
836 struct mutex log_mutex;
837
838 u64 objectid;
839 u64 last_trans;
840
841 /* data allocations are done in sectorsize units */
842 u32 sectorsize;
843
844 /* node allocations are done in nodesize units */
845 u32 nodesize;
846
847 /* leaf allocations are done in leafsize units */
848 u32 leafsize;
849
850 u32 stripesize;
851
852 u32 type;
853 u64 highest_inode;
854 u64 last_inode_alloc;
855 int ref_cows;
856 int track_dirty;
857 u64 defrag_trans_start;
858 struct btrfs_key defrag_progress;
859 struct btrfs_key defrag_max;
860 int defrag_running;
861 int defrag_level;
862 char *name;
863 int in_sysfs;
864
865 /* the dirty list is only used by non-reference counted roots */
866 struct list_head dirty_list;
867
868 spinlock_t list_lock;
869 struct list_head dead_list;
870 struct list_head orphan_list;
871
872 /*
873 * right now this just gets used so that a root has its own devid
874 * for stat. It may be used for more later
875 */
876 struct super_block anon_super;
877};
878
879/*
880 * inode items have the data typically returned from stat and store other
881 * info about object characteristics. There is one for every file and dir
882 * in the FS
883 */
885#define BTRFS_INODE_ITEM_KEY 1
886#define BTRFS_INODE_REF_KEY 12
887#define BTRFS_XATTR_ITEM_KEY 24
888#define BTRFS_ORPHAN_ITEM_KEY 48
889/* reserve 2-15 close to the inode for later flexibility */
890
891/*
892 * dir items are the name -> inode pointers in a directory. There is one
893 * for every name in a directory.
894 */
895#define BTRFS_DIR_LOG_ITEM_KEY 60
896#define BTRFS_DIR_LOG_INDEX_KEY 72
897#define BTRFS_DIR_ITEM_KEY 84
898#define BTRFS_DIR_INDEX_KEY 96
899/*
900 * extent data is for file data
901 */
902#define BTRFS_EXTENT_DATA_KEY 108
903
904/*
905 * extent csums are stored in a separate tree and hold csums for
906 * an entire extent on disk.
907 */
908#define BTRFS_EXTENT_CSUM_KEY 128
909
910/*
911 * root items point to tree roots. They are typically in the root
912 * tree used by the super block to find all the other trees
913 */
914#define BTRFS_ROOT_ITEM_KEY 132
915
916/*
917 * root backrefs tie subvols and snapshots to the directory entries that
918 * reference them
919 */
920#define BTRFS_ROOT_BACKREF_KEY 144
921
922/*
923 * root refs make a fast index for listing all of the snapshots and
924 * subvolumes referenced by a given root. They point directly to the
925 * directory item in the root that references the subvol
926 */
927#define BTRFS_ROOT_REF_KEY 156
928
929/*
930 * extent items are in the extent map tree. These record which blocks
931 * are used, and how many references there are to each block
932 */
933#define BTRFS_EXTENT_ITEM_KEY 168
934#define BTRFS_EXTENT_REF_KEY 180
935
936/*
937 * block groups give us hints into the extent allocation trees. Which
938 * blocks are free etc etc
939 */
940#define BTRFS_BLOCK_GROUP_ITEM_KEY 192
941
942#define BTRFS_DEV_EXTENT_KEY 204
943#define BTRFS_DEV_ITEM_KEY 216
944#define BTRFS_CHUNK_ITEM_KEY 228
945
946/*
947 * string items are for debugging. They just store a short string of
948 * data in the FS
949 */
950#define BTRFS_STRING_ITEM_KEY 253
951
952#define BTRFS_MOUNT_NODATASUM (1 << 0)
953#define BTRFS_MOUNT_NODATACOW (1 << 1)
954#define BTRFS_MOUNT_NOBARRIER (1 << 2)
955#define BTRFS_MOUNT_SSD (1 << 3)
956#define BTRFS_MOUNT_DEGRADED (1 << 4)
957#define BTRFS_MOUNT_COMPRESS (1 << 5)
958
959#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
960#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
961#define btrfs_test_opt(root, opt) ((root)->fs_info->mount_opt & \
962 BTRFS_MOUNT_##opt)
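
/*
 * usage sketch: option parsing sets bits with, say,
 * btrfs_set_opt(info->mount_opt, SSD), and code paths test them per
 * root with btrfs_test_opt(root, SSD).
 */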
963/*
964 * Inode flags
965 */
966#define BTRFS_INODE_NODATASUM (1 << 0)
967#define BTRFS_INODE_NODATACOW (1 << 1)
968#define BTRFS_INODE_READONLY (1 << 2)
969#define BTRFS_INODE_NOCOMPRESS (1 << 3)
970#define BTRFS_INODE_PREALLOC (1 << 4)
971#define btrfs_clear_flag(inode, flag) (BTRFS_I(inode)->flags &= \
972 ~BTRFS_INODE_##flag)
973#define btrfs_set_flag(inode, flag) (BTRFS_I(inode)->flags |= \
974 BTRFS_INODE_##flag)
975#define btrfs_test_flag(inode, flag) (BTRFS_I(inode)->flags & \
976 BTRFS_INODE_##flag)
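
/*
 * usage sketch: these act on the in-memory inode flags, e.g.
 * btrfs_test_flag(inode, NODATACOW) checks BTRFS_INODE_NODATACOW in
 * BTRFS_I(inode)->flags (BTRFS_I() is defined in btrfs_inode.h).
 */
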
977/* some macros to generate set/get funcs for the struct fields. This
978 * assumes there is a lefoo_to_cpu for every type, so let's make a simple
979 * one for u8:
980 */
981#define le8_to_cpu(v) (v)
982#define cpu_to_le8(v) (v)
983#define __le8 u8
984
985#define read_eb_member(eb, ptr, type, member, result) ( \
986 read_extent_buffer(eb, (char *)(result), \
987 ((unsigned long)(ptr)) + \
988 offsetof(type, member), \
989 sizeof(((type *)0)->member)))
990
991#define write_eb_member(eb, ptr, type, member, result) ( \
992 write_extent_buffer(eb, (char *)(result), \
993 ((unsigned long)(ptr)) + \
994 offsetof(type, member), \
995 sizeof(((type *)0)->member)))
996
997#ifndef BTRFS_SETGET_FUNCS
998#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
999u##bits btrfs_##name(struct extent_buffer *eb, type *s); \
1000void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);
1001#endif
1002
1003#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
1004static inline u##bits btrfs_##name(struct extent_buffer *eb) \
1005{ \
1006 type *p = kmap_atomic(eb->first_page, KM_USER0); \
1007 u##bits res = le##bits##_to_cpu(p->member); \
1008 kunmap_atomic(p, KM_USER0); \
1009 return res; \
1010} \
1011static inline void btrfs_set_##name(struct extent_buffer *eb, \
1012 u##bits val) \
1013{ \
1014 type *p = kmap_atomic(eb->first_page, KM_USER0); \
1015 p->member = cpu_to_le##bits(val); \
1016 kunmap_atomic(p, KM_USER0); \
1017}
1018
1019#define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \
1020static inline u##bits btrfs_##name(type *s) \
1021{ \
1022 return le##bits##_to_cpu(s->member); \
1023} \
1024static inline void btrfs_set_##name(type *s, u##bits val) \
1025{ \
1026 s->member = cpu_to_le##bits(val); \
1027}
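
/*
 * expansion sketch: BTRFS_SETGET_STACK_FUNCS(disk_key_objectid,
 * struct btrfs_disk_key, objectid, 64), used further down, expands to
 *
 *	static inline u64 btrfs_disk_key_objectid(struct btrfs_disk_key *s)
 *	{ return le64_to_cpu(s->objectid); }
 *	static inline void btrfs_set_disk_key_objectid(struct btrfs_disk_key *s,
 *						       u64 val)
 *	{ s->objectid = cpu_to_le64(val); }
 *
 * giving every on-disk field a pair of endian-safe accessors.
 */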
1028
1029BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64);
1030BTRFS_SETGET_FUNCS(device_total_bytes, struct btrfs_dev_item, total_bytes, 64);
1031BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64);
1032BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32);
1033BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32);
1034BTRFS_SETGET_FUNCS(device_start_offset, struct btrfs_dev_item,
1035 start_offset, 64);
1036BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32);
1037BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64);
1038BTRFS_SETGET_FUNCS(device_group, struct btrfs_dev_item, dev_group, 32);
1039BTRFS_SETGET_FUNCS(device_seek_speed, struct btrfs_dev_item, seek_speed, 8);
1040BTRFS_SETGET_FUNCS(device_bandwidth, struct btrfs_dev_item, bandwidth, 8);
1041BTRFS_SETGET_FUNCS(device_generation, struct btrfs_dev_item, generation, 64);
1042
1043BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item, type, 64);
1044BTRFS_SETGET_STACK_FUNCS(stack_device_total_bytes, struct btrfs_dev_item,
1045 total_bytes, 64);
1046BTRFS_SETGET_STACK_FUNCS(stack_device_bytes_used, struct btrfs_dev_item,
1047 bytes_used, 64);
1048BTRFS_SETGET_STACK_FUNCS(stack_device_io_align, struct btrfs_dev_item,
1049 io_align, 32);
1050BTRFS_SETGET_STACK_FUNCS(stack_device_io_width, struct btrfs_dev_item,
1051 io_width, 32);
1052BTRFS_SETGET_STACK_FUNCS(stack_device_sector_size, struct btrfs_dev_item,
1053 sector_size, 32);
1054BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64);
1055BTRFS_SETGET_STACK_FUNCS(stack_device_group, struct btrfs_dev_item,
1056 dev_group, 32);
1057BTRFS_SETGET_STACK_FUNCS(stack_device_seek_speed, struct btrfs_dev_item,
1058 seek_speed, 8);
1059BTRFS_SETGET_STACK_FUNCS(stack_device_bandwidth, struct btrfs_dev_item,
1060 bandwidth, 8);
1061BTRFS_SETGET_STACK_FUNCS(stack_device_generation, struct btrfs_dev_item,
1062 generation, 64);
1063
1064static inline char *btrfs_device_uuid(struct btrfs_dev_item *d)
1065{
1066 return (char *)d + offsetof(struct btrfs_dev_item, uuid);
1067}
1068
1069static inline char *btrfs_device_fsid(struct btrfs_dev_item *d)
1070{
1071 return (char *)d + offsetof(struct btrfs_dev_item, fsid);
1072}
1073
1074BTRFS_SETGET_FUNCS(chunk_length, struct btrfs_chunk, length, 64);
1075BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64);
1076BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, stripe_len, 64);
1077BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32);
1078BTRFS_SETGET_FUNCS(chunk_io_width, struct btrfs_chunk, io_width, 32);
1079BTRFS_SETGET_FUNCS(chunk_sector_size, struct btrfs_chunk, sector_size, 32);
1080BTRFS_SETGET_FUNCS(chunk_type, struct btrfs_chunk, type, 64);
1081BTRFS_SETGET_FUNCS(chunk_num_stripes, struct btrfs_chunk, num_stripes, 16);
1082BTRFS_SETGET_FUNCS(chunk_sub_stripes, struct btrfs_chunk, sub_stripes, 16);
1083BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64);
1084BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64);
1085
1086static inline char *btrfs_stripe_dev_uuid(struct btrfs_stripe *s)
1087{
1088 return (char *)s + offsetof(struct btrfs_stripe, dev_uuid);
1089}
1090
1091BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64);
1092BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64);
1093BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk,
1094 stripe_len, 64);
1095BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk,
1096 io_align, 32);
1097BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk,
1098 io_width, 32);
1099BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk,
1100 sector_size, 32);
1101BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64);
1102BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk,
1103 num_stripes, 16);
1104BTRFS_SETGET_STACK_FUNCS(stack_chunk_sub_stripes, struct btrfs_chunk,
1105 sub_stripes, 16);
1106BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64);
1107BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64);
1108
1109static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c,
1110 int nr)
1111{
1112 unsigned long offset = (unsigned long)c;
1113 offset += offsetof(struct btrfs_chunk, stripe);
1114 offset += nr * sizeof(struct btrfs_stripe);
1115 return (struct btrfs_stripe *)offset;
1116}
1117
1118static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr)
1119{
1120 return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr));
1121}
1122
1123static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb,
1124 struct btrfs_chunk *c, int nr)
1125{
1126 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
1127}
1128
1129static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb,
1130 struct btrfs_chunk *c, int nr,
1131 u64 val)
1132{
1133 btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val);
1134}
1135
1136static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
1137 struct btrfs_chunk *c, int nr)
1138{
1139 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
1140}
1141
1142static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb,
1143 struct btrfs_chunk *c, int nr,
1144 u64 val)
1145{
1146 btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val);
1147}
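
/*
 * iteration sketch (hypothetical helper, not in the original file):
 * walk every stripe of a chunk inside an extent buffer and report
 * whether the given device backs any of them.
 */
static inline int btrfs_example_chunk_uses_dev(struct extent_buffer *eb,
					       struct btrfs_chunk *chunk,
					       u64 devid)
{
	int num = btrfs_chunk_num_stripes(eb, chunk);
	int i;

	for (i = 0; i < num; i++)
		if (btrfs_stripe_devid_nr(eb, chunk, i) == devid)
			return 1;
	return 0;
}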
1148
1149/* struct btrfs_block_group_item */
1150BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
1151 used, 64);
1152BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item,
1153 used, 64);
1154BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid,
1155 struct btrfs_block_group_item, chunk_objectid, 64);
1156
1157BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid,
1158 struct btrfs_block_group_item, chunk_objectid, 64);
1159BTRFS_SETGET_FUNCS(disk_block_group_flags,
1160 struct btrfs_block_group_item, flags, 64);
1161BTRFS_SETGET_STACK_FUNCS(block_group_flags,
1162 struct btrfs_block_group_item, flags, 64);
1163
1164/* struct btrfs_inode_ref */
1165BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16);
1166BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64);
1167
1168/* struct btrfs_inode_item */
1169BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64);
1170BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64);
1171BTRFS_SETGET_FUNCS(inode_transid, struct btrfs_inode_item, transid, 64);
1172BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item, size, 64);
1173BTRFS_SETGET_FUNCS(inode_nbytes, struct btrfs_inode_item, nbytes, 64);
1174BTRFS_SETGET_FUNCS(inode_block_group, struct btrfs_inode_item, block_group, 64);
1175BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32);
1176BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32);
1177BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32);
1178BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32);
1179BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64);
1180BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64);
1181
1182static inline struct btrfs_timespec *
1183btrfs_inode_atime(struct btrfs_inode_item *inode_item)
1184{
1185 unsigned long ptr = (unsigned long)inode_item;
1186 ptr += offsetof(struct btrfs_inode_item, atime);
1187 return (struct btrfs_timespec *)ptr;
1188}
1189
1190static inline struct btrfs_timespec *
1191btrfs_inode_mtime(struct btrfs_inode_item *inode_item)
1192{
1193 unsigned long ptr = (unsigned long)inode_item;
1194 ptr += offsetof(struct btrfs_inode_item, mtime);
1195 return (struct btrfs_timespec *)ptr;
1196}
1197
1198static inline struct btrfs_timespec *
1199btrfs_inode_ctime(struct btrfs_inode_item *inode_item)
1200{
1201 unsigned long ptr = (unsigned long)inode_item;
1202 ptr += offsetof(struct btrfs_inode_item, ctime);
1203 return (struct btrfs_timespec *)ptr;
1204}
1205
1206static inline struct btrfs_timespec *
1207btrfs_inode_otime(struct btrfs_inode_item *inode_item)
1208{
1209 unsigned long ptr = (unsigned long)inode_item;
1210 ptr += offsetof(struct btrfs_inode_item, otime);
1211 return (struct btrfs_timespec *)ptr;
1212}
1213
1214BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
1215BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
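
/*
 * read sketch (hypothetical helper): combine the offset helpers above
 * with the generated accessors to pull a timestamp out of an inode
 * item that lives inside an extent buffer.
 */
static inline u64 btrfs_example_inode_atime_sec(struct extent_buffer *eb,
						struct btrfs_inode_item *ii)
{
	struct btrfs_timespec *ts = btrfs_inode_atime(ii);

	return btrfs_timespec_sec(eb, ts);
}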
1216
1217/* struct btrfs_dev_extent */
1218BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent,
1219 chunk_tree, 64);
1220BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent,
1221 chunk_objectid, 64);
1222BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent,
1223 chunk_offset, 64);
1224BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64);
1225
1226static inline u8 *btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev)
1227{
1228 unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid);
1229 return (u8 *)((unsigned long)dev + ptr);
1230}
1231
1232/* struct btrfs_extent_ref */
1233BTRFS_SETGET_FUNCS(ref_root, struct btrfs_extent_ref, root, 64);
1234BTRFS_SETGET_FUNCS(ref_generation, struct btrfs_extent_ref, generation, 64);
1235BTRFS_SETGET_FUNCS(ref_objectid, struct btrfs_extent_ref, objectid, 64);
1236BTRFS_SETGET_FUNCS(ref_num_refs, struct btrfs_extent_ref, num_refs, 32);
1237
1238BTRFS_SETGET_STACK_FUNCS(stack_ref_root, struct btrfs_extent_ref, root, 64);
1239BTRFS_SETGET_STACK_FUNCS(stack_ref_generation, struct btrfs_extent_ref,
1240 generation, 64);
1241BTRFS_SETGET_STACK_FUNCS(stack_ref_objectid, struct btrfs_extent_ref,
1242 objectid, 64);
1243BTRFS_SETGET_STACK_FUNCS(stack_ref_num_refs, struct btrfs_extent_ref,
1244 num_refs, 32);
1245
1246/* struct btrfs_extent_item */
1247BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 32);
1248BTRFS_SETGET_STACK_FUNCS(stack_extent_refs, struct btrfs_extent_item,
1249 refs, 32);
1250
1251/* struct btrfs_node */
1252BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64);
1253BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64);
1254
1255static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr)
1256{
1257 unsigned long ptr;
1258 ptr = offsetof(struct btrfs_node, ptrs) +
1259 sizeof(struct btrfs_key_ptr) * nr;
1260 return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr);
1261}
1262
1263static inline void btrfs_set_node_blockptr(struct extent_buffer *eb,
1264 int nr, u64 val)
1265{
1266 unsigned long ptr;
1267 ptr = offsetof(struct btrfs_node, ptrs) +
1268 sizeof(struct btrfs_key_ptr) * nr;
1269 btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val);
1270}
1271
1272static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr)
1273{
1274 unsigned long ptr;
1275 ptr = offsetof(struct btrfs_node, ptrs) +
1276 sizeof(struct btrfs_key_ptr) * nr;
1277 return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr);
1278}
1279
1280static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb,
1281 int nr, u64 val)
1282{
1283 unsigned long ptr;
1284 ptr = offsetof(struct btrfs_node, ptrs) +
1285 sizeof(struct btrfs_key_ptr) * nr;
1286 btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val);
1287}
1288
1289static inline unsigned long btrfs_node_key_ptr_offset(int nr)
1290{
1291 return offsetof(struct btrfs_node, ptrs) +
1292 sizeof(struct btrfs_key_ptr) * nr;
1293}
1294
1295void btrfs_node_key(struct extent_buffer *eb,
1296 struct btrfs_disk_key *disk_key, int nr);
1297
1298static inline void btrfs_set_node_key(struct extent_buffer *eb,
1299 struct btrfs_disk_key *disk_key, int nr)
1300{
1301 unsigned long ptr;
1302 ptr = btrfs_node_key_ptr_offset(nr);
1303 write_eb_member(eb, (struct btrfs_key_ptr *)ptr,
1304 struct btrfs_key_ptr, key, disk_key);
1305}
1306
1307/* struct btrfs_item */
1308BTRFS_SETGET_FUNCS(item_offset, struct btrfs_item, offset, 32);
1309BTRFS_SETGET_FUNCS(item_size, struct btrfs_item, size, 32);
1310
1311static inline unsigned long btrfs_item_nr_offset(int nr)
1312{
1313 return offsetof(struct btrfs_leaf, items) +
1314 sizeof(struct btrfs_item) * nr;
1315}
1316
1317static inline struct btrfs_item *btrfs_item_nr(struct extent_buffer *eb,
1318 int nr)
1319{
1320 return (struct btrfs_item *)btrfs_item_nr_offset(nr);
1321}
1322
1323static inline u32 btrfs_item_end(struct extent_buffer *eb,
1324 struct btrfs_item *item)
1325{
1326 return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item);
1327}
1328
1329static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr)
1330{
1331 return btrfs_item_end(eb, btrfs_item_nr(eb, nr));
1332}
1333
1334static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr)
1335{
1336 return btrfs_item_offset(eb, btrfs_item_nr(eb, nr));
1337}
1338
1339static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr)
1340{
1341 return btrfs_item_size(eb, btrfs_item_nr(eb, nr));
1342}
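
/*
 * geometry note: item nr's payload occupies bytes
 * [btrfs_item_offset_nr(eb, nr), btrfs_item_end_nr(eb, nr)) of the leaf
 * data area, which fills back-to-front as described above struct
 * btrfs_leaf.
 */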
1343
1344static inline void btrfs_item_key(struct extent_buffer *eb,
1345 struct btrfs_disk_key *disk_key, int nr)
1346{
1347 struct btrfs_item *item = btrfs_item_nr(eb, nr);
1348 read_eb_member(eb, item, struct btrfs_item, key, disk_key);
1349}
1350
1351static inline void btrfs_set_item_key(struct extent_buffer *eb,
1352 struct btrfs_disk_key *disk_key, int nr)
1353{
1354 struct btrfs_item *item = btrfs_item_nr(eb, nr);
1355 write_eb_member(eb, item, struct btrfs_item, key, disk_key);
1356}
1357
1358BTRFS_SETGET_FUNCS(dir_log_end, struct btrfs_dir_log_item, end, 64);
1359
1360/*
1361 * struct btrfs_root_ref
1362 */
1363BTRFS_SETGET_FUNCS(root_ref_dirid, struct btrfs_root_ref, dirid, 64);
1364BTRFS_SETGET_FUNCS(root_ref_sequence, struct btrfs_root_ref, sequence, 64);
1365BTRFS_SETGET_FUNCS(root_ref_name_len, struct btrfs_root_ref, name_len, 16);
1366
1367/* struct btrfs_dir_item */
1368BTRFS_SETGET_FUNCS(dir_data_len, struct btrfs_dir_item, data_len, 16);
1369BTRFS_SETGET_FUNCS(dir_type, struct btrfs_dir_item, type, 8);
1370BTRFS_SETGET_FUNCS(dir_name_len, struct btrfs_dir_item, name_len, 16);
1371BTRFS_SETGET_FUNCS(dir_transid, struct btrfs_dir_item, transid, 64);
1372
1373static inline void btrfs_dir_item_key(struct extent_buffer *eb,
1374 struct btrfs_dir_item *item,
1375 struct btrfs_disk_key *key)
1376{
1377 read_eb_member(eb, item, struct btrfs_dir_item, location, key);
1378}
1379
1380static inline void btrfs_set_dir_item_key(struct extent_buffer *eb,
1381 struct btrfs_dir_item *item,
1382 struct btrfs_disk_key *key)
1383{
1384 write_eb_member(eb, item, struct btrfs_dir_item, location, key);
1385}
1386
1387/* struct btrfs_disk_key */
1388BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key,
1389 objectid, 64);
1390BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64);
1391BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8);
1392
1393static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu,
1394 struct btrfs_disk_key *disk)
1395{
1396 cpu->offset = le64_to_cpu(disk->offset);
1397 cpu->type = disk->type;
1398 cpu->objectid = le64_to_cpu(disk->objectid);
1399}
1400
1401static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk,
1402 struct btrfs_key *cpu)
1403{
1404 disk->offset = cpu_to_le64(cpu->offset);
1405 disk->type = cpu->type;
1406 disk->objectid = cpu_to_le64(cpu->objectid);
1407}
1408
1409static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb,
1410 struct btrfs_key *key, int nr)
1411{
1412 struct btrfs_disk_key disk_key;
1413 btrfs_node_key(eb, &disk_key, nr);
1414 btrfs_disk_key_to_cpu(key, &disk_key);
1415}
1416
1417static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb,
1418 struct btrfs_key *key, int nr)
1419{
1420 struct btrfs_disk_key disk_key;
1421 btrfs_item_key(eb, &disk_key, nr);
1422 btrfs_disk_key_to_cpu(key, &disk_key);
1423}
1424
1425static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb,
1426 struct btrfs_dir_item *item,
1427 struct btrfs_key *key)
1428{
1429 struct btrfs_disk_key disk_key;
1430 btrfs_dir_item_key(eb, item, &disk_key);
1431 btrfs_disk_key_to_cpu(key, &disk_key);
1432}
1433
1435static inline u8 btrfs_key_type(struct btrfs_key *key)
1436{
1437 return key->type;
1438}
1439
1440static inline void btrfs_set_key_type(struct btrfs_key *key, u8 val)
1441{
1442 key->type = val;
1443}
1444
1445/* struct btrfs_header */
1446BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64);
1447BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header,
1448 generation, 64);
1449BTRFS_SETGET_HEADER_FUNCS(header_owner, struct btrfs_header, owner, 64);
1450BTRFS_SETGET_HEADER_FUNCS(header_nritems, struct btrfs_header, nritems, 32);
1451BTRFS_SETGET_HEADER_FUNCS(header_flags, struct btrfs_header, flags, 64);
1452BTRFS_SETGET_HEADER_FUNCS(header_level, struct btrfs_header, level, 8);
1453
1454static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag)
1455{
1456 return (btrfs_header_flags(eb) & flag) == flag;
1457}
1458
1459static inline int btrfs_set_header_flag(struct extent_buffer *eb, u64 flag)
1460{
1461 u64 flags = btrfs_header_flags(eb);
1462 btrfs_set_header_flags(eb, flags | flag);
1463 return (flags & flag) == flag;
1464}
1465
1466static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag)
1467{
1468 u64 flags = btrfs_header_flags(eb);
1469 btrfs_set_header_flags(eb, flags & ~flag);
1470 return (flags & flag) == flag;
1471}
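
/*
 * usage note: the WRITTEN bit defined above is driven through these,
 * e.g. btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) marks a
 * block as sent to disk; the return value says whether the flag was
 * already set beforehand.
 */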
1472
1473static inline u8 *btrfs_header_fsid(struct extent_buffer *eb)
1474{
1475 unsigned long ptr = offsetof(struct btrfs_header, fsid);
1476 return (u8 *)ptr;
1477}
1478
1479static inline u8 *btrfs_header_chunk_tree_uuid(struct extent_buffer *eb)
1480{
1481 unsigned long ptr = offsetof(struct btrfs_header, chunk_tree_uuid);
1482 return (u8 *)ptr;
1483}
1484
1485static inline u8 *btrfs_super_fsid(struct extent_buffer *eb)
1486{
1487 unsigned long ptr = offsetof(struct btrfs_super_block, fsid);
1488 return (u8 *)ptr;
1489}
1490
1491static inline u8 *btrfs_header_csum(struct extent_buffer *eb)
1492{
1493 unsigned long ptr = offsetof(struct btrfs_header, csum);
1494 return (u8 *)ptr;
1495}
1496
1497static inline struct btrfs_node *btrfs_buffer_node(struct extent_buffer *eb)
1498{
1499 return NULL;
1500}
1501
1502static inline struct btrfs_leaf *btrfs_buffer_leaf(struct extent_buffer *eb)
1503{
1504 return NULL;
1505}
1506
1507static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb)
1508{
1509 return NULL;
1510}
1511
1512static inline int btrfs_is_leaf(struct extent_buffer *eb)
1513{
1514 return btrfs_header_level(eb) == 0;
1515}
1516
1517/* struct btrfs_root_item */
1518BTRFS_SETGET_FUNCS(disk_root_generation, struct btrfs_root_item,
1519 generation, 64);
1520BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32);
1521BTRFS_SETGET_FUNCS(disk_root_bytenr, struct btrfs_root_item, bytenr, 64);
1522BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8);
1523
1524BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item,
1525 generation, 64);
1526BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64);
1527BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8);
1528BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64);
1529BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32);
1530BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64);
1531BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64);
1532BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64);
1533BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item,
1534 last_snapshot, 64);
1535
1536/* struct btrfs_super_block */
1537
1538BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64);
1539BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64);
1540BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block,
1541 generation, 64);
1542BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64);
1543BTRFS_SETGET_STACK_FUNCS(super_sys_array_size,
1544 struct btrfs_super_block, sys_chunk_array_size, 32);
1545BTRFS_SETGET_STACK_FUNCS(super_chunk_root_generation,
1546 struct btrfs_super_block, chunk_root_generation, 64);
1547BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block,
1548 root_level, 8);
1549BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block,
1550 chunk_root, 64);
1551BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block,
1552 chunk_root_level, 8);
1553BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block,
1554 log_root, 64);
1555BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block,
1556 log_root_transid, 64);
1557BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block,
1558 log_root_level, 8);
1559BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block,
1560 total_bytes, 64);
1561BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block,
1562 bytes_used, 64);
1563BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block,
1564 sectorsize, 32);
1565BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block,
1566 nodesize, 32);
1567BTRFS_SETGET_STACK_FUNCS(super_leafsize, struct btrfs_super_block,
1568 leafsize, 32);
1569BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct btrfs_super_block,
1570 stripesize, 32);
1571BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block,
1572 root_dir_objectid, 64);
1573BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block,
1574 num_devices, 64);
1575BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block,
1576 compat_flags, 64);
1577BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block,
1578 compat_flags, 64);
1579BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block,
1580 incompat_flags, 64);
1581BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block,
1582 csum_type, 16);
1583
1584static inline int btrfs_super_csum_size(struct btrfs_super_block *s)
1585{
1586 int t = btrfs_super_csum_type(s);
1587 BUG_ON(t >= ARRAY_SIZE(btrfs_csum_sizes));
1588 return btrfs_csum_sizes[t];
1589}
1590
1591static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
1592{
1593 return offsetof(struct btrfs_leaf, items);
1594}
1595
1596/* struct btrfs_file_extent_item */
1597BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8);
1598
1599static inline unsigned long
1600btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e)
1601{
1602 unsigned long offset = (unsigned long)e;
1603 offset += offsetof(struct btrfs_file_extent_item, disk_bytenr);
1604 return offset;
1605}
1606
1607static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize)
1608{
1609 return offsetof(struct btrfs_file_extent_item, disk_bytenr) + datasize;
1610}
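
/*
 * worked example: the inline data starts right after the 21 header bytes
 * (generation, ram_bytes, compression, encryption, other_encoding, type),
 * so btrfs_file_extent_calc_inline_size(100) == 121.
 */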
1611
1612BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item,
1613 disk_bytenr, 64);
1614BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item,
1615 generation, 64);
1616BTRFS_SETGET_FUNCS(file_extent_disk_num_bytes, struct btrfs_file_extent_item,
1617 disk_num_bytes, 64);
1618BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item,
1619 offset, 64);
1620BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item,
1621 num_bytes, 64);
1622BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item,
1623 ram_bytes, 64);
1624BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item,
1625 compression, 8);
1626BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item,
1627 encryption, 8);
1628BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item,
1629 other_encoding, 16);
1630
1631/* this returns the number of file bytes represented by the inline item.
1632 * If an item is compressed, this is the uncompressed size
1633 */
1634static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb,
1635 struct btrfs_file_extent_item *e)
1636{
1637 return btrfs_file_extent_ram_bytes(eb, e);
1638}
1639
1640/*
1641 * this returns the number of bytes used by the item on disk, minus the
1642 * size of any extent headers. If a file is compressed on disk, this is
1643 * the compressed size
1644 */
1645static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
1646 struct btrfs_item *e)
1647{
1648 unsigned long offset;
1649 offset = offsetof(struct btrfs_file_extent_item, disk_bytenr);
1650 return btrfs_item_size(eb, e) - offset;
1651}
1652
1653static inline struct btrfs_root *btrfs_sb(struct super_block *sb)
1654{
1655 return sb->s_fs_info;
1656}
1657
1658static inline int btrfs_set_root_name(struct btrfs_root *root,
1659 const char *name, int len)
1660{
1661 /* if we already have a name just free it */
1662 kfree(root->name);
1663
1664 root->name = kmalloc(len+1, GFP_KERNEL);
1665 if (!root->name)
1666 return -ENOMEM;
1667
1668 memcpy(root->name, name, len);
1669 root->name[len] = '\0';
1670
1671 return 0;
1672}
1673
1674static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
1675{
1676 if (level == 0)
1677 return root->leafsize;
1678 return root->nodesize;
1679}
1680
1681/* helper function to cast into the data area of the leaf. */
1682#define btrfs_item_ptr(leaf, slot, type) \
1683 ((type *)(btrfs_leaf_data(leaf) + \
1684 btrfs_item_offset_nr(leaf, slot)))
1685
1686#define btrfs_item_ptr_offset(leaf, slot) \
1687 ((unsigned long)(btrfs_leaf_data(leaf) + \
1688 btrfs_item_offset_nr(leaf, slot)))
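
/*
 * usage sketch (hypothetical helper): cast a slot's payload to its item
 * type and read a field through the endian-safe accessors.
 */
static inline u32 btrfs_example_read_nlink(struct extent_buffer *leaf,
					   int slot)
{
	struct btrfs_inode_item *ii;

	ii = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
	return btrfs_inode_nlink(leaf, ii);
}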
1689
1690static inline struct dentry *fdentry(struct file *file)
1691{
1692 return file->f_path.dentry;
1693}
1694
1695/* extent-tree.c */
1696int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len);
1697int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
1698 struct btrfs_root *root, u64 bytenr,
1699 u64 num_bytes, u32 *refs);
1700int btrfs_update_pinned_extents(struct btrfs_root *root,
1701 u64 bytenr, u64 num, int pin);
1702int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
1703 struct btrfs_root *root, struct extent_buffer *leaf);
1704int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
1705 struct btrfs_root *root, u64 objectid, u64 bytenr);
1706int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
1707 struct btrfs_root *root);
1708int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy);
1709struct btrfs_block_group_cache *btrfs_lookup_block_group(
1710 struct btrfs_fs_info *info,
1711 u64 bytenr);
1712u64 btrfs_find_block_group(struct btrfs_root *root,
1713 u64 search_start, u64 search_hint, int owner);
1714struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
1715 struct btrfs_root *root,
1716 u32 blocksize, u64 parent,
1717 u64 root_objectid,
1718 u64 ref_generation,
1719 int level,
1720 u64 hint,
1721 u64 empty_size);
1722struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
1723 struct btrfs_root *root,
1724 u64 bytenr, u32 blocksize);
1725int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
1726 struct btrfs_root *root,
1727 u64 num_bytes, u64 parent, u64 min_bytes,
1728 u64 root_objectid, u64 ref_generation,
1729 u64 owner, u64 empty_size, u64 hint_byte,
1730 u64 search_end, struct btrfs_key *ins, u64 data);
1731int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
1732 struct btrfs_root *root, u64 parent,
1733 u64 root_objectid, u64 ref_generation,
1734 u64 owner, struct btrfs_key *ins);
1735int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
1736 struct btrfs_root *root, u64 parent,
1737 u64 root_objectid, u64 ref_generation,
1738 u64 owner, struct btrfs_key *ins);
1739int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
1740 struct btrfs_root *root,
1741 u64 num_bytes, u64 min_alloc_size,
1742 u64 empty_size, u64 hint_byte,
1743 u64 search_end, struct btrfs_key *ins,
1744 u64 data);
1745int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1746 struct extent_buffer *orig_buf, struct extent_buffer *buf,
1747 u32 *nr_extents);
1748int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1749 struct extent_buffer *buf, u32 nr_extents);
1750int btrfs_update_ref(struct btrfs_trans_handle *trans,
1751 struct btrfs_root *root, struct extent_buffer *orig_buf,
1752 struct extent_buffer *buf, int start_slot, int nr);
1753int btrfs_free_extent(struct btrfs_trans_handle *trans,
1754 struct btrfs_root *root,
1755 u64 bytenr, u64 num_bytes, u64 parent,
1756 u64 root_objectid, u64 ref_generation,
1757 u64 owner_objectid, int pin);
1758int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
1759int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
1760 struct btrfs_root *root,
1761 struct extent_io_tree *unpin);
1762int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1763 struct btrfs_root *root,
1764 u64 bytenr, u64 num_bytes, u64 parent,
1765 u64 root_objectid, u64 ref_generation,
1766 u64 owner_objectid);
1767int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
1768 struct btrfs_root *root, u64 bytenr,
1769 u64 orig_parent, u64 parent,
1770 u64 root_objectid, u64 ref_generation,
1771 u64 owner_objectid);
1772int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1773 struct btrfs_root *root);
1774int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
1775int btrfs_free_block_groups(struct btrfs_fs_info *info);
1776int btrfs_read_block_groups(struct btrfs_root *root);
1777int btrfs_make_block_group(struct btrfs_trans_handle *trans,
1778 struct btrfs_root *root, u64 bytes_used,
1779 u64 type, u64 chunk_objectid, u64 chunk_offset,
1780 u64 size);
1781int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
1782 struct btrfs_root *root, u64 group_start);
1783int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
1784int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
1785 struct btrfs_root *root);
1786int btrfs_drop_dead_reloc_roots(struct btrfs_root *root);
1787int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
1788 struct btrfs_root *root,
1789 struct extent_buffer *buf, u64 orig_start);
1790int btrfs_add_dead_reloc_root(struct btrfs_root *root);
1791int btrfs_cleanup_reloc_trees(struct btrfs_root *root);
1792int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
1793u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
1794/* ctree.c */
1795int btrfs_previous_item(struct btrfs_root *root,
1796 struct btrfs_path *path, u64 min_objectid,
1797 int type);
1798int btrfs_merge_path(struct btrfs_trans_handle *trans,
1799 struct btrfs_root *root,
1800 struct btrfs_key *node_keys,
1801 u64 *nodes, int lowest_level);
1802int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1803 struct btrfs_root *root, struct btrfs_path *path,
1804 struct btrfs_key *new_key);
1805struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
1806struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
1807int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
1808 struct btrfs_key *key, int lowest_level,
1809 int cache_only, u64 min_trans);
1810int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
1811 struct btrfs_key *max_key,
1812 struct btrfs_path *path, int cache_only,
1813 u64 min_trans);
1814int btrfs_cow_block(struct btrfs_trans_handle *trans,
1815 struct btrfs_root *root, struct extent_buffer *buf,
1816 struct extent_buffer *parent, int parent_slot,
1817 struct extent_buffer **cow_ret, u64 prealloc_dest);
1818int btrfs_copy_root(struct btrfs_trans_handle *trans,
1819 struct btrfs_root *root,
1820 struct extent_buffer *buf,
1821 struct extent_buffer **cow_ret, u64 new_root_objectid);
1822int btrfs_extend_item(struct btrfs_trans_handle *trans, struct btrfs_root
1823 *root, struct btrfs_path *path, u32 data_size);
1824int btrfs_truncate_item(struct btrfs_trans_handle *trans,
1825 struct btrfs_root *root,
1826 struct btrfs_path *path,
1827 u32 new_size, int from_end);
1828int btrfs_split_item(struct btrfs_trans_handle *trans,
1829 struct btrfs_root *root,
1830 struct btrfs_path *path,
1831 struct btrfs_key *new_key,
1832 unsigned long split_offset);
1833int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1834 *root, struct btrfs_key *key, struct btrfs_path *p, int
1835 ins_len, int cow);
1836int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1837 struct btrfs_root *root, struct extent_buffer *parent,
1838 int start_slot, int cache_only, u64 *last_ret,
1839 struct btrfs_key *progress);
1840void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
1841struct btrfs_path *btrfs_alloc_path(void);
1842void btrfs_free_path(struct btrfs_path *p);
1843void btrfs_init_path(struct btrfs_path *p);
1844int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1845 struct btrfs_path *path, int slot, int nr);
1846int btrfs_del_leaf(struct btrfs_trans_handle *trans,
1847 struct btrfs_root *root,
1848 struct btrfs_path *path, u64 bytenr);
1849static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
1850 struct btrfs_root *root,
1851 struct btrfs_path *path)
1852{
1853 return btrfs_del_items(trans, root, path, path->slots[0], 1);
1854}
1855
1856int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
1857 *root, struct btrfs_key *key, void *data, u32 data_size);
1858int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
1859 struct btrfs_root *root,
1860 struct btrfs_path *path,
1861 struct btrfs_key *cpu_key, u32 *data_size,
1862 int nr);
1863int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
1864 struct btrfs_root *root,
1865 struct btrfs_path *path,
1866 struct btrfs_key *cpu_key, u32 *data_size, int nr);
1867
1868static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
1869 struct btrfs_root *root,
1870 struct btrfs_path *path,
1871 struct btrfs_key *key,
1872 u32 data_size)
1873{
1874 return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1);
1875}
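
/*
 * lookup sketch (hypothetical helper, error handling trimmed): the
 * usual pattern for the path API declared above -- allocate a path,
 * search read-only (trans == NULL, ins_len == 0, cow == 0), read the
 * key back out of the leaf, and free the path.
 */
static inline int btrfs_example_lookup(struct btrfs_root *root,
				       struct btrfs_key *key,
				       struct btrfs_key *found)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);

	btrfs_free_path(path);
	return ret;
}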
1876
1877int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
1878int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
1879int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
1880int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
1881 *root);
1882int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
1883 struct btrfs_root *root,
1884 struct extent_buffer *node,
1885 struct extent_buffer *parent);
1886/* root-item.c */
1887int btrfs_find_root_ref(struct btrfs_root *tree_root,
1888 struct btrfs_path *path,
1889 u64 root_id, u64 ref_id);
1890int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
1891 struct btrfs_root *tree_root,
1892 u64 root_id, u8 type, u64 ref_id,
1893 u64 dirid, u64 sequence,
1894 const char *name, int name_len);
1895int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1896 struct btrfs_key *key);
1897int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
1898 *root, struct btrfs_key *key, struct btrfs_root_item
1899 *item);
1900int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
1901 *root, struct btrfs_key *key, struct btrfs_root_item
1902 *item);
1903int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
1904 btrfs_root_item *item, struct btrfs_key *key);
1905int btrfs_search_root(struct btrfs_root *root, u64 search_start,
1906 u64 *found_objectid);
1907int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid,
1908 struct btrfs_root *latest_root);
1909/* dir-item.c */
1910int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
1911 struct btrfs_root *root, const char *name,
1912 int name_len, u64 dir,
1913 struct btrfs_key *location, u8 type, u64 index);
1914struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
1915 struct btrfs_root *root,
1916 struct btrfs_path *path, u64 dir,
1917 const char *name, int name_len,
1918 int mod);
1919struct btrfs_dir_item *
1920btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
1921 struct btrfs_root *root,
1922 struct btrfs_path *path, u64 dir,
1923 u64 objectid, const char *name, int name_len,
1924 int mod);
1925struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
1926 struct btrfs_path *path,
1927 const char *name, int name_len);
1928int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
1929 struct btrfs_root *root,
1930 struct btrfs_path *path,
1931 struct btrfs_dir_item *di);
1932int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
1933 struct btrfs_root *root, const char *name,
1934 u16 name_len, const void *data, u16 data_len,
1935 u64 dir);
1936struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
1937 struct btrfs_root *root,
1938 struct btrfs_path *path, u64 dir,
1939 const char *name, u16 name_len,
1940 int mod);
1941
1942/* orphan.c */
1943int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
1944 struct btrfs_root *root, u64 offset);
1945int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
1946 struct btrfs_root *root, u64 offset);
1947
1948/* inode-map.c */
1949int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
1950 struct btrfs_root *fs_root,
1951 u64 dirid, u64 *objectid);
1952int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid);
1953
1954/* inode-item.c */
1955int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
1956 struct btrfs_root *root,
1957 const char *name, int name_len,
1958 u64 inode_objectid, u64 ref_objectid, u64 index);
1959int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
1960 struct btrfs_root *root,
1961 const char *name, int name_len,
1962 u64 inode_objectid, u64 ref_objectid, u64 *index);
1963int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
1964 struct btrfs_root *root,
1965 struct btrfs_path *path, u64 objectid);
1966int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
1967 *root, struct btrfs_path *path,
1968 struct btrfs_key *location, int mod);
1969
1970/* file-item.c */
1971int btrfs_del_csums(struct btrfs_trans_handle *trans,
1972 struct btrfs_root *root, u64 bytenr, u64 len);
1973int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
1974 struct bio *bio, u32 *dst);
1975int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
1976 struct btrfs_root *root,
1977 u64 objectid, u64 pos,
1978 u64 disk_offset, u64 disk_num_bytes,
1979 u64 num_bytes, u64 offset, u64 ram_bytes,
1980 u8 compression, u8 encryption, u16 other_encoding);
1981int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
1982 struct btrfs_root *root,
1983 struct btrfs_path *path, u64 objectid,
1984 u64 bytenr, int mod);
1985int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
1986 struct btrfs_root *root,
1987 struct btrfs_ordered_sum *sums);
1988int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
1989 struct bio *bio, u64 file_start, int contig);
1990int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode,
1991 u64 start, unsigned long len);
1992struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
1993 struct btrfs_root *root,
1994 struct btrfs_path *path,
1995 u64 bytenr, int cow);
1996int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
1997 struct btrfs_root *root, struct btrfs_path *path,
1998 u64 isize);
1999int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start,
2000 u64 end, struct list_head *list);
2001/* inode.c */
2002
2003/* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */
2004#if defined(ClearPageFsMisc) && !defined(ClearPageChecked)
2005#define ClearPageChecked ClearPageFsMisc
2006#define SetPageChecked SetPageFsMisc
2007#define PageChecked PageFsMisc
2008#endif
2009
2010struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
2011int btrfs_set_inode_index(struct inode *dir, u64 *index);
2012int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2013 struct btrfs_root *root,
2014 struct inode *dir, struct inode *inode,
2015 const char *name, int name_len);
2016int btrfs_add_link(struct btrfs_trans_handle *trans,
2017 struct inode *parent_inode, struct inode *inode,
2018 const char *name, int name_len, int add_backref, u64 index);
2019int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2020 struct btrfs_root *root,
2021 struct inode *inode, u64 new_size,
2022 u32 min_type);
2023
2024int btrfs_start_delalloc_inodes(struct btrfs_root *root);
2025int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end);
2026int btrfs_writepages(struct address_space *mapping,
2027 struct writeback_control *wbc);
2028int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
2029 struct btrfs_root *new_root, struct dentry *dentry,
2030 u64 new_dirid, u64 alloc_hint);
2031int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
2032 size_t size, struct bio *bio, unsigned long bio_flags);
2033
2034unsigned long btrfs_force_ra(struct address_space *mapping,
2035 struct file_ra_state *ra, struct file *file,
2036 pgoff_t offset, pgoff_t last_index);
2037int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
2038 int for_del);
2039int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page);
2040int btrfs_readpage(struct file *file, struct page *page);
2041void btrfs_delete_inode(struct inode *inode);
2042void btrfs_put_inode(struct inode *inode);
2043void btrfs_read_locked_inode(struct inode *inode);
2044int btrfs_write_inode(struct inode *inode, int wait);
2045void btrfs_dirty_inode(struct inode *inode);
2046struct inode *btrfs_alloc_inode(struct super_block *sb);
2047void btrfs_destroy_inode(struct inode *inode);
2048int btrfs_init_cachep(void);
2049void btrfs_destroy_cachep(void);
2050long btrfs_ioctl_trans_end(struct file *file);
2051struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
2052 struct btrfs_root *root, int wait);
2053struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
2054 struct btrfs_root *root);
2055struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
2056 struct btrfs_root *root, int *is_new);
2057int btrfs_commit_write(struct file *file, struct page *page,
2058 unsigned from, unsigned to);
2059struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
2060 size_t page_offset, u64 start, u64 end,
2061 int create);
2062int btrfs_update_inode(struct btrfs_trans_handle *trans,
2063 struct btrfs_root *root,
2064 struct inode *inode);
2065int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
2066int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
2067void btrfs_orphan_cleanup(struct btrfs_root *root);
2068int btrfs_cont_expand(struct inode *inode, loff_t size);
2069
2070/* ioctl.c */
2071long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
2072
2073/* file.c */
2074int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync);
2075int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
2076 int skip_pinned);
2077int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
2078extern struct file_operations btrfs_file_operations;
2079int btrfs_drop_extents(struct btrfs_trans_handle *trans,
2080 struct btrfs_root *root, struct inode *inode,
2081 u64 start, u64 end, u64 inline_limit, u64 *hint_block);
2082int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
2083 struct btrfs_root *root,
2084 struct inode *inode, u64 start, u64 end);
2085int btrfs_release_file(struct inode *inode, struct file *file);
2086
2087/* tree-defrag.c */
2088int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
2089 struct btrfs_root *root, int cache_only);
2090
2091/* sysfs.c */
2092int btrfs_init_sysfs(void);
2093void btrfs_exit_sysfs(void);
2094int btrfs_sysfs_add_super(struct btrfs_fs_info *fs);
2095int btrfs_sysfs_add_root(struct btrfs_root *root);
2096void btrfs_sysfs_del_root(struct btrfs_root *root);
2097void btrfs_sysfs_del_super(struct btrfs_fs_info *root);
2098
2099/* xattr.c */
2100ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
2101
2102/* super.c */
2103u64 btrfs_parse_size(char *str);
2104int btrfs_parse_options(struct btrfs_root *root, char *options);
2105int btrfs_sync_fs(struct super_block *sb, int wait);
2106
2107/* acl.c */
2108int btrfs_check_acl(struct inode *inode, int mask);
2109int btrfs_init_acl(struct inode *inode, struct inode *dir);
2110int btrfs_acl_chmod(struct inode *inode);
2111
2112/* free-space-cache.c */
2113int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
2114 u64 bytenr, u64 size);
2115int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
2116 u64 offset, u64 bytes);
2117int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
2118 u64 bytenr, u64 size);
2119int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
2120 u64 offset, u64 bytes);
2121void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
2122 *block_group);
2123struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
2124 *block_group, u64 offset,
2125 u64 bytes);
2126void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
2127 u64 bytes);
2128u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group);
2129#endif
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
new file mode 100644
index 000000000000..926a0b287a7d
--- /dev/null
+++ b/fs/btrfs/dir-item.c
@@ -0,0 +1,386 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include "ctree.h"
20#include "disk-io.h"
21#include "hash.h"
22#include "transaction.h"
23
24/*
25 * insert a name into a directory, doing overflow properly if there is a hash
26 * collision. data_size indicates how big the item inserted should be. On
27 * success a struct btrfs_dir_item pointer is returned, otherwise it is
28 * an ERR_PTR.
29 *
30 * The name is not copied into the dir item; the caller must do that.
31 */
32static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
33 *trans,
34 struct btrfs_root *root,
35 struct btrfs_path *path,
36 struct btrfs_key *cpu_key,
37 u32 data_size,
38 const char *name,
39 int name_len)
40{
41 int ret;
42 char *ptr;
43 struct btrfs_item *item;
44 struct extent_buffer *leaf;
45
46 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
47 if (ret == -EEXIST) {
48 struct btrfs_dir_item *di;
49 di = btrfs_match_dir_item_name(root, path, name, name_len);
50 if (di)
51 return ERR_PTR(-EEXIST);
52 ret = btrfs_extend_item(trans, root, path, data_size);
53 WARN_ON(ret > 0);
54 }
55 if (ret < 0)
56 return ERR_PTR(ret);
57 WARN_ON(ret > 0);
58 leaf = path->nodes[0];
59 item = btrfs_item_nr(leaf, path->slots[0]);
60 ptr = btrfs_item_ptr(leaf, path->slots[0], char);
61 BUG_ON(data_size > btrfs_item_size(leaf, item));
62 ptr += btrfs_item_size(leaf, item) - data_size;
63 return (struct btrfs_dir_item *)ptr;
64}
65
66/*
67 * xattrs work a lot like directories, this inserts an xattr item
68 * into the tree
69 */
70int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
71 struct btrfs_root *root, const char *name,
72 u16 name_len, const void *data, u16 data_len,
73 u64 dir)
74{
75 int ret = 0;
76 struct btrfs_path *path;
77 struct btrfs_dir_item *dir_item;
78 unsigned long name_ptr, data_ptr;
79 struct btrfs_key key, location;
80 struct btrfs_disk_key disk_key;
81 struct extent_buffer *leaf;
82 u32 data_size;
83
84 key.objectid = dir;
85 btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
86 key.offset = btrfs_name_hash(name, name_len);
87 path = btrfs_alloc_path();
88 if (!path)
89 return -ENOMEM;
90	if (name_len + data_len + sizeof(struct btrfs_dir_item) >
91	    BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item)) {
		btrfs_free_path(path);	/* don't leak the path on failure */
92		return -ENOSPC;
	}
93
94 data_size = sizeof(*dir_item) + name_len + data_len;
95 dir_item = insert_with_overflow(trans, root, path, &key, data_size,
96 name, name_len);
97 /*
98 * FIXME: at some point we should handle xattrs that are larger than
99 * what we can fit in our leaf.  We set location to NULL because we aren't
100 * pointing at anything else; that will change if we store the xattr
101 * data in a separate inode.
102 */
103 BUG_ON(IS_ERR(dir_item));
104 memset(&location, 0, sizeof(location));
105
106 leaf = path->nodes[0];
107 btrfs_cpu_key_to_disk(&disk_key, &location);
108 btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
109 btrfs_set_dir_type(leaf, dir_item, BTRFS_FT_XATTR);
110 btrfs_set_dir_name_len(leaf, dir_item, name_len);
111 btrfs_set_dir_transid(leaf, dir_item, trans->transid);
112 btrfs_set_dir_data_len(leaf, dir_item, data_len);
113 name_ptr = (unsigned long)(dir_item + 1);
114 data_ptr = (unsigned long)((char *)name_ptr + name_len);
115
116 write_extent_buffer(leaf, name, name_ptr, name_len);
117 write_extent_buffer(leaf, data, data_ptr, data_len);
118 btrfs_mark_buffer_dirty(path->nodes[0]);
119
120 btrfs_free_path(path);
121 return ret;
122}
123
124/*
125 * insert a directory item in the tree, doing all the magic for
126 * both indexes. 'dir' indicates which objectid to insert it into,
127 * 'location' is the key to stuff into the directory item, 'type' is the
128 * type of the inode we're pointing to, and 'index' is the sequence number
129 * to use for the second index (if one is created).
130 */
131int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
132 *root, const char *name, int name_len, u64 dir,
133 struct btrfs_key *location, u8 type, u64 index)
134{
135 int ret = 0;
136 int ret2 = 0;
137 struct btrfs_path *path;
138 struct btrfs_dir_item *dir_item;
139 struct extent_buffer *leaf;
140 unsigned long name_ptr;
141 struct btrfs_key key;
142 struct btrfs_disk_key disk_key;
143 u32 data_size;
144
145 key.objectid = dir;
146 btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
147 key.offset = btrfs_name_hash(name, name_len);
148	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
149 data_size = sizeof(*dir_item) + name_len;
150 dir_item = insert_with_overflow(trans, root, path, &key, data_size,
151 name, name_len);
152 if (IS_ERR(dir_item)) {
153 ret = PTR_ERR(dir_item);
154 if (ret == -EEXIST)
155 goto second_insert;
156 goto out;
157 }
158
159 leaf = path->nodes[0];
160 btrfs_cpu_key_to_disk(&disk_key, location);
161 btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
162 btrfs_set_dir_type(leaf, dir_item, type);
163 btrfs_set_dir_data_len(leaf, dir_item, 0);
164 btrfs_set_dir_name_len(leaf, dir_item, name_len);
165 btrfs_set_dir_transid(leaf, dir_item, trans->transid);
166 name_ptr = (unsigned long)(dir_item + 1);
167
168 write_extent_buffer(leaf, name, name_ptr, name_len);
169 btrfs_mark_buffer_dirty(leaf);
170
171second_insert:
172 /* FIXME, use some real flag for selecting the extra index */
173 if (root == root->fs_info->tree_root) {
174 ret = 0;
175 goto out;
176 }
177 btrfs_release_path(root, path);
178
179 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
180 key.offset = index;
181 dir_item = insert_with_overflow(trans, root, path, &key, data_size,
182 name, name_len);
183 if (IS_ERR(dir_item)) {
184 ret2 = PTR_ERR(dir_item);
185 goto out;
186 }
187 leaf = path->nodes[0];
188 btrfs_cpu_key_to_disk(&disk_key, location);
189 btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
190 btrfs_set_dir_type(leaf, dir_item, type);
191 btrfs_set_dir_data_len(leaf, dir_item, 0);
192 btrfs_set_dir_name_len(leaf, dir_item, name_len);
193 btrfs_set_dir_transid(leaf, dir_item, trans->transid);
194 name_ptr = (unsigned long)(dir_item + 1);
195 write_extent_buffer(leaf, name, name_ptr, name_len);
196 btrfs_mark_buffer_dirty(leaf);
197out:
198 btrfs_free_path(path);
199 if (ret)
200 return ret;
201 if (ret2)
202 return ret2;
203 return 0;
204}
205
206/*
207 * lookup a directory item based on name. 'dir' is the objectid
208 * we're searching in, and 'mod' tells the search whether the caller
209 * plans to delete the item (mod < 0) or modify it in place (mod > 0)
210 */
211struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
212 struct btrfs_root *root,
213 struct btrfs_path *path, u64 dir,
214 const char *name, int name_len,
215 int mod)
216{
217 int ret;
218 struct btrfs_key key;
219 int ins_len = mod < 0 ? -1 : 0;
220 int cow = mod != 0;
221 struct btrfs_key found_key;
222 struct extent_buffer *leaf;
223
224 key.objectid = dir;
225 btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
226
227 key.offset = btrfs_name_hash(name, name_len);
228
229 ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
230 if (ret < 0)
231 return ERR_PTR(ret);
232 if (ret > 0) {
233 if (path->slots[0] == 0)
234 return NULL;
235 path->slots[0]--;
236 }
237
238 leaf = path->nodes[0];
239 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
240
241 if (found_key.objectid != dir ||
242 btrfs_key_type(&found_key) != BTRFS_DIR_ITEM_KEY ||
243 found_key.offset != key.offset)
244 return NULL;
245
246 return btrfs_match_dir_item_name(root, path, name, name_len);
247}
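/*
 * Usage sketch (illustrative, names are placeholders): an unlink-style
 * caller passes mod == -1 so the search balances in anticipation of the
 * coming deletion, while a caller rewriting the item passes mod == 1 to
 * force COW of the leaf:
 *
 *	di = btrfs_lookup_dir_item(trans, root, path, dir_objectid,
 *				   name, name_len, -1);
 *	if (IS_ERR(di) || !di)
 *		bail out;
 */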
248
249/*
250 * lookup a directory item based on index. 'dir' is the objectid
251 * we're searching in, and 'mod' tells the search whether the caller
252 * plans to delete the item (mod < 0) or modify it in place (mod > 0)
253 *
254 * The name is used to make sure the index really points to the name you were
255 * looking for.
256 */
257struct btrfs_dir_item *
258btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
259 struct btrfs_root *root,
260 struct btrfs_path *path, u64 dir,
261 u64 objectid, const char *name, int name_len,
262 int mod)
263{
264 int ret;
265 struct btrfs_key key;
266 int ins_len = mod < 0 ? -1 : 0;
267 int cow = mod != 0;
268
269 key.objectid = dir;
270 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
271 key.offset = objectid;
272
273 ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
274 if (ret < 0)
275 return ERR_PTR(ret);
276 if (ret > 0)
277 return ERR_PTR(-ENOENT);
278 return btrfs_match_dir_item_name(root, path, name, name_len);
279}
280
281struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
282 struct btrfs_root *root,
283 struct btrfs_path *path, u64 dir,
284 const char *name, u16 name_len,
285 int mod)
286{
287 int ret;
288 struct btrfs_key key;
289 int ins_len = mod < 0 ? -1 : 0;
290 int cow = mod != 0;
291 struct btrfs_key found_key;
292 struct extent_buffer *leaf;
293
294 key.objectid = dir;
295 btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
296 key.offset = btrfs_name_hash(name, name_len);
297 ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
298 if (ret < 0)
299 return ERR_PTR(ret);
300 if (ret > 0) {
301 if (path->slots[0] == 0)
302 return NULL;
303 path->slots[0]--;
304 }
305
306 leaf = path->nodes[0];
307 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
308
309 if (found_key.objectid != dir ||
310 btrfs_key_type(&found_key) != BTRFS_XATTR_ITEM_KEY ||
311 found_key.offset != key.offset)
312 return NULL;
313
314 return btrfs_match_dir_item_name(root, path, name, name_len);
315}
316
317/*
318 * helper function to look at the directory item pointed to by 'path'.
319 * This walks through all the entries in a dir item and finds the one
320 * matching a specific name.
321 */
322struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
323 struct btrfs_path *path,
324 const char *name, int name_len)
325{
326 struct btrfs_dir_item *dir_item;
327 unsigned long name_ptr;
328 u32 total_len;
329 u32 cur = 0;
330 u32 this_len;
331 struct extent_buffer *leaf;
332
333 leaf = path->nodes[0];
334 dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
335 total_len = btrfs_item_size_nr(leaf, path->slots[0]);
336 while (cur < total_len) {
337 this_len = sizeof(*dir_item) +
338 btrfs_dir_name_len(leaf, dir_item) +
339 btrfs_dir_data_len(leaf, dir_item);
340 name_ptr = (unsigned long)(dir_item + 1);
341
342 if (btrfs_dir_name_len(leaf, dir_item) == name_len &&
343 memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
344 return dir_item;
345
346 cur += this_len;
347 dir_item = (struct btrfs_dir_item *)((char *)dir_item +
348 this_len);
349 }
350 return NULL;
351}
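/*
 * On-disk layout walked by the loop above (illustrative): names whose
 * hashes collide share one item, each entry packed back to back:
 *
 *	[btrfs_dir_item][name][data][btrfs_dir_item][name][data]...
 *
 * so 'cur' advances by sizeof(*dir_item) + name_len + data_len per entry.
 * data_len is zero for plain directory entries and non-zero for xattrs.
 */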
352
353/*
354 * given a pointer into a directory item, delete it. This
355 * handles items that have more than one entry in them.
356 */
357int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
358 struct btrfs_root *root,
359 struct btrfs_path *path,
360 struct btrfs_dir_item *di)
361{
363 struct extent_buffer *leaf;
364 u32 sub_item_len;
365 u32 item_len;
366 int ret = 0;
367
368 leaf = path->nodes[0];
369 sub_item_len = sizeof(*di) + btrfs_dir_name_len(leaf, di) +
370 btrfs_dir_data_len(leaf, di);
371 item_len = btrfs_item_size_nr(leaf, path->slots[0]);
372 if (sub_item_len == item_len) {
373 ret = btrfs_del_item(trans, root, path);
374 } else {
375		/* shift the rest of the item down over the deleted entry */
376 unsigned long ptr = (unsigned long)di;
377 unsigned long start;
378
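		/*
		 * Worked example (illustrative): the entry being deleted
		 * starts at offset 'ptr' and is sub_item_len bytes long.
		 * The tail of the item, item_len - (ptr + sub_item_len -
		 * start) bytes, slides down over it, and the item then
		 * shrinks by sub_item_len bytes.
		 */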
379 start = btrfs_item_ptr_offset(leaf, path->slots[0]);
380 memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
381 item_len - (ptr + sub_item_len - start));
382 ret = btrfs_truncate_item(trans, root, path,
383 item_len - sub_item_len, 1);
384 }
385	return ret;
386}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
new file mode 100644
index 000000000000..81a313874ae5
--- /dev/null
+++ b/fs/btrfs/disk-io.c
@@ -0,0 +1,2343 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/version.h>
20#include <linux/fs.h>
21#include <linux/blkdev.h>
22#include <linux/scatterlist.h>
23#include <linux/swap.h>
24#include <linux/radix-tree.h>
25#include <linux/writeback.h>
26#include <linux/buffer_head.h>
27#include <linux/workqueue.h>
28#include <linux/kthread.h>
29#include <linux/freezer.h>
30#include "compat.h"
31#include "crc32c.h"
32#include "ctree.h"
33#include "disk-io.h"
34#include "transaction.h"
35#include "btrfs_inode.h"
36#include "volumes.h"
37#include "print-tree.h"
38#include "async-thread.h"
39#include "locking.h"
40#include "ref-cache.h"
41#include "tree-log.h"
42
43static struct extent_io_ops btree_extent_io_ops;
44static void end_workqueue_fn(struct btrfs_work *work);
45
46/*
47 * end_io_wq structs are used to do processing in task context when an IO is
48 * complete. This is used during reads to verify checksums, and it is used
49 * by writes to insert metadata for new file extents after IO is complete.
50 */
51struct end_io_wq {
52 struct bio *bio;
53 bio_end_io_t *end_io;
54 void *private;
55 struct btrfs_fs_info *info;
56 int error;
57 int metadata;
58 struct list_head list;
59 struct btrfs_work work;
60};
61
62/*
63 * async submit bios are used to offload expensive checksumming
64 * onto the worker threads. They checksum file and metadata bios
65 * just before they are sent down the IO stack.
66 */
67struct async_submit_bio {
68 struct inode *inode;
69 struct bio *bio;
70 struct list_head list;
71 extent_submit_bio_hook_t *submit_bio_start;
72 extent_submit_bio_hook_t *submit_bio_done;
73 int rw;
74 int mirror_num;
75 unsigned long bio_flags;
76 struct btrfs_work work;
77};
78
79/*
80 * extents on the btree inode are pretty simple: there's one extent
81 * that covers the entire device
82 */
83static struct extent_map *btree_get_extent(struct inode *inode,
84 struct page *page, size_t page_offset, u64 start, u64 len,
85 int create)
86{
87 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
88 struct extent_map *em;
89 int ret;
90
91 spin_lock(&em_tree->lock);
92 em = lookup_extent_mapping(em_tree, start, len);
93 if (em) {
94 em->bdev =
95 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
96 spin_unlock(&em_tree->lock);
97 goto out;
98 }
99 spin_unlock(&em_tree->lock);
100
101 em = alloc_extent_map(GFP_NOFS);
102 if (!em) {
103 em = ERR_PTR(-ENOMEM);
104 goto out;
105 }
106 em->start = 0;
107 em->len = (u64)-1;
108 em->block_len = (u64)-1;
109 em->block_start = 0;
110 em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
111
112 spin_lock(&em_tree->lock);
113 ret = add_extent_mapping(em_tree, em);
114 if (ret == -EEXIST) {
115 u64 failed_start = em->start;
116 u64 failed_len = em->len;
117
118 free_extent_map(em);
119 em = lookup_extent_mapping(em_tree, start, len);
120 if (em) {
121 ret = 0;
122 } else {
123 em = lookup_extent_mapping(em_tree, failed_start,
124 failed_len);
125 ret = -EIO;
126 }
127 } else if (ret) {
128 free_extent_map(em);
129 em = NULL;
130 }
131 spin_unlock(&em_tree->lock);
132
133 if (ret)
134 em = ERR_PTR(ret);
135out:
136 return em;
137}
138
139u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
140{
141 return btrfs_crc32c(seed, data, len);
142}
143
144void btrfs_csum_final(u32 crc, char *result)
145{
146 *(__le32 *)result = ~cpu_to_le32(crc);
147}
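/*
 * Sketch of the two-step convention above (illustrative): seed the crc
 * with all ones, feed the data through btrfs_csum_data(), then invert
 * and store it little-endian with btrfs_csum_final():
 *
 *	u32 crc = ~(u32)0;
 *	crc = btrfs_csum_data(root, data, crc, len);
 *	btrfs_csum_final(crc, result);
 */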
148
149/*
150 * compute the csum for a btree block, and either verify it or write it
151 * into the csum field of the block.
152 */
153static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
154 int verify)
155{
156 u16 csum_size =
157 btrfs_super_csum_size(&root->fs_info->super_copy);
158 char *result = NULL;
159 unsigned long len;
160 unsigned long cur_len;
161 unsigned long offset = BTRFS_CSUM_SIZE;
162 char *map_token = NULL;
163 char *kaddr;
164 unsigned long map_start;
165 unsigned long map_len;
166 int err;
167 u32 crc = ~(u32)0;
168 unsigned long inline_result;
169
170 len = buf->len - offset;
171 while (len > 0) {
172 err = map_private_extent_buffer(buf, offset, 32,
173 &map_token, &kaddr,
174 &map_start, &map_len, KM_USER0);
175 if (err)
176 return 1;
177 cur_len = min(len, map_len - (offset - map_start));
178 crc = btrfs_csum_data(root, kaddr + offset - map_start,
179 crc, cur_len);
180 len -= cur_len;
181 offset += cur_len;
182 unmap_extent_buffer(buf, map_token, KM_USER0);
183 }
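	/*
	 * csums no larger than sizeof(unsigned long) (crc32c is 4 bytes)
	 * fit in inline_result, so the common case needs no allocation
	 */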
184 if (csum_size > sizeof(inline_result)) {
185		result = kzalloc(csum_size, GFP_NOFS);
186 if (!result)
187 return 1;
188 } else {
189 result = (char *)&inline_result;
190 }
191
192 btrfs_csum_final(crc, result);
193
194 if (verify) {
195 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
196 u32 val;
197 u32 found = 0;
198 memcpy(&found, result, csum_size);
199
200 read_extent_buffer(buf, &val, 0, csum_size);
201 printk(KERN_INFO "btrfs: %s checksum verify failed "
202 "on %llu wanted %X found %X level %d\n",
203 root->fs_info->sb->s_id,
204 buf->start, val, found, btrfs_header_level(buf));
205 if (result != (char *)&inline_result)
206 kfree(result);
207 return 1;
208 }
209 } else {
210 write_extent_buffer(buf, result, 0, csum_size);
211 }
212 if (result != (char *)&inline_result)
213 kfree(result);
214 return 0;
215}
216
217/*
218 * we can't consider a given block up to date unless the transid of the
219 * block matches the transid in the parent node's pointer. This is how we
220 * detect blocks that either didn't get written at all or got written
221 * in the wrong place.
222 */
223static int verify_parent_transid(struct extent_io_tree *io_tree,
224 struct extent_buffer *eb, u64 parent_transid)
225{
226 int ret;
227
228 if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
229 return 0;
230
231 lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
232 if (extent_buffer_uptodate(io_tree, eb) &&
233 btrfs_header_generation(eb) == parent_transid) {
234 ret = 0;
235 goto out;
236 }
237	printk(KERN_ERR "parent transid verify failed on %llu wanted %llu found %llu\n",
238 (unsigned long long)eb->start,
239 (unsigned long long)parent_transid,
240 (unsigned long long)btrfs_header_generation(eb));
241 ret = 1;
242 clear_extent_buffer_uptodate(io_tree, eb);
243out:
244 unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
245 GFP_NOFS);
246 return ret;
247}
248
249/*
250 * helper to read a given tree block, doing retries as required when
251 * the checksums don't match and we have alternate mirrors to try.
252 */
253static int btree_read_extent_buffer_pages(struct btrfs_root *root,
254 struct extent_buffer *eb,
255 u64 start, u64 parent_transid)
256{
257 struct extent_io_tree *io_tree;
258 int ret;
259 int num_copies = 0;
260 int mirror_num = 0;
261
262 io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
263 while (1) {
264 ret = read_extent_buffer_pages(io_tree, eb, start, 1,
265 btree_get_extent, mirror_num);
266 if (!ret &&
267 !verify_parent_transid(io_tree, eb, parent_transid))
268 return ret;
269
270 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
271 eb->start, eb->len);
272 if (num_copies == 1)
273 return ret;
274
275 mirror_num++;
276 if (mirror_num > num_copies)
277 return ret;
278 }
279 return -EIO;
280}
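/*
 * Example (illustrative): mirror_num 0 lets the lower layers pick any
 * copy.  With duplicated metadata btrfs_num_copies() returns 2, so a
 * block that fails its checksum or transid check is re-read explicitly
 * from mirrors 1 and 2 before the error is returned.
 */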
281
282/*
283 * checksum a dirty tree block before IO. This has extra checks to make sure
284 * we only fill in the checksum field in the first page of a multi-page block
285 */
286
287static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
288{
289 struct extent_io_tree *tree;
290 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
291 u64 found_start;
292 int found_level;
293 unsigned long len;
294 struct extent_buffer *eb;
295 int ret;
296
297 tree = &BTRFS_I(page->mapping->host)->io_tree;
298
299 if (page->private == EXTENT_PAGE_PRIVATE)
300 goto out;
301 if (!page->private)
302 goto out;
303 len = page->private >> 2;
304 WARN_ON(len == 0);
305
306 eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
307 ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
308 btrfs_header_generation(eb));
309 BUG_ON(ret);
310 found_start = btrfs_header_bytenr(eb);
311 if (found_start != start) {
312 WARN_ON(1);
313 goto err;
314 }
315 if (eb->first_page != page) {
316 WARN_ON(1);
317 goto err;
318 }
319 if (!PageUptodate(page)) {
320 WARN_ON(1);
321 goto err;
322 }
323 found_level = btrfs_header_level(eb);
324
325 csum_tree_block(root, eb, 0);
326err:
327 free_extent_buffer(eb);
328out:
329 return 0;
330}
331
332static int check_tree_block_fsid(struct btrfs_root *root,
333 struct extent_buffer *eb)
334{
335 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
336 u8 fsid[BTRFS_UUID_SIZE];
337 int ret = 1;
338
339 read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
340 BTRFS_FSID_SIZE);
341 while (fs_devices) {
342 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
343 ret = 0;
344 break;
345 }
346 fs_devices = fs_devices->seed;
347 }
348 return ret;
349}
350
351static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
352 struct extent_state *state)
353{
354 struct extent_io_tree *tree;
355 u64 found_start;
356 int found_level;
357 unsigned long len;
358 struct extent_buffer *eb;
359 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
360 int ret = 0;
361
362 tree = &BTRFS_I(page->mapping->host)->io_tree;
363 if (page->private == EXTENT_PAGE_PRIVATE)
364 goto out;
365 if (!page->private)
366 goto out;
367
368 len = page->private >> 2;
369 WARN_ON(len == 0);
370
371 eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
372
373 found_start = btrfs_header_bytenr(eb);
374 if (found_start != start) {
375 printk(KERN_INFO "btrfs bad tree block start %llu %llu\n",
376 (unsigned long long)found_start,
377 (unsigned long long)eb->start);
378 ret = -EIO;
379 goto err;
380 }
381 if (eb->first_page != page) {
382 printk(KERN_INFO "btrfs bad first page %lu %lu\n",
383 eb->first_page->index, page->index);
384 WARN_ON(1);
385 ret = -EIO;
386 goto err;
387 }
388 if (check_tree_block_fsid(root, eb)) {
389 printk(KERN_INFO "btrfs bad fsid on block %llu\n",
390 (unsigned long long)eb->start);
391 ret = -EIO;
392 goto err;
393 }
394 found_level = btrfs_header_level(eb);
395
396 ret = csum_tree_block(root, eb, 1);
397 if (ret)
398 ret = -EIO;
399
400 end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
401 end = eb->start + end - 1;
402err:
403 free_extent_buffer(eb);
404out:
405 return ret;
406}
407
408static void end_workqueue_bio(struct bio *bio, int err)
409{
410 struct end_io_wq *end_io_wq = bio->bi_private;
411 struct btrfs_fs_info *fs_info;
412
413 fs_info = end_io_wq->info;
414 end_io_wq->error = err;
415 end_io_wq->work.func = end_workqueue_fn;
416 end_io_wq->work.flags = 0;
417
418 if (bio->bi_rw & (1 << BIO_RW)) {
419 if (end_io_wq->metadata)
420 btrfs_queue_worker(&fs_info->endio_meta_write_workers,
421 &end_io_wq->work);
422 else
423 btrfs_queue_worker(&fs_info->endio_write_workers,
424 &end_io_wq->work);
425 } else {
426 if (end_io_wq->metadata)
427 btrfs_queue_worker(&fs_info->endio_meta_workers,
428 &end_io_wq->work);
429 else
430 btrfs_queue_worker(&fs_info->endio_workers,
431 &end_io_wq->work);
432 }
433}
434
435int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
436 int metadata)
437{
438 struct end_io_wq *end_io_wq;
439 end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
440 if (!end_io_wq)
441 return -ENOMEM;
442
443 end_io_wq->private = bio->bi_private;
444 end_io_wq->end_io = bio->bi_end_io;
445 end_io_wq->info = info;
446 end_io_wq->error = 0;
447 end_io_wq->bio = bio;
448 end_io_wq->metadata = metadata;
449
450 bio->bi_private = end_io_wq;
451 bio->bi_end_io = end_workqueue_bio;
452 return 0;
453}
454
455unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
456{
457 unsigned long limit = min_t(unsigned long,
458 info->workers.max_workers,
459 info->fs_devices->open_devices);
460 return 256 * limit;
461}
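/*
 * Worked example (illustrative): with max_workers == 8 and two open
 * devices the min() gives 2, so up to 256 * 2 == 512 async bios may be
 * in flight before submitters get throttled.
 */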
462
463int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
464{
465 return atomic_read(&info->nr_async_bios) >
466 btrfs_async_submit_limit(info);
467}
468
469static void run_one_async_start(struct btrfs_work *work)
470{
471 struct btrfs_fs_info *fs_info;
472 struct async_submit_bio *async;
473
474 async = container_of(work, struct async_submit_bio, work);
475 fs_info = BTRFS_I(async->inode)->root->fs_info;
476 async->submit_bio_start(async->inode, async->rw, async->bio,
477 async->mirror_num, async->bio_flags);
478}
479
480static void run_one_async_done(struct btrfs_work *work)
481{
482 struct btrfs_fs_info *fs_info;
483 struct async_submit_bio *async;
484 int limit;
485
486 async = container_of(work, struct async_submit_bio, work);
487 fs_info = BTRFS_I(async->inode)->root->fs_info;
488
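	/* wake throttled submitters once we drop below 2/3 of the limit */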
489 limit = btrfs_async_submit_limit(fs_info);
490 limit = limit * 2 / 3;
491
492 atomic_dec(&fs_info->nr_async_submits);
493
494 if (atomic_read(&fs_info->nr_async_submits) < limit &&
495 waitqueue_active(&fs_info->async_submit_wait))
496 wake_up(&fs_info->async_submit_wait);
497
498 async->submit_bio_done(async->inode, async->rw, async->bio,
499 async->mirror_num, async->bio_flags);
500}
501
502static void run_one_async_free(struct btrfs_work *work)
503{
504 struct async_submit_bio *async;
505
506 async = container_of(work, struct async_submit_bio, work);
507 kfree(async);
508}
509
510int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
511 int rw, struct bio *bio, int mirror_num,
512 unsigned long bio_flags,
513 extent_submit_bio_hook_t *submit_bio_start,
514 extent_submit_bio_hook_t *submit_bio_done)
515{
516 struct async_submit_bio *async;
517
518 async = kmalloc(sizeof(*async), GFP_NOFS);
519 if (!async)
520 return -ENOMEM;
521
522 async->inode = inode;
523 async->rw = rw;
524 async->bio = bio;
525 async->mirror_num = mirror_num;
526 async->submit_bio_start = submit_bio_start;
527 async->submit_bio_done = submit_bio_done;
528
529 async->work.func = run_one_async_start;
530 async->work.ordered_func = run_one_async_done;
531 async->work.ordered_free = run_one_async_free;
532
533 async->work.flags = 0;
534 async->bio_flags = bio_flags;
535
536 atomic_inc(&fs_info->nr_async_submits);
537 btrfs_queue_worker(&fs_info->workers, &async->work);
538#if 0
539 int limit = btrfs_async_submit_limit(fs_info);
540 if (atomic_read(&fs_info->nr_async_submits) > limit) {
541 wait_event_timeout(fs_info->async_submit_wait,
542 (atomic_read(&fs_info->nr_async_submits) < limit),
543 HZ/10);
544
545 wait_event_timeout(fs_info->async_submit_wait,
546 (atomic_read(&fs_info->nr_async_bios) < limit),
547 HZ/10);
548 }
549#endif
550 while (atomic_read(&fs_info->async_submit_draining) &&
551 atomic_read(&fs_info->nr_async_submits)) {
552 wait_event(fs_info->async_submit_wait,
553 (atomic_read(&fs_info->nr_async_submits) == 0));
554 }
555
556 return 0;
557}
558
559static int btree_csum_one_bio(struct bio *bio)
560{
561 struct bio_vec *bvec = bio->bi_io_vec;
562 int bio_index = 0;
563 struct btrfs_root *root;
564
565 WARN_ON(bio->bi_vcnt <= 0);
566 while (bio_index < bio->bi_vcnt) {
567 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
568 csum_dirty_buffer(root, bvec->bv_page);
569 bio_index++;
570 bvec++;
571 }
572 return 0;
573}
574
575static int __btree_submit_bio_start(struct inode *inode, int rw,
576 struct bio *bio, int mirror_num,
577 unsigned long bio_flags)
578{
579 /*
580 * when we're called for a write, we're already in the async
581	 * submission context.  Checksum this bio's tree blocks now; the
	 * actual submit happens later in __btree_submit_bio_done
582 */
583 btree_csum_one_bio(bio);
584 return 0;
585}
586
587static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
588 int mirror_num, unsigned long bio_flags)
589{
590 /*
591 * when we're called for a write, we're already in the async
592 * submission context. Just jump into btrfs_map_bio
593 */
594 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
595}
596
597static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
598 int mirror_num, unsigned long bio_flags)
599{
600 int ret;
601
602 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
603 bio, 1);
604 BUG_ON(ret);
605
606 if (!(rw & (1 << BIO_RW))) {
607 /*
608 * called for a read, do the setup so that checksum validation
609 * can happen in the async kernel threads
610 */
611 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
612 mirror_num, 0);
613 }
614 /*
615 * kthread helpers are used to submit writes so that checksumming
616 * can happen in parallel across all CPUs
617 */
618 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
619 inode, rw, bio, mirror_num, 0,
620 __btree_submit_bio_start,
621 __btree_submit_bio_done);
622}
623
624static int btree_writepage(struct page *page, struct writeback_control *wbc)
625{
626 struct extent_io_tree *tree;
627 tree = &BTRFS_I(page->mapping->host)->io_tree;
628
629 if (current->flags & PF_MEMALLOC) {
630 redirty_page_for_writepage(wbc, page);
631 unlock_page(page);
632 return 0;
633 }
634 return extent_write_full_page(tree, page, btree_get_extent, wbc);
635}
636
637static int btree_writepages(struct address_space *mapping,
638 struct writeback_control *wbc)
639{
640 struct extent_io_tree *tree;
641 tree = &BTRFS_I(mapping->host)->io_tree;
642 if (wbc->sync_mode == WB_SYNC_NONE) {
643 u64 num_dirty;
644 u64 start = 0;
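		/* skip background writeback until ~32MB of btree data is dirty */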
645 unsigned long thresh = 32 * 1024 * 1024;
646
647 if (wbc->for_kupdate)
648 return 0;
649
650 num_dirty = count_range_bits(tree, &start, (u64)-1,
651 thresh, EXTENT_DIRTY);
652 if (num_dirty < thresh)
653 return 0;
654 }
655 return extent_writepages(tree, mapping, btree_get_extent, wbc);
656}
657
658static int btree_readpage(struct file *file, struct page *page)
659{
660 struct extent_io_tree *tree;
661 tree = &BTRFS_I(page->mapping->host)->io_tree;
662 return extent_read_full_page(tree, page, btree_get_extent);
663}
664
665static int btree_releasepage(struct page *page, gfp_t gfp_flags)
666{
667 struct extent_io_tree *tree;
668 struct extent_map_tree *map;
669 int ret;
670
671 if (PageWriteback(page) || PageDirty(page))
672 return 0;
673
674 tree = &BTRFS_I(page->mapping->host)->io_tree;
675 map = &BTRFS_I(page->mapping->host)->extent_tree;
676
677 ret = try_release_extent_state(map, tree, page, gfp_flags);
678 if (!ret)
679 return 0;
680
681 ret = try_release_extent_buffer(tree, page);
682 if (ret == 1) {
683 ClearPagePrivate(page);
684 set_page_private(page, 0);
685 page_cache_release(page);
686 }
687
688 return ret;
689}
690
691static void btree_invalidatepage(struct page *page, unsigned long offset)
692{
693 struct extent_io_tree *tree;
694 tree = &BTRFS_I(page->mapping->host)->io_tree;
695 extent_invalidatepage(tree, page, offset);
696 btree_releasepage(page, GFP_NOFS);
697 if (PagePrivate(page)) {
698 printk(KERN_WARNING "btrfs warning page private not zero "
699 "on page %llu\n", (unsigned long long)page_offset(page));
700 ClearPagePrivate(page);
701 set_page_private(page, 0);
702 page_cache_release(page);
703 }
704}
705
706#if 0
707static int btree_writepage(struct page *page, struct writeback_control *wbc)
708{
709 struct buffer_head *bh;
710 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
711 struct buffer_head *head;
712 if (!page_has_buffers(page)) {
713 create_empty_buffers(page, root->fs_info->sb->s_blocksize,
714 (1 << BH_Dirty)|(1 << BH_Uptodate));
715 }
716 head = page_buffers(page);
717 bh = head;
718 do {
719 if (buffer_dirty(bh))
720 csum_tree_block(root, bh, 0);
721 bh = bh->b_this_page;
722 } while (bh != head);
723 return block_write_full_page(page, btree_get_block, wbc);
724}
725#endif
726
727static struct address_space_operations btree_aops = {
728 .readpage = btree_readpage,
729 .writepage = btree_writepage,
730 .writepages = btree_writepages,
731 .releasepage = btree_releasepage,
732 .invalidatepage = btree_invalidatepage,
733 .sync_page = block_sync_page,
734};
735
736int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
737 u64 parent_transid)
738{
739 struct extent_buffer *buf = NULL;
740 struct inode *btree_inode = root->fs_info->btree_inode;
741 int ret = 0;
742
743 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
744 if (!buf)
745 return 0;
746 read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
747 buf, 0, 0, btree_get_extent, 0);
748 free_extent_buffer(buf);
749 return ret;
750}
751
752struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
753 u64 bytenr, u32 blocksize)
754{
755 struct inode *btree_inode = root->fs_info->btree_inode;
756 struct extent_buffer *eb;
757 eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
758 bytenr, blocksize, GFP_NOFS);
759 return eb;
760}
761
762struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
763 u64 bytenr, u32 blocksize)
764{
765 struct inode *btree_inode = root->fs_info->btree_inode;
766 struct extent_buffer *eb;
767
768 eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
769 bytenr, blocksize, NULL, GFP_NOFS);
770 return eb;
771}
772
773
774int btrfs_write_tree_block(struct extent_buffer *buf)
775{
776 return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
777 buf->start + buf->len - 1, WB_SYNC_ALL);
778}
779
780int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
781{
782 return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
783 buf->start, buf->start + buf->len - 1);
784}
785
786struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
787 u32 blocksize, u64 parent_transid)
788{
789 struct extent_buffer *buf = NULL;
790 struct inode *btree_inode = root->fs_info->btree_inode;
791 struct extent_io_tree *io_tree;
792 int ret;
793
794 io_tree = &BTRFS_I(btree_inode)->io_tree;
795
796 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
797 if (!buf)
798 return NULL;
799
800 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
801
802 if (ret == 0)
803 buf->flags |= EXTENT_UPTODATE;
804 else
805 WARN_ON(1);
806 return buf;
807
808}
809
810int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
811 struct extent_buffer *buf)
812{
813 struct inode *btree_inode = root->fs_info->btree_inode;
814 if (btrfs_header_generation(buf) ==
815 root->fs_info->running_transaction->transid) {
816 WARN_ON(!btrfs_tree_locked(buf));
817 clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
818 buf);
819 }
820 return 0;
821}
822
823static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
824 u32 stripesize, struct btrfs_root *root,
825 struct btrfs_fs_info *fs_info,
826 u64 objectid)
827{
828 root->node = NULL;
829 root->commit_root = NULL;
830 root->ref_tree = NULL;
831 root->sectorsize = sectorsize;
832 root->nodesize = nodesize;
833 root->leafsize = leafsize;
834 root->stripesize = stripesize;
835 root->ref_cows = 0;
836 root->track_dirty = 0;
837
838 root->fs_info = fs_info;
839 root->objectid = objectid;
840 root->last_trans = 0;
841 root->highest_inode = 0;
842 root->last_inode_alloc = 0;
843 root->name = NULL;
844 root->in_sysfs = 0;
845
846 INIT_LIST_HEAD(&root->dirty_list);
847 INIT_LIST_HEAD(&root->orphan_list);
848 INIT_LIST_HEAD(&root->dead_list);
849 spin_lock_init(&root->node_lock);
850 spin_lock_init(&root->list_lock);
851 mutex_init(&root->objectid_mutex);
852 mutex_init(&root->log_mutex);
853 extent_io_tree_init(&root->dirty_log_pages,
854 fs_info->btree_inode->i_mapping, GFP_NOFS);
855
856 btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
857 root->ref_tree = &root->ref_tree_struct;
858
859 memset(&root->root_key, 0, sizeof(root->root_key));
860 memset(&root->root_item, 0, sizeof(root->root_item));
861 memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
862 memset(&root->root_kobj, 0, sizeof(root->root_kobj));
863 root->defrag_trans_start = fs_info->generation;
864 init_completion(&root->kobj_unregister);
865 root->defrag_running = 0;
866 root->defrag_level = 0;
867 root->root_key.objectid = objectid;
868 root->anon_super.s_root = NULL;
869 root->anon_super.s_dev = 0;
870 INIT_LIST_HEAD(&root->anon_super.s_list);
871 INIT_LIST_HEAD(&root->anon_super.s_instances);
872 init_rwsem(&root->anon_super.s_umount);
873
874 return 0;
875}
876
877static int find_and_setup_root(struct btrfs_root *tree_root,
878 struct btrfs_fs_info *fs_info,
879 u64 objectid,
880 struct btrfs_root *root)
881{
882 int ret;
883 u32 blocksize;
884 u64 generation;
885
886 __setup_root(tree_root->nodesize, tree_root->leafsize,
887 tree_root->sectorsize, tree_root->stripesize,
888 root, fs_info, objectid);
889 ret = btrfs_find_last_root(tree_root, objectid,
890 &root->root_item, &root->root_key);
891 BUG_ON(ret);
892
893 generation = btrfs_root_generation(&root->root_item);
894 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
895 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
896 blocksize, generation);
897 BUG_ON(!root->node);
898 return 0;
899}
900
901int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
902 struct btrfs_fs_info *fs_info)
903{
904 struct extent_buffer *eb;
905 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
906 u64 start = 0;
907 u64 end = 0;
908 int ret;
909
910 if (!log_root_tree)
911 return 0;
912
913 while (1) {
914 ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
915 0, &start, &end, EXTENT_DIRTY);
916 if (ret)
917 break;
918
919 clear_extent_dirty(&log_root_tree->dirty_log_pages,
920 start, end, GFP_NOFS);
921 }
922 eb = fs_info->log_root_tree->node;
923
924 WARN_ON(btrfs_header_level(eb) != 0);
925 WARN_ON(btrfs_header_nritems(eb) != 0);
926
927 ret = btrfs_free_reserved_extent(fs_info->tree_root,
928 eb->start, eb->len);
929 BUG_ON(ret);
930
931 free_extent_buffer(eb);
932 kfree(fs_info->log_root_tree);
933 fs_info->log_root_tree = NULL;
934 return 0;
935}
936
937int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
938 struct btrfs_fs_info *fs_info)
939{
940 struct btrfs_root *root;
941 struct btrfs_root *tree_root = fs_info->tree_root;
942
943 root = kzalloc(sizeof(*root), GFP_NOFS);
944 if (!root)
945 return -ENOMEM;
946
947 __setup_root(tree_root->nodesize, tree_root->leafsize,
948 tree_root->sectorsize, tree_root->stripesize,
949 root, fs_info, BTRFS_TREE_LOG_OBJECTID);
950
951 root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
952 root->root_key.type = BTRFS_ROOT_ITEM_KEY;
953 root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
954 root->ref_cows = 0;
955
956	root->node = btrfs_alloc_free_block(trans, root, root->leafsize,
957					    0, BTRFS_TREE_LOG_OBJECTID,
958					    trans->transid, 0, 0, 0);
	if (IS_ERR(root->node)) {
		kfree(root);
		return PTR_ERR(root->node);
	}
959
960 btrfs_set_header_nritems(root->node, 0);
961 btrfs_set_header_level(root->node, 0);
962 btrfs_set_header_bytenr(root->node, root->node->start);
963 btrfs_set_header_generation(root->node, trans->transid);
964 btrfs_set_header_owner(root->node, BTRFS_TREE_LOG_OBJECTID);
965
966 write_extent_buffer(root->node, root->fs_info->fsid,
967 (unsigned long)btrfs_header_fsid(root->node),
968 BTRFS_FSID_SIZE);
969 btrfs_mark_buffer_dirty(root->node);
970 btrfs_tree_unlock(root->node);
971 fs_info->log_root_tree = root;
972 return 0;
973}
974
975struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
976 struct btrfs_key *location)
977{
978 struct btrfs_root *root;
979 struct btrfs_fs_info *fs_info = tree_root->fs_info;
980 struct btrfs_path *path;
981 struct extent_buffer *l;
982 u64 highest_inode;
983 u64 generation;
984 u32 blocksize;
985 int ret = 0;
986
987 root = kzalloc(sizeof(*root), GFP_NOFS);
988 if (!root)
989 return ERR_PTR(-ENOMEM);
990 if (location->offset == (u64)-1) {
991 ret = find_and_setup_root(tree_root, fs_info,
992 location->objectid, root);
993 if (ret) {
994 kfree(root);
995 return ERR_PTR(ret);
996 }
997 goto insert;
998 }
999
1000 __setup_root(tree_root->nodesize, tree_root->leafsize,
1001 tree_root->sectorsize, tree_root->stripesize,
1002 root, fs_info, location->objectid);
1003
1004 path = btrfs_alloc_path();
1005 BUG_ON(!path);
1006 ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1007 if (ret != 0) {
1008 if (ret > 0)
1009 ret = -ENOENT;
1010 goto out;
1011 }
1012 l = path->nodes[0];
1013 read_extent_buffer(l, &root->root_item,
1014 btrfs_item_ptr_offset(l, path->slots[0]),
1015 sizeof(root->root_item));
1016 memcpy(&root->root_key, location, sizeof(*location));
1017 ret = 0;
1018out:
1019 btrfs_release_path(root, path);
1020 btrfs_free_path(path);
1021 if (ret) {
1022 kfree(root);
1023 return ERR_PTR(ret);
1024 }
1025 generation = btrfs_root_generation(&root->root_item);
1026 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1027 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1028 blocksize, generation);
1029 BUG_ON(!root->node);
1030insert:
1031 if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1032 root->ref_cows = 1;
1033 ret = btrfs_find_highest_inode(root, &highest_inode);
1034 if (ret == 0) {
1035 root->highest_inode = highest_inode;
1036 root->last_inode_alloc = highest_inode;
1037 }
1038 }
1039 return root;
1040}
1041
1042struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1043 u64 root_objectid)
1044{
1045 struct btrfs_root *root;
1046
1047 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
1048 return fs_info->tree_root;
1049 if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
1050 return fs_info->extent_root;
1051
1052 root = radix_tree_lookup(&fs_info->fs_roots_radix,
1053 (unsigned long)root_objectid);
1054 return root;
1055}
1056
1057struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1058 struct btrfs_key *location)
1059{
1060 struct btrfs_root *root;
1061 int ret;
1062
1063 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1064 return fs_info->tree_root;
1065 if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1066 return fs_info->extent_root;
1067 if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1068 return fs_info->chunk_root;
1069 if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1070 return fs_info->dev_root;
1071 if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1072 return fs_info->csum_root;
1073
1074 root = radix_tree_lookup(&fs_info->fs_roots_radix,
1075 (unsigned long)location->objectid);
1076 if (root)
1077 return root;
1078
1079 root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1080 if (IS_ERR(root))
1081 return root;
1082
1083 set_anon_super(&root->anon_super, NULL);
1084
1085 ret = radix_tree_insert(&fs_info->fs_roots_radix,
1086 (unsigned long)root->root_key.objectid,
1087 root);
1088 if (ret) {
1089 free_extent_buffer(root->node);
1090 kfree(root);
1091 return ERR_PTR(ret);
1092 }
1093 if (!(fs_info->sb->s_flags & MS_RDONLY)) {
1094 ret = btrfs_find_dead_roots(fs_info->tree_root,
1095 root->root_key.objectid, root);
1096 BUG_ON(ret);
1097 btrfs_orphan_cleanup(root);
1098 }
1099 return root;
1100}
1101
1102struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
1103 struct btrfs_key *location,
1104 const char *name, int namelen)
1105{
1106 struct btrfs_root *root;
1107 int ret;
1108
1109 root = btrfs_read_fs_root_no_name(fs_info, location);
1110	if (IS_ERR(root))
1111		return root;	/* _no_name returns an ERR_PTR, never NULL */
1112
1113 if (root->in_sysfs)
1114 return root;
1115
1116 ret = btrfs_set_root_name(root, name, namelen);
1117 if (ret) {
1118 free_extent_buffer(root->node);
1119 kfree(root);
1120 return ERR_PTR(ret);
1121 }
1122#if 0
1123 ret = btrfs_sysfs_add_root(root);
1124 if (ret) {
1125 free_extent_buffer(root->node);
1126 kfree(root->name);
1127 kfree(root);
1128 return ERR_PTR(ret);
1129 }
1130#endif
1131 root->in_sysfs = 1;
1132 return root;
1133}
1134
1135static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1136{
1137 struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1138 int ret = 0;
1139 struct list_head *cur;
1140 struct btrfs_device *device;
1141 struct backing_dev_info *bdi;
1142#if 0
1143 if ((bdi_bits & (1 << BDI_write_congested)) &&
1144 btrfs_congested_async(info, 0))
1145 return 1;
1146#endif
1147 list_for_each(cur, &info->fs_devices->devices) {
1148 device = list_entry(cur, struct btrfs_device, dev_list);
1149 if (!device->bdev)
1150 continue;
1151 bdi = blk_get_backing_dev_info(device->bdev);
1152 if (bdi && bdi_congested(bdi, bdi_bits)) {
1153 ret = 1;
1154 break;
1155 }
1156 }
1157 return ret;
1158}
1159
1160/*
1161 * this unplugs every device on the box, and it is only used when page
1162 * is null
1163 */
1164static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1165{
1166 struct list_head *cur;
1167 struct btrfs_device *device;
1168 struct btrfs_fs_info *info;
1169
1170 info = (struct btrfs_fs_info *)bdi->unplug_io_data;
1171 list_for_each(cur, &info->fs_devices->devices) {
1172 device = list_entry(cur, struct btrfs_device, dev_list);
1173 if (!device->bdev)
1174 continue;
1175
1176 bdi = blk_get_backing_dev_info(device->bdev);
1177 if (bdi->unplug_io_fn)
1178 bdi->unplug_io_fn(bdi, page);
1179 }
1180}
1181
1182static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1183{
1184 struct inode *inode;
1185 struct extent_map_tree *em_tree;
1186 struct extent_map *em;
1187 struct address_space *mapping;
1188 u64 offset;
1189
 1190 	/* the generic O_DIRECT read code does this; the "1 ||" forces the full unplug path for now */
1191 if (1 || !page) {
1192 __unplug_io_fn(bdi, page);
1193 return;
1194 }
1195
1196 /*
1197 * page->mapping may change at any time. Get a consistent copy
1198 * and use that for everything below
1199 */
1200 smp_mb();
1201 mapping = page->mapping;
1202 if (!mapping)
1203 return;
1204
1205 inode = mapping->host;
1206
1207 /*
1208 * don't do the expensive searching for a small number of
1209 * devices
1210 */
1211 if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
1212 __unplug_io_fn(bdi, page);
1213 return;
1214 }
1215
1216 offset = page_offset(page);
1217
1218 em_tree = &BTRFS_I(inode)->extent_tree;
1219 spin_lock(&em_tree->lock);
1220 em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
1221 spin_unlock(&em_tree->lock);
1222 if (!em) {
1223 __unplug_io_fn(bdi, page);
1224 return;
1225 }
1226
1227 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1228 free_extent_map(em);
1229 __unplug_io_fn(bdi, page);
1230 return;
1231 }
1232 offset = offset - em->start;
1233 btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
1234 em->block_start + offset, page);
1235 free_extent_map(em);
1236}
1237
1238static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1239{
1240 bdi_init(bdi);
1241 bdi->ra_pages = default_backing_dev_info.ra_pages;
1242 bdi->state = 0;
1243 bdi->capabilities = default_backing_dev_info.capabilities;
1244 bdi->unplug_io_fn = btrfs_unplug_io_fn;
1245 bdi->unplug_io_data = info;
1246 bdi->congested_fn = btrfs_congested_fn;
1247 bdi->congested_data = info;
1248 return 0;
1249}
1250
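/*
 * On the first page of a tree block, page->private is assumed to hold
 * the block length shifted up by two, with the low bits reserved for
 * flag values such as EXTENT_PAGE_PRIVATE; that is why the helper
 * below (and btree_lock_page_hook() near the end of this file)
 * recovers the length with "page->private >> 2".
 */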
1251static int bio_ready_for_csum(struct bio *bio)
1252{
1253 u64 length = 0;
1254 u64 buf_len = 0;
1255 u64 start = 0;
1256 struct page *page;
1257 struct extent_io_tree *io_tree = NULL;
1258 struct btrfs_fs_info *info = NULL;
1259 struct bio_vec *bvec;
1260 int i;
1261 int ret;
1262
1263 bio_for_each_segment(bvec, bio, i) {
1264 page = bvec->bv_page;
1265 if (page->private == EXTENT_PAGE_PRIVATE) {
1266 length += bvec->bv_len;
1267 continue;
1268 }
1269 if (!page->private) {
1270 length += bvec->bv_len;
1271 continue;
1272 }
1273 length = bvec->bv_len;
1274 buf_len = page->private >> 2;
1275 start = page_offset(page) + bvec->bv_offset;
1276 io_tree = &BTRFS_I(page->mapping->host)->io_tree;
1277 info = BTRFS_I(page->mapping->host)->root->fs_info;
1278 }
1279 /* are we fully contained in this bio? */
1280 if (buf_len <= length)
1281 return 1;
1282
1283 ret = extent_range_uptodate(io_tree, start + length,
1284 start + buf_len - 1);
 1285 	return ret;
1288}
1289
1290/*
1291 * called by the kthread helper functions to finally call the bio end_io
1292 * functions. This is where read checksum verification actually happens
1293 */
1294static void end_workqueue_fn(struct btrfs_work *work)
1295{
1296 struct bio *bio;
1297 struct end_io_wq *end_io_wq;
1298 struct btrfs_fs_info *fs_info;
1299 int error;
1300
1301 end_io_wq = container_of(work, struct end_io_wq, work);
1302 bio = end_io_wq->bio;
1303 fs_info = end_io_wq->info;
1304
1305 /* metadata bio reads are special because the whole tree block must
1306 * be checksummed at once. This makes sure the entire block is in
 1307 	 * RAM and up to date before trying to verify things. For
 1308 	 * blocksize <= pagesize, it is basically a no-op
1309 */
1310 if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
1311 !bio_ready_for_csum(bio)) {
1312 btrfs_queue_worker(&fs_info->endio_meta_workers,
1313 &end_io_wq->work);
1314 return;
1315 }
1316 error = end_io_wq->error;
1317 bio->bi_private = end_io_wq->private;
1318 bio->bi_end_io = end_io_wq->end_io;
1319 kfree(end_io_wq);
1320 bio_endio(bio, error);
1321}
1322
1323static int cleaner_kthread(void *arg)
1324{
1325 struct btrfs_root *root = arg;
1326
1327 do {
1328 smp_mb();
1329 if (root->fs_info->closing)
1330 break;
1331
1332 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1333 mutex_lock(&root->fs_info->cleaner_mutex);
1334 btrfs_clean_old_snapshots(root);
1335 mutex_unlock(&root->fs_info->cleaner_mutex);
1336
1337 if (freezing(current)) {
1338 refrigerator();
1339 } else {
1340 smp_mb();
1341 if (root->fs_info->closing)
1342 break;
1343 set_current_state(TASK_INTERRUPTIBLE);
1344 schedule();
1345 __set_current_state(TASK_RUNNING);
1346 }
1347 } while (!kthread_should_stop());
1348 return 0;
1349}
1350
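/*
 * The transaction kthread normally wakes every 30 seconds (HZ * 30)
 * and commits the running transaction once it is at least 30 seconds
 * old; if the transaction is younger than that, it re-checks after 5
 * seconds (HZ * 5) instead of committing.
 */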
1351static int transaction_kthread(void *arg)
1352{
1353 struct btrfs_root *root = arg;
1354 struct btrfs_trans_handle *trans;
1355 struct btrfs_transaction *cur;
1356 unsigned long now;
1357 unsigned long delay;
1358 int ret;
1359
1360 do {
1361 smp_mb();
1362 if (root->fs_info->closing)
1363 break;
1364
1365 delay = HZ * 30;
1366 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1367 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1368
1369 if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) {
1370 printk(KERN_INFO "btrfs: total reference cache "
1371 "size %llu\n",
 1372 			       (unsigned long long)root->fs_info->total_ref_cache_size);
1373 }
1374
1375 mutex_lock(&root->fs_info->trans_mutex);
1376 cur = root->fs_info->running_transaction;
1377 if (!cur) {
1378 mutex_unlock(&root->fs_info->trans_mutex);
1379 goto sleep;
1380 }
1381
1382 now = get_seconds();
1383 if (now < cur->start_time || now - cur->start_time < 30) {
1384 mutex_unlock(&root->fs_info->trans_mutex);
1385 delay = HZ * 5;
1386 goto sleep;
1387 }
1388 mutex_unlock(&root->fs_info->trans_mutex);
1389 trans = btrfs_start_transaction(root, 1);
1390 ret = btrfs_commit_transaction(trans, root);
1391sleep:
1392 wake_up_process(root->fs_info->cleaner_kthread);
1393 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1394
1395 if (freezing(current)) {
1396 refrigerator();
1397 } else {
1398 if (root->fs_info->closing)
1399 break;
1400 set_current_state(TASK_INTERRUPTIBLE);
1401 schedule_timeout(delay);
1402 __set_current_state(TASK_RUNNING);
1403 }
1404 } while (!kthread_should_stop());
1405 return 0;
1406}
1407
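/*
 * open_ctree() does the bulk of mount-time setup: read and sanity
 * check the super block, start the worker thread pools, read the
 * chunk, root, extent, dev and csum trees, start the cleaner and
 * transaction kthreads, replay the tree log if one is present, and
 * hand back the tree root.
 */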
1408struct btrfs_root *open_ctree(struct super_block *sb,
1409 struct btrfs_fs_devices *fs_devices,
1410 char *options)
1411{
1412 u32 sectorsize;
1413 u32 nodesize;
1414 u32 leafsize;
1415 u32 blocksize;
1416 u32 stripesize;
1417 u64 generation;
1418 u64 features;
1419 struct btrfs_key location;
1420 struct buffer_head *bh;
1421 struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
1422 GFP_NOFS);
1423 struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1424 GFP_NOFS);
1425 struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
1426 GFP_NOFS);
1427 struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
1428 GFP_NOFS);
1429 struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
1430 GFP_NOFS);
1431 struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
1432 GFP_NOFS);
1433 struct btrfs_root *log_tree_root;
1434
1435 int ret;
1436 int err = -EINVAL;
1437
1438 struct btrfs_super_block *disk_super;
1439
1440 if (!extent_root || !tree_root || !fs_info ||
1441 !chunk_root || !dev_root || !csum_root) {
1442 err = -ENOMEM;
1443 goto fail;
1444 }
1445 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
1446 INIT_LIST_HEAD(&fs_info->trans_list);
1447 INIT_LIST_HEAD(&fs_info->dead_roots);
1448 INIT_LIST_HEAD(&fs_info->hashers);
1449 INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1450 spin_lock_init(&fs_info->hash_lock);
1451 spin_lock_init(&fs_info->delalloc_lock);
1452 spin_lock_init(&fs_info->new_trans_lock);
1453 spin_lock_init(&fs_info->ref_cache_lock);
1454
1455 init_completion(&fs_info->kobj_unregister);
1456 fs_info->tree_root = tree_root;
1457 fs_info->extent_root = extent_root;
1458 fs_info->csum_root = csum_root;
1459 fs_info->chunk_root = chunk_root;
1460 fs_info->dev_root = dev_root;
1461 fs_info->fs_devices = fs_devices;
1462 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1463 INIT_LIST_HEAD(&fs_info->space_info);
1464 btrfs_mapping_init(&fs_info->mapping_tree);
1465 atomic_set(&fs_info->nr_async_submits, 0);
1466 atomic_set(&fs_info->async_delalloc_pages, 0);
1467 atomic_set(&fs_info->async_submit_draining, 0);
1468 atomic_set(&fs_info->nr_async_bios, 0);
1469 atomic_set(&fs_info->throttles, 0);
1470 atomic_set(&fs_info->throttle_gen, 0);
1471 fs_info->sb = sb;
1472 fs_info->max_extent = (u64)-1;
1473 fs_info->max_inline = 8192 * 1024;
1474 setup_bdi(fs_info, &fs_info->bdi);
1475 fs_info->btree_inode = new_inode(sb);
1476 fs_info->btree_inode->i_ino = 1;
1477 fs_info->btree_inode->i_nlink = 1;
1478
1479 fs_info->thread_pool_size = min_t(unsigned long,
1480 num_online_cpus() + 2, 8);
1481
1482 INIT_LIST_HEAD(&fs_info->ordered_extents);
1483 spin_lock_init(&fs_info->ordered_extent_lock);
1484
1485 sb->s_blocksize = 4096;
1486 sb->s_blocksize_bits = blksize_bits(4096);
1487
1488 /*
1489 * we set the i_size on the btree inode to the max possible int.
1490 * the real end of the address space is determined by all of
1491 * the devices in the system
1492 */
1493 fs_info->btree_inode->i_size = OFFSET_MAX;
1494 fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1495 fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1496
1497 extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1498 fs_info->btree_inode->i_mapping,
1499 GFP_NOFS);
1500 extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
1501 GFP_NOFS);
1502
1503 BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1504
1505 spin_lock_init(&fs_info->block_group_cache_lock);
1506 fs_info->block_group_cache_tree.rb_node = NULL;
1507
1508 extent_io_tree_init(&fs_info->pinned_extents,
1509 fs_info->btree_inode->i_mapping, GFP_NOFS);
1510 extent_io_tree_init(&fs_info->pending_del,
1511 fs_info->btree_inode->i_mapping, GFP_NOFS);
1512 extent_io_tree_init(&fs_info->extent_ins,
1513 fs_info->btree_inode->i_mapping, GFP_NOFS);
1514 fs_info->do_barriers = 1;
1515
1516 INIT_LIST_HEAD(&fs_info->dead_reloc_roots);
1517 btrfs_leaf_ref_tree_init(&fs_info->reloc_ref_tree);
1518 btrfs_leaf_ref_tree_init(&fs_info->shared_ref_tree);
1519
1520 BTRFS_I(fs_info->btree_inode)->root = tree_root;
1521 memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1522 sizeof(struct btrfs_key));
1523 insert_inode_hash(fs_info->btree_inode);
1524
1525 mutex_init(&fs_info->trans_mutex);
1526 mutex_init(&fs_info->tree_log_mutex);
1527 mutex_init(&fs_info->drop_mutex);
1528 mutex_init(&fs_info->extent_ins_mutex);
1529 mutex_init(&fs_info->pinned_mutex);
1530 mutex_init(&fs_info->chunk_mutex);
1531 mutex_init(&fs_info->transaction_kthread_mutex);
1532 mutex_init(&fs_info->cleaner_mutex);
1533 mutex_init(&fs_info->volume_mutex);
1534 mutex_init(&fs_info->tree_reloc_mutex);
1535 init_waitqueue_head(&fs_info->transaction_throttle);
1536 init_waitqueue_head(&fs_info->transaction_wait);
1537 init_waitqueue_head(&fs_info->async_submit_wait);
1538 init_waitqueue_head(&fs_info->tree_log_wait);
1539 atomic_set(&fs_info->tree_log_commit, 0);
1540 atomic_set(&fs_info->tree_log_writers, 0);
1541 fs_info->tree_log_transid = 0;
1542
1543 __setup_root(4096, 4096, 4096, 4096, tree_root,
1544 fs_info, BTRFS_ROOT_TREE_OBJECTID);
1545
1546
1547 bh = btrfs_read_dev_super(fs_devices->latest_bdev);
1548 if (!bh)
1549 goto fail_iput;
1550
1551 memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
1552 memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
1553 sizeof(fs_info->super_for_commit));
1554 brelse(bh);
1555
1556 memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
1557
1558 disk_super = &fs_info->super_copy;
1559 if (!btrfs_super_root(disk_super))
1560 goto fail_iput;
1561
1562 ret = btrfs_parse_options(tree_root, options);
1563 if (ret) {
1564 err = ret;
1565 goto fail_iput;
1566 }
1567
1568 features = btrfs_super_incompat_flags(disk_super) &
1569 ~BTRFS_FEATURE_INCOMPAT_SUPP;
1570 if (features) {
1571 printk(KERN_ERR "BTRFS: couldn't mount because of "
1572 "unsupported optional features (%Lx).\n",
1573 features);
1574 err = -EINVAL;
1575 goto fail_iput;
1576 }
1577
1578 features = btrfs_super_compat_ro_flags(disk_super) &
1579 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
1580 if (!(sb->s_flags & MS_RDONLY) && features) {
1581 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
1582 "unsupported option features (%Lx).\n",
1583 features);
1584 err = -EINVAL;
1585 goto fail_iput;
1586 }
1587
1588 /*
1589 * we need to start all the end_io workers up front because the
1590 * queue work function gets called at interrupt time, and so it
1591 * cannot dynamically grow.
1592 */
1593 btrfs_init_workers(&fs_info->workers, "worker",
1594 fs_info->thread_pool_size);
1595
1596 btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
1597 fs_info->thread_pool_size);
1598
1599 btrfs_init_workers(&fs_info->submit_workers, "submit",
1600 min_t(u64, fs_devices->num_devices,
1601 fs_info->thread_pool_size));
1602
1603 /* a higher idle thresh on the submit workers makes it much more
 1604 	 * likely that bios will be sent down in a sane order to the
1605 * devices
1606 */
1607 fs_info->submit_workers.idle_thresh = 64;
1608
1609 fs_info->workers.idle_thresh = 16;
1610 fs_info->workers.ordered = 1;
1611
1612 fs_info->delalloc_workers.idle_thresh = 2;
1613 fs_info->delalloc_workers.ordered = 1;
1614
1615 btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
1616 btrfs_init_workers(&fs_info->endio_workers, "endio",
1617 fs_info->thread_pool_size);
1618 btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
1619 fs_info->thread_pool_size);
1620 btrfs_init_workers(&fs_info->endio_meta_write_workers,
1621 "endio-meta-write", fs_info->thread_pool_size);
1622 btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
1623 fs_info->thread_pool_size);
1624
1625 /*
1626 * endios are largely parallel and should have a very
1627 * low idle thresh
1628 */
1629 fs_info->endio_workers.idle_thresh = 4;
1630 fs_info->endio_write_workers.idle_thresh = 64;
1631 fs_info->endio_meta_write_workers.idle_thresh = 64;
1632
1633 btrfs_start_workers(&fs_info->workers, 1);
1634 btrfs_start_workers(&fs_info->submit_workers, 1);
1635 btrfs_start_workers(&fs_info->delalloc_workers, 1);
1636 btrfs_start_workers(&fs_info->fixup_workers, 1);
1637 btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
1638 btrfs_start_workers(&fs_info->endio_meta_workers,
1639 fs_info->thread_pool_size);
1640 btrfs_start_workers(&fs_info->endio_meta_write_workers,
1641 fs_info->thread_pool_size);
1642 btrfs_start_workers(&fs_info->endio_write_workers,
1643 fs_info->thread_pool_size);
1644
1645 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1646 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
1647 4 * 1024 * 1024 / PAGE_CACHE_SIZE);
1648
1649 nodesize = btrfs_super_nodesize(disk_super);
1650 leafsize = btrfs_super_leafsize(disk_super);
1651 sectorsize = btrfs_super_sectorsize(disk_super);
1652 stripesize = btrfs_super_stripesize(disk_super);
1653 tree_root->nodesize = nodesize;
1654 tree_root->leafsize = leafsize;
1655 tree_root->sectorsize = sectorsize;
1656 tree_root->stripesize = stripesize;
1657
1658 sb->s_blocksize = sectorsize;
1659 sb->s_blocksize_bits = blksize_bits(sectorsize);
1660
1661 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1662 sizeof(disk_super->magic))) {
1663 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
1664 goto fail_sb_buffer;
1665 }
1666
1667 mutex_lock(&fs_info->chunk_mutex);
1668 ret = btrfs_read_sys_array(tree_root);
1669 mutex_unlock(&fs_info->chunk_mutex);
1670 if (ret) {
1671 printk(KERN_WARNING "btrfs: failed to read the system "
1672 "array on %s\n", sb->s_id);
1673 goto fail_sys_array;
1674 }
1675
1676 blocksize = btrfs_level_size(tree_root,
1677 btrfs_super_chunk_root_level(disk_super));
1678 generation = btrfs_super_chunk_root_generation(disk_super);
1679
1680 __setup_root(nodesize, leafsize, sectorsize, stripesize,
1681 chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1682
1683 chunk_root->node = read_tree_block(chunk_root,
1684 btrfs_super_chunk_root(disk_super),
1685 blocksize, generation);
1686 BUG_ON(!chunk_root->node);
1687
1688 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1689 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1690 BTRFS_UUID_SIZE);
1691
1692 mutex_lock(&fs_info->chunk_mutex);
1693 ret = btrfs_read_chunk_tree(chunk_root);
1694 mutex_unlock(&fs_info->chunk_mutex);
1695 if (ret) {
1696 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
1697 sb->s_id);
1698 goto fail_chunk_root;
1699 }
1700
1701 btrfs_close_extra_devices(fs_devices);
1702
1703 blocksize = btrfs_level_size(tree_root,
1704 btrfs_super_root_level(disk_super));
1705 generation = btrfs_super_generation(disk_super);
1706
1707 tree_root->node = read_tree_block(tree_root,
1708 btrfs_super_root(disk_super),
1709 blocksize, generation);
1710 if (!tree_root->node)
1711 goto fail_chunk_root;
1712
1713
1714 ret = find_and_setup_root(tree_root, fs_info,
1715 BTRFS_EXTENT_TREE_OBJECTID, extent_root);
1716 if (ret)
1717 goto fail_tree_root;
1718 extent_root->track_dirty = 1;
1719
1720 ret = find_and_setup_root(tree_root, fs_info,
1721 BTRFS_DEV_TREE_OBJECTID, dev_root);
1722 dev_root->track_dirty = 1;
1723
1724 if (ret)
1725 goto fail_extent_root;
1726
1727 ret = find_and_setup_root(tree_root, fs_info,
1728 BTRFS_CSUM_TREE_OBJECTID, csum_root);
1729 if (ret)
1730 goto fail_extent_root;
1731
1732 csum_root->track_dirty = 1;
1733
1734 btrfs_read_block_groups(extent_root);
1735
1736 fs_info->generation = generation;
1737 fs_info->last_trans_committed = generation;
1738 fs_info->data_alloc_profile = (u64)-1;
1739 fs_info->metadata_alloc_profile = (u64)-1;
1740 fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1741 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1742 "btrfs-cleaner");
1743 if (!fs_info->cleaner_kthread)
1744 goto fail_csum_root;
1745
1746 fs_info->transaction_kthread = kthread_run(transaction_kthread,
1747 tree_root,
1748 "btrfs-transaction");
1749 if (!fs_info->transaction_kthread)
1750 goto fail_cleaner;
1751
1752 if (btrfs_super_log_root(disk_super) != 0) {
1753 u64 bytenr = btrfs_super_log_root(disk_super);
1754
1755 if (fs_devices->rw_devices == 0) {
1756 printk(KERN_WARNING "Btrfs log replay required "
1757 "on RO media\n");
1758 err = -EIO;
1759 goto fail_trans_kthread;
1760 }
1761 blocksize =
1762 btrfs_level_size(tree_root,
1763 btrfs_super_log_root_level(disk_super));
1764
1765 log_tree_root = kzalloc(sizeof(struct btrfs_root),
1766 GFP_NOFS);
1767
1768 __setup_root(nodesize, leafsize, sectorsize, stripesize,
1769 log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1770
1771 log_tree_root->node = read_tree_block(tree_root, bytenr,
1772 blocksize,
1773 generation + 1);
1774 ret = btrfs_recover_log_trees(log_tree_root);
1775 BUG_ON(ret);
1776
1777 if (sb->s_flags & MS_RDONLY) {
1778 ret = btrfs_commit_super(tree_root);
1779 BUG_ON(ret);
1780 }
1781 }
1782
1783 if (!(sb->s_flags & MS_RDONLY)) {
1784 ret = btrfs_cleanup_reloc_trees(tree_root);
1785 BUG_ON(ret);
1786 }
1787
1788 location.objectid = BTRFS_FS_TREE_OBJECTID;
1789 location.type = BTRFS_ROOT_ITEM_KEY;
1790 location.offset = (u64)-1;
1791
1792 fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
1793 if (!fs_info->fs_root)
1794 goto fail_trans_kthread;
1795 return tree_root;
1796
1797fail_trans_kthread:
1798 kthread_stop(fs_info->transaction_kthread);
1799fail_cleaner:
1800 kthread_stop(fs_info->cleaner_kthread);
1801
1802 /*
1803 * make sure we're done with the btree inode before we stop our
1804 * kthreads
1805 */
1806 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
1807 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
1808
1809fail_csum_root:
1810 free_extent_buffer(csum_root->node);
1811fail_extent_root:
1812 free_extent_buffer(extent_root->node);
1813fail_tree_root:
1814 free_extent_buffer(tree_root->node);
1815fail_chunk_root:
1816 free_extent_buffer(chunk_root->node);
1817fail_sys_array:
1818 free_extent_buffer(dev_root->node);
1819fail_sb_buffer:
1820 btrfs_stop_workers(&fs_info->fixup_workers);
1821 btrfs_stop_workers(&fs_info->delalloc_workers);
1822 btrfs_stop_workers(&fs_info->workers);
1823 btrfs_stop_workers(&fs_info->endio_workers);
1824 btrfs_stop_workers(&fs_info->endio_meta_workers);
1825 btrfs_stop_workers(&fs_info->endio_meta_write_workers);
1826 btrfs_stop_workers(&fs_info->endio_write_workers);
1827 btrfs_stop_workers(&fs_info->submit_workers);
1828fail_iput:
1829 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
1830 iput(fs_info->btree_inode);
1831fail:
1832 btrfs_close_devices(fs_info->fs_devices);
1833 btrfs_mapping_tree_free(&fs_info->mapping_tree);
1834
1835 kfree(extent_root);
1836 kfree(tree_root);
1837 bdi_destroy(&fs_info->bdi);
1838 kfree(fs_info);
1839 kfree(chunk_root);
1840 kfree(dev_root);
1841 kfree(csum_root);
1842 return ERR_PTR(err);
1843}
1844
1845static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1846{
1847 char b[BDEVNAME_SIZE];
1848
1849 if (uptodate) {
1850 set_buffer_uptodate(bh);
1851 } else {
1852 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
1853 printk(KERN_WARNING "lost page write due to "
1854 "I/O error on %s\n",
1855 bdevname(bh->b_bdev, b));
1856 }
 1857 		/* note, we don't set_buffer_write_io_error because we have
1858 * our own ways of dealing with the IO errors
1859 */
1860 clear_buffer_uptodate(bh);
1861 }
1862 unlock_buffer(bh);
1863 put_bh(bh);
1864}
1865
1866struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
1867{
1868 struct buffer_head *bh;
1869 struct buffer_head *latest = NULL;
1870 struct btrfs_super_block *super;
1871 int i;
1872 u64 transid = 0;
1873 u64 bytenr;
1874
1875 /* we would like to check all the supers, but that would make
1876 * a btrfs mount succeed after a mkfs from a different FS.
1877 * So, we need to add a special mount option to scan for
1878 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1879 */
1880 for (i = 0; i < 1; i++) {
1881 bytenr = btrfs_sb_offset(i);
1882 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
1883 break;
1884 bh = __bread(bdev, bytenr / 4096, 4096);
1885 if (!bh)
1886 continue;
1887
1888 super = (struct btrfs_super_block *)bh->b_data;
1889 if (btrfs_super_bytenr(super) != bytenr ||
1890 strncmp((char *)(&super->magic), BTRFS_MAGIC,
1891 sizeof(super->magic))) {
1892 brelse(bh);
1893 continue;
1894 }
1895
1896 if (!latest || btrfs_super_generation(super) > transid) {
1897 brelse(latest);
1898 latest = bh;
1899 transid = btrfs_super_generation(super);
1900 } else {
1901 brelse(bh);
1902 }
1903 }
1904 return latest;
1905}
1906
1907static int write_dev_supers(struct btrfs_device *device,
1908 struct btrfs_super_block *sb,
1909 int do_barriers, int wait, int max_mirrors)
1910{
1911 struct buffer_head *bh;
1912 int i;
1913 int ret;
1914 int errors = 0;
1915 u32 crc;
1916 u64 bytenr;
1917 int last_barrier = 0;
1918
1919 if (max_mirrors == 0)
1920 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
1921
1922 /* make sure only the last submit_bh does a barrier */
1923 if (do_barriers) {
1924 for (i = 0; i < max_mirrors; i++) {
1925 bytenr = btrfs_sb_offset(i);
1926 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
1927 device->total_bytes)
1928 break;
1929 last_barrier = i;
1930 }
1931 }
1932
1933 for (i = 0; i < max_mirrors; i++) {
1934 bytenr = btrfs_sb_offset(i);
1935 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
1936 break;
1937
1938 if (wait) {
1939 bh = __find_get_block(device->bdev, bytenr / 4096,
1940 BTRFS_SUPER_INFO_SIZE);
1941 BUG_ON(!bh);
1942 brelse(bh);
1943 wait_on_buffer(bh);
1944 if (buffer_uptodate(bh)) {
1945 brelse(bh);
1946 continue;
1947 }
1948 } else {
1949 btrfs_set_super_bytenr(sb, bytenr);
1950
1951 crc = ~(u32)0;
1952 crc = btrfs_csum_data(NULL, (char *)sb +
1953 BTRFS_CSUM_SIZE, crc,
1954 BTRFS_SUPER_INFO_SIZE -
1955 BTRFS_CSUM_SIZE);
1956 btrfs_csum_final(crc, sb->csum);
1957
1958 bh = __getblk(device->bdev, bytenr / 4096,
1959 BTRFS_SUPER_INFO_SIZE);
1960 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
1961
1962 set_buffer_uptodate(bh);
1963 get_bh(bh);
1964 lock_buffer(bh);
1965 bh->b_end_io = btrfs_end_buffer_write_sync;
1966 }
1967
1968 if (i == last_barrier && do_barriers && device->barriers) {
1969 ret = submit_bh(WRITE_BARRIER, bh);
1970 if (ret == -EOPNOTSUPP) {
 1971 				printk(KERN_WARNING "btrfs: disabling barriers on dev %s\n",
1972 device->name);
1973 set_buffer_uptodate(bh);
1974 device->barriers = 0;
1975 get_bh(bh);
1976 lock_buffer(bh);
1977 ret = submit_bh(WRITE, bh);
1978 }
1979 } else {
1980 ret = submit_bh(WRITE, bh);
1981 }
1982
1983 if (!ret && wait) {
1984 wait_on_buffer(bh);
1985 if (!buffer_uptodate(bh))
1986 errors++;
1987 } else if (ret) {
1988 errors++;
1989 }
1990 if (wait)
1991 brelse(bh);
1992 }
1993 return errors < i ? 0 : -1;
1994}
1995
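/*
 * The csum written above covers everything in the super block after
 * the csum field itself.  A read-side check would mirror it; the
 * sketch below is illustrative only and the helper name is made up:
 */
#if 0
static int example_verify_super_csum(struct btrfs_super_block *sb)
{
	char result[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;

	crc = btrfs_csum_data(NULL, (char *)sb + BTRFS_CSUM_SIZE, crc,
			      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, result);
	return memcmp(result, sb->csum, BTRFS_CSUM_SIZE) ? -EIO : 0;
}
#endif
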
1996int write_all_supers(struct btrfs_root *root, int max_mirrors)
1997{
1998 struct list_head *cur;
1999 struct list_head *head = &root->fs_info->fs_devices->devices;
2000 struct btrfs_device *dev;
2001 struct btrfs_super_block *sb;
2002 struct btrfs_dev_item *dev_item;
2003 int ret;
2004 int do_barriers;
2005 int max_errors;
2006 int total_errors = 0;
2007 u64 flags;
2008
2009 max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
2010 do_barriers = !btrfs_test_opt(root, NOBARRIER);
2011
2012 sb = &root->fs_info->super_for_commit;
2013 dev_item = &sb->dev_item;
2014 list_for_each(cur, head) {
2015 dev = list_entry(cur, struct btrfs_device, dev_list);
2016 if (!dev->bdev) {
2017 total_errors++;
2018 continue;
2019 }
2020 if (!dev->in_fs_metadata || !dev->writeable)
2021 continue;
2022
2023 btrfs_set_stack_device_generation(dev_item, 0);
2024 btrfs_set_stack_device_type(dev_item, dev->type);
2025 btrfs_set_stack_device_id(dev_item, dev->devid);
2026 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
2027 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
2028 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
2029 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
2030 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
2031 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
2032 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
2033
2034 flags = btrfs_super_flags(sb);
2035 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
2036
2037 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
2038 if (ret)
2039 total_errors++;
2040 }
2041 if (total_errors > max_errors) {
2042 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2043 total_errors);
2044 BUG();
2045 }
2046
2047 total_errors = 0;
2048 list_for_each(cur, head) {
2049 dev = list_entry(cur, struct btrfs_device, dev_list);
2050 if (!dev->bdev)
2051 continue;
2052 if (!dev->in_fs_metadata || !dev->writeable)
2053 continue;
2054
2055 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
2056 if (ret)
2057 total_errors++;
2058 }
2059 if (total_errors > max_errors) {
2060 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2061 total_errors);
2062 BUG();
2063 }
2064 return 0;
2065}
2066
2067int write_ctree_super(struct btrfs_trans_handle *trans,
2068 struct btrfs_root *root, int max_mirrors)
2069{
2070 int ret;
2071
2072 ret = write_all_supers(root, max_mirrors);
2073 return ret;
2074}
2075
2076int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2077{
2078 radix_tree_delete(&fs_info->fs_roots_radix,
2079 (unsigned long)root->root_key.objectid);
2080 if (root->anon_super.s_dev) {
2081 down_write(&root->anon_super.s_umount);
2082 kill_anon_super(&root->anon_super);
2083 }
2084 if (root->node)
2085 free_extent_buffer(root->node);
2086 if (root->commit_root)
2087 free_extent_buffer(root->commit_root);
2088 kfree(root->name);
2089 kfree(root);
2090 return 0;
2091}
2092
2093static int del_fs_roots(struct btrfs_fs_info *fs_info)
2094{
2095 int ret;
2096 struct btrfs_root *gang[8];
2097 int i;
2098
2099 while (1) {
2100 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2101 (void **)gang, 0,
2102 ARRAY_SIZE(gang));
2103 if (!ret)
2104 break;
2105 for (i = 0; i < ret; i++)
2106 btrfs_free_fs_root(fs_info, gang[i]);
2107 }
2108 return 0;
2109}
2110
2111int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2112{
2113 u64 root_objectid = 0;
2114 struct btrfs_root *gang[8];
2115 int i;
2116 int ret;
2117
2118 while (1) {
2119 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2120 (void **)gang, root_objectid,
2121 ARRAY_SIZE(gang));
2122 if (!ret)
2123 break;
2124 for (i = 0; i < ret; i++) {
2125 root_objectid = gang[i]->root_key.objectid;
2126 ret = btrfs_find_dead_roots(fs_info->tree_root,
2127 root_objectid, gang[i]);
2128 BUG_ON(ret);
2129 btrfs_orphan_cleanup(gang[i]);
2130 }
2131 root_objectid++;
2132 }
2133 return 0;
2134}
2135
2136int btrfs_commit_super(struct btrfs_root *root)
2137{
2138 struct btrfs_trans_handle *trans;
2139 int ret;
2140
2141 mutex_lock(&root->fs_info->cleaner_mutex);
2142 btrfs_clean_old_snapshots(root);
2143 mutex_unlock(&root->fs_info->cleaner_mutex);
2144 trans = btrfs_start_transaction(root, 1);
2145 ret = btrfs_commit_transaction(trans, root);
2146 BUG_ON(ret);
2147 /* run commit again to drop the original snapshot */
2148 trans = btrfs_start_transaction(root, 1);
2149 btrfs_commit_transaction(trans, root);
2150 ret = btrfs_write_and_wait_transaction(NULL, root);
2151 BUG_ON(ret);
2152
2153 ret = write_ctree_super(NULL, root, 0);
2154 return ret;
2155}
2156
2157int close_ctree(struct btrfs_root *root)
2158{
2159 struct btrfs_fs_info *fs_info = root->fs_info;
2160 int ret;
2161
2162 fs_info->closing = 1;
2163 smp_mb();
2164
2165 kthread_stop(root->fs_info->transaction_kthread);
2166 kthread_stop(root->fs_info->cleaner_kthread);
2167
2168 if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2169 ret = btrfs_commit_super(root);
2170 if (ret)
2171 printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2172 }
2173
2174 if (fs_info->delalloc_bytes) {
2175 printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
 2176 		       (unsigned long long)fs_info->delalloc_bytes);
2177 }
2178 if (fs_info->total_ref_cache_size) {
2179 printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
2180 (unsigned long long)fs_info->total_ref_cache_size);
2181 }
2182
2183 if (fs_info->extent_root->node)
2184 free_extent_buffer(fs_info->extent_root->node);
2185
2186 if (fs_info->tree_root->node)
2187 free_extent_buffer(fs_info->tree_root->node);
2188
2189 if (root->fs_info->chunk_root->node)
2190 free_extent_buffer(root->fs_info->chunk_root->node);
2191
2192 if (root->fs_info->dev_root->node)
2193 free_extent_buffer(root->fs_info->dev_root->node);
2194
2195 if (root->fs_info->csum_root->node)
2196 free_extent_buffer(root->fs_info->csum_root->node);
2197
2198 btrfs_free_block_groups(root->fs_info);
2199
2200 del_fs_roots(fs_info);
2201
2202 iput(fs_info->btree_inode);
2203
2204 btrfs_stop_workers(&fs_info->fixup_workers);
2205 btrfs_stop_workers(&fs_info->delalloc_workers);
2206 btrfs_stop_workers(&fs_info->workers);
2207 btrfs_stop_workers(&fs_info->endio_workers);
2208 btrfs_stop_workers(&fs_info->endio_meta_workers);
2209 btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2210 btrfs_stop_workers(&fs_info->endio_write_workers);
2211 btrfs_stop_workers(&fs_info->submit_workers);
2212
2213#if 0
2214 while (!list_empty(&fs_info->hashers)) {
2215 struct btrfs_hasher *hasher;
2216 hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
2217 hashers);
2218 list_del(&hasher->hashers);
2219 crypto_free_hash(&fs_info->hash_tfm);
2220 kfree(hasher);
2221 }
2222#endif
2223 btrfs_close_devices(fs_info->fs_devices);
2224 btrfs_mapping_tree_free(&fs_info->mapping_tree);
2225
2226 bdi_destroy(&fs_info->bdi);
2227
2228 kfree(fs_info->extent_root);
2229 kfree(fs_info->tree_root);
2230 kfree(fs_info->chunk_root);
2231 kfree(fs_info->dev_root);
2232 kfree(fs_info->csum_root);
2233 return 0;
2234}
2235
2236int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
2237{
2238 int ret;
2239 struct inode *btree_inode = buf->first_page->mapping->host;
2240
2241 ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
2242 if (!ret)
2243 return ret;
2244
2245 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
2246 parent_transid);
2247 return !ret;
2248}
2249
2250int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
2251{
2252 struct inode *btree_inode = buf->first_page->mapping->host;
2253 return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
2254 buf);
2255}
2256
2257void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2258{
2259 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2260 u64 transid = btrfs_header_generation(buf);
2261 struct inode *btree_inode = root->fs_info->btree_inode;
2262
2263 WARN_ON(!btrfs_tree_locked(buf));
2264 if (transid != root->fs_info->generation) {
2265 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
2266 "found %llu running %llu\n",
2267 (unsigned long long)buf->start,
2268 (unsigned long long)transid,
2269 (unsigned long long)root->fs_info->generation);
2270 WARN_ON(1);
2271 }
2272 set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
2273}
2274
2275void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2276{
2277 /*
 2278 	 * looks as though older kernels can get into trouble with
 2279 	 * this code; they end up stuck in balance_dirty_pages forever
2280 */
2281 struct extent_io_tree *tree;
2282 u64 num_dirty;
2283 u64 start = 0;
2284 unsigned long thresh = 32 * 1024 * 1024;
2285 tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
2286
2287 if (current_is_pdflush() || current->flags & PF_MEMALLOC)
2288 return;
2289
2290 num_dirty = count_range_bits(tree, &start, (u64)-1,
2291 thresh, EXTENT_DIRTY);
2292 if (num_dirty > thresh) {
2293 balance_dirty_pages_ratelimited_nr(
2294 root->fs_info->btree_inode->i_mapping, 1);
2295 }
2296 return;
2297}
2298
2299int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2300{
2301 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2302 int ret;
2303 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2304 if (ret == 0)
2305 buf->flags |= EXTENT_UPTODATE;
2306 return ret;
2307}
2308
2309int btree_lock_page_hook(struct page *page)
2310{
2311 struct inode *inode = page->mapping->host;
2312 struct btrfs_root *root = BTRFS_I(inode)->root;
2313 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2314 struct extent_buffer *eb;
2315 unsigned long len;
2316 u64 bytenr = page_offset(page);
2317
2318 if (page->private == EXTENT_PAGE_PRIVATE)
2319 goto out;
2320
2321 len = page->private >> 2;
2322 eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
2323 if (!eb)
2324 goto out;
2325
2326 btrfs_tree_lock(eb);
2327 spin_lock(&root->fs_info->hash_lock);
2328 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2329 spin_unlock(&root->fs_info->hash_lock);
2330 btrfs_tree_unlock(eb);
2331 free_extent_buffer(eb);
2332out:
2333 lock_page(page);
2334 return 0;
2335}
2336
2337static struct extent_io_ops btree_extent_io_ops = {
2338 .write_cache_pages_lock_hook = btree_lock_page_hook,
2339 .readpage_end_io_hook = btree_readpage_end_io_hook,
2340 .submit_bio_hook = btree_submit_bio_hook,
2341 /* note we're sharing with inode.c for the merge bio hook */
2342 .merge_bio_hook = btrfs_merge_bio_hook,
2343};
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
new file mode 100644
index 000000000000..c0ff404c31b7
--- /dev/null
+++ b/fs/btrfs/disk-io.h
@@ -0,0 +1,102 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 16 * Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __DISKIO__
20#define __DISKIO__
21
22#define BTRFS_SUPER_INFO_OFFSET (64 * 1024)
23#define BTRFS_SUPER_INFO_SIZE 4096
24
25#define BTRFS_SUPER_MIRROR_MAX 3
26#define BTRFS_SUPER_MIRROR_SHIFT 12
27
28static inline u64 btrfs_sb_offset(int mirror)
29{
30 u64 start = 16 * 1024;
31 if (mirror)
32 return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
33 return BTRFS_SUPER_INFO_OFFSET;
34}
35
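/*
 * With BTRFS_SUPER_MIRROR_SHIFT == 12, btrfs_sb_offset() places the
 * super block copies at:
 *
 *	mirror 0: BTRFS_SUPER_INFO_OFFSET = 64KB
 *	mirror 1: 16KB << 12 = 64MB
 *	mirror 2: 16KB << 24 = 256GB
 */
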
36struct btrfs_device;
37struct btrfs_fs_devices;
38
39struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
40 u32 blocksize, u64 parent_transid);
41int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
42 u64 parent_transid);
43struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
44 u64 bytenr, u32 blocksize);
45int clean_tree_block(struct btrfs_trans_handle *trans,
46 struct btrfs_root *root, struct extent_buffer *buf);
47struct btrfs_root *open_ctree(struct super_block *sb,
48 struct btrfs_fs_devices *fs_devices,
49 char *options);
50int close_ctree(struct btrfs_root *root);
51int write_ctree_super(struct btrfs_trans_handle *trans,
52 struct btrfs_root *root, int max_mirrors);
53struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
54int btrfs_commit_super(struct btrfs_root *root);
55struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
56 u64 bytenr, u32 blocksize);
57struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
58 u64 root_objectid);
59struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
60 struct btrfs_key *location,
61 const char *name, int namelen);
62struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
63 struct btrfs_key *location);
64struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
65 struct btrfs_key *location);
66int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
67int btrfs_insert_dev_radix(struct btrfs_root *root,
68 struct block_device *bdev,
69 u64 device_id,
70 u64 block_start,
71 u64 num_blocks);
72void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
73int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
74void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
75int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
76int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
77int wait_on_tree_block_writeback(struct btrfs_root *root,
78 struct extent_buffer *buf);
79int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
80u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
81void btrfs_csum_final(u32 crc, char *result);
82int btrfs_open_device(struct btrfs_device *dev);
83int btrfs_verify_block_csum(struct btrfs_root *root,
84 struct extent_buffer *buf);
85int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
86 int metadata);
87int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
88 int rw, struct bio *bio, int mirror_num,
89 unsigned long bio_flags,
90 extent_submit_bio_hook_t *submit_bio_start,
91 extent_submit_bio_hook_t *submit_bio_done);
92
93int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
94unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
95int btrfs_write_tree_block(struct extent_buffer *buf);
96int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
97int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
98 struct btrfs_fs_info *fs_info);
99int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
100 struct btrfs_fs_info *fs_info);
101int btree_lock_page_hook(struct page *page);
102#endif
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
new file mode 100644
index 000000000000..85315d2c90de
--- /dev/null
+++ b/fs/btrfs/export.c
@@ -0,0 +1,203 @@
1#include <linux/fs.h>
2#include <linux/types.h>
3#include "ctree.h"
4#include "disk-io.h"
5#include "btrfs_inode.h"
6#include "print-tree.h"
7#include "export.h"
8#include "compat.h"
9
10#define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, \
11 parent_objectid) / 4)
12#define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, \
13 parent_root_objectid) / 4)
14#define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4)
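
/*
 * struct btrfs_fid is packed, so the sizes above work out to (in
 * 32-bit words):
 *	non-connectable:	8 + 8 + 4 = 20 bytes -> 5 words
 *	connectable:		20 + 8 + 4 = 32 bytes -> 8 words
 *	connectable with root:	32 + 8 = 40 bytes -> 10 words
 */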
15
16static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
17 int connectable)
18{
19 struct btrfs_fid *fid = (struct btrfs_fid *)fh;
20 struct inode *inode = dentry->d_inode;
21 int len = *max_len;
22 int type;
23
24 if ((len < BTRFS_FID_SIZE_NON_CONNECTABLE) ||
25 (connectable && len < BTRFS_FID_SIZE_CONNECTABLE))
26 return 255;
27
28 len = BTRFS_FID_SIZE_NON_CONNECTABLE;
29 type = FILEID_BTRFS_WITHOUT_PARENT;
30
31 fid->objectid = BTRFS_I(inode)->location.objectid;
32 fid->root_objectid = BTRFS_I(inode)->root->objectid;
33 fid->gen = inode->i_generation;
34
35 if (connectable && !S_ISDIR(inode->i_mode)) {
36 struct inode *parent;
37 u64 parent_root_id;
38
39 spin_lock(&dentry->d_lock);
40
41 parent = dentry->d_parent->d_inode;
42 fid->parent_objectid = BTRFS_I(parent)->location.objectid;
43 fid->parent_gen = parent->i_generation;
44 parent_root_id = BTRFS_I(parent)->root->objectid;
45
46 spin_unlock(&dentry->d_lock);
47
48 if (parent_root_id != fid->root_objectid) {
49 fid->parent_root_objectid = parent_root_id;
50 len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
51 type = FILEID_BTRFS_WITH_PARENT_ROOT;
52 } else {
53 len = BTRFS_FID_SIZE_CONNECTABLE;
54 type = FILEID_BTRFS_WITH_PARENT;
55 }
56 }
57
58 *max_len = len;
59 return type;
60}
61
62static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
63 u64 root_objectid, u32 generation)
64{
65 struct btrfs_root *root;
66 struct inode *inode;
67 struct btrfs_key key;
68
69 key.objectid = root_objectid;
70 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
71 key.offset = (u64)-1;
72
73 root = btrfs_read_fs_root_no_name(btrfs_sb(sb)->fs_info, &key);
74 if (IS_ERR(root))
75 return ERR_CAST(root);
76
77 key.objectid = objectid;
78 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
79 key.offset = 0;
80
81 inode = btrfs_iget(sb, &key, root, NULL);
82 if (IS_ERR(inode))
83 return (void *)inode;
84
85 if (generation != inode->i_generation) {
86 iput(inode);
87 return ERR_PTR(-ESTALE);
88 }
89
90 return d_obtain_alias(inode);
91}
92
93static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh,
94 int fh_len, int fh_type)
95{
96 struct btrfs_fid *fid = (struct btrfs_fid *) fh;
97 u64 objectid, root_objectid;
98 u32 generation;
99
100 if (fh_type == FILEID_BTRFS_WITH_PARENT) {
101 if (fh_len != BTRFS_FID_SIZE_CONNECTABLE)
102 return NULL;
103 root_objectid = fid->root_objectid;
104 } else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) {
105 if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT)
106 return NULL;
107 root_objectid = fid->parent_root_objectid;
108 } else
109 return NULL;
110
111 objectid = fid->parent_objectid;
112 generation = fid->parent_gen;
113
114 return btrfs_get_dentry(sb, objectid, root_objectid, generation);
115}
116
117static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
118 int fh_len, int fh_type)
119{
120 struct btrfs_fid *fid = (struct btrfs_fid *) fh;
121 u64 objectid, root_objectid;
122 u32 generation;
123
124 if ((fh_type != FILEID_BTRFS_WITH_PARENT ||
125 fh_len != BTRFS_FID_SIZE_CONNECTABLE) &&
126 (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT ||
127 fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) &&
128 (fh_type != FILEID_BTRFS_WITHOUT_PARENT ||
129 fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE))
130 return NULL;
131
132 objectid = fid->objectid;
133 root_objectid = fid->root_objectid;
134 generation = fid->gen;
135
136 return btrfs_get_dentry(sb, objectid, root_objectid, generation);
137}
138
139static struct dentry *btrfs_get_parent(struct dentry *child)
140{
141 struct inode *dir = child->d_inode;
142 struct btrfs_root *root = BTRFS_I(dir)->root;
143 struct btrfs_key key;
144 struct btrfs_path *path;
145 struct extent_buffer *leaf;
146 int slot;
147 u64 objectid;
148 int ret;
149
150 path = btrfs_alloc_path();
151
152 key.objectid = dir->i_ino;
153 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
154 key.offset = (u64)-1;
155
156 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
157 if (ret < 0) {
158 /* Error */
159 btrfs_free_path(path);
160 return ERR_PTR(ret);
161 }
162 leaf = path->nodes[0];
163 slot = path->slots[0];
164 if (ret) {
165 /* btrfs_search_slot() returns the slot where we'd want to
166 insert a backref for parent inode #0xFFFFFFFFFFFFFFFF.
167 The _real_ backref, telling us what the parent inode
168 _actually_ is, will be in the slot _before_ the one
169 that btrfs_search_slot() returns. */
170 if (!slot) {
171 /* Unless there is _no_ key in the tree before... */
172 btrfs_free_path(path);
173 return ERR_PTR(-EIO);
174 }
175 slot--;
176 }
177
178 btrfs_item_key_to_cpu(leaf, &key, slot);
179 btrfs_free_path(path);
180
181 if (key.objectid != dir->i_ino || key.type != BTRFS_INODE_REF_KEY)
182 return ERR_PTR(-EINVAL);
183
184 objectid = key.offset;
185
186 /* If we are already at the root of a subvol, return the real root */
187 if (objectid == dir->i_ino)
188 return dget(dir->i_sb->s_root);
189
190 /* Build a new key for the inode item */
191 key.objectid = objectid;
192 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
193 key.offset = 0;
194
195 return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
196}
197
198const struct export_operations btrfs_export_ops = {
199 .encode_fh = btrfs_encode_fh,
200 .fh_to_dentry = btrfs_fh_to_dentry,
201 .fh_to_parent = btrfs_fh_to_parent,
202 .get_parent = btrfs_get_parent,
203};
diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
new file mode 100644
index 000000000000..074348a95841
--- /dev/null
+++ b/fs/btrfs/export.h
@@ -0,0 +1,19 @@
1#ifndef BTRFS_EXPORT_H
2#define BTRFS_EXPORT_H
3
4#include <linux/exportfs.h>
5
6extern const struct export_operations btrfs_export_ops;
7
8struct btrfs_fid {
9 u64 objectid;
10 u64 root_objectid;
11 u32 gen;
12
13 u64 parent_objectid;
14 u32 parent_gen;
15
16 u64 parent_root_objectid;
17} __attribute__ ((packed));
18
19#endif
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
new file mode 100644
index 000000000000..293da650873f
--- /dev/null
+++ b/fs/btrfs/extent-tree.c
@@ -0,0 +1,5986 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 16 * Boston, MA 02111-1307, USA.
17 */
18#include <linux/sched.h>
19#include <linux/pagemap.h>
20#include <linux/writeback.h>
21#include <linux/blkdev.h>
22#include <linux/version.h>
23#include "compat.h"
24#include "hash.h"
25#include "crc32c.h"
26#include "ctree.h"
27#include "disk-io.h"
28#include "print-tree.h"
29#include "transaction.h"
30#include "volumes.h"
31#include "locking.h"
32#include "ref-cache.h"
33#include "compat.h"
34
35#define PENDING_EXTENT_INSERT 0
36#define PENDING_EXTENT_DELETE 1
37#define PENDING_BACKREF_UPDATE 2
38
39struct pending_extent_op {
40 int type;
41 u64 bytenr;
42 u64 num_bytes;
43 u64 parent;
44 u64 orig_parent;
45 u64 generation;
46 u64 orig_generation;
47 int level;
48 struct list_head list;
49 int del;
50};
51
52static int finish_current_insert(struct btrfs_trans_handle *trans,
53 struct btrfs_root *extent_root, int all);
54static int del_pending_extents(struct btrfs_trans_handle *trans,
55 struct btrfs_root *extent_root, int all);
56static int pin_down_bytes(struct btrfs_trans_handle *trans,
57 struct btrfs_root *root,
58 u64 bytenr, u64 num_bytes, int is_data);
59static int update_block_group(struct btrfs_trans_handle *trans,
60 struct btrfs_root *root,
61 u64 bytenr, u64 num_bytes, int alloc,
62 int mark_free);
63
64static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
65{
66 return (cache->flags & bits) == bits;
67}
68
69/*
70 * this adds the block group to the fs_info rb tree for the block group
71 * cache
72 */
73static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
74 struct btrfs_block_group_cache *block_group)
75{
76 struct rb_node **p;
77 struct rb_node *parent = NULL;
78 struct btrfs_block_group_cache *cache;
79
80 spin_lock(&info->block_group_cache_lock);
81 p = &info->block_group_cache_tree.rb_node;
82
83 while (*p) {
84 parent = *p;
85 cache = rb_entry(parent, struct btrfs_block_group_cache,
86 cache_node);
87 if (block_group->key.objectid < cache->key.objectid) {
88 p = &(*p)->rb_left;
89 } else if (block_group->key.objectid > cache->key.objectid) {
90 p = &(*p)->rb_right;
91 } else {
92 spin_unlock(&info->block_group_cache_lock);
93 return -EEXIST;
94 }
95 }
96
97 rb_link_node(&block_group->cache_node, parent, p);
98 rb_insert_color(&block_group->cache_node,
99 &info->block_group_cache_tree);
100 spin_unlock(&info->block_group_cache_lock);
101
102 return 0;
103}
104
105/*
106 * This will return the block group at or after bytenr if contains is 0, else
107 * it will return the block group that contains the bytenr
108 */
109static struct btrfs_block_group_cache *
110block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
111 int contains)
112{
113 struct btrfs_block_group_cache *cache, *ret = NULL;
114 struct rb_node *n;
115 u64 end, start;
116
117 spin_lock(&info->block_group_cache_lock);
118 n = info->block_group_cache_tree.rb_node;
119
120 while (n) {
121 cache = rb_entry(n, struct btrfs_block_group_cache,
122 cache_node);
123 end = cache->key.objectid + cache->key.offset - 1;
124 start = cache->key.objectid;
125
126 if (bytenr < start) {
127 if (!contains && (!ret || start < ret->key.objectid))
128 ret = cache;
129 n = n->rb_left;
130 } else if (bytenr > start) {
131 if (contains && bytenr <= end) {
132 ret = cache;
133 break;
134 }
135 n = n->rb_right;
136 } else {
137 ret = cache;
138 break;
139 }
140 }
141 if (ret)
142 atomic_inc(&ret->count);
143 spin_unlock(&info->block_group_cache_lock);
144
145 return ret;
146}
147
148/*
 149 * this is only called by cache_block_group.  Since we could have freed
 150 * extents, we need to check the pinned_extents tree for any extents that
 151 * can't be used yet; their free space is only released when the transaction commits.
152 */
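/*
 * For example, for a group spanning [0, 100) with pinned extents
 * [10, 20] and [30, 40], this records [0, 10), [21, 30) and [41, 100)
 * as free space.
 */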
153static int add_new_free_space(struct btrfs_block_group_cache *block_group,
154 struct btrfs_fs_info *info, u64 start, u64 end)
155{
156 u64 extent_start, extent_end, size;
157 int ret;
158
159 mutex_lock(&info->pinned_mutex);
160 while (start < end) {
161 ret = find_first_extent_bit(&info->pinned_extents, start,
162 &extent_start, &extent_end,
163 EXTENT_DIRTY);
164 if (ret)
165 break;
166
167 if (extent_start == start) {
168 start = extent_end + 1;
169 } else if (extent_start > start && extent_start < end) {
170 size = extent_start - start;
171 ret = btrfs_add_free_space(block_group, start,
172 size);
173 BUG_ON(ret);
174 start = extent_end + 1;
175 } else {
176 break;
177 }
178 }
179
180 if (start < end) {
181 size = end - start;
182 ret = btrfs_add_free_space(block_group, start, size);
183 BUG_ON(ret);
184 }
185 mutex_unlock(&info->pinned_mutex);
186
187 return 0;
188}
189
190static int remove_sb_from_cache(struct btrfs_root *root,
191 struct btrfs_block_group_cache *cache)
192{
193 u64 bytenr;
194 u64 *logical;
195 int stripe_len;
196 int i, nr, ret;
197
198 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
199 bytenr = btrfs_sb_offset(i);
200 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
201 cache->key.objectid, bytenr, 0,
202 &logical, &nr, &stripe_len);
203 BUG_ON(ret);
204 while (nr--) {
205 btrfs_remove_free_space(cache, logical[nr],
206 stripe_len);
207 }
208 kfree(logical);
209 }
210 return 0;
211}
212
213static int cache_block_group(struct btrfs_root *root,
214 struct btrfs_block_group_cache *block_group)
215{
216 struct btrfs_path *path;
217 int ret = 0;
218 struct btrfs_key key;
219 struct extent_buffer *leaf;
220 int slot;
221 u64 last;
222
223 if (!block_group)
224 return 0;
225
226 root = root->fs_info->extent_root;
227
228 if (block_group->cached)
229 return 0;
230
231 path = btrfs_alloc_path();
232 if (!path)
233 return -ENOMEM;
234
235 path->reada = 2;
236 /*
237 * we get into deadlocks with paths held by callers of this function.
238 * since the alloc_mutex is protecting things right now, just
239 * skip the locking here
240 */
241 path->skip_locking = 1;
242 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
243 key.objectid = last;
244 key.offset = 0;
245 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
246 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
247 if (ret < 0)
248 goto err;
249
250 while (1) {
251 leaf = path->nodes[0];
252 slot = path->slots[0];
253 if (slot >= btrfs_header_nritems(leaf)) {
254 ret = btrfs_next_leaf(root, path);
255 if (ret < 0)
256 goto err;
257 if (ret == 0)
258 continue;
259 else
260 break;
261 }
262 btrfs_item_key_to_cpu(leaf, &key, slot);
263 if (key.objectid < block_group->key.objectid)
264 goto next;
265
266 if (key.objectid >= block_group->key.objectid +
267 block_group->key.offset)
268 break;
269
270 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
271 add_new_free_space(block_group, root->fs_info, last,
272 key.objectid);
273
274 last = key.objectid + key.offset;
275 }
276next:
277 path->slots[0]++;
278 }
279
280 add_new_free_space(block_group, root->fs_info, last,
281 block_group->key.objectid +
282 block_group->key.offset);
283
284 remove_sb_from_cache(root, block_group);
285 block_group->cached = 1;
286 ret = 0;
287err:
288 btrfs_free_path(path);
289 return ret;
290}
291
292/*
293 * return the block group that starts at or after bytenr
294 */
295static struct btrfs_block_group_cache *
296btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
297{
298 struct btrfs_block_group_cache *cache;
299
300 cache = block_group_cache_tree_search(info, bytenr, 0);
301
302 return cache;
303}
304
305/*
 306 * return the block group that contains the given bytenr
307 */
308struct btrfs_block_group_cache *btrfs_lookup_block_group(
309 struct btrfs_fs_info *info,
310 u64 bytenr)
311{
312 struct btrfs_block_group_cache *cache;
313
314 cache = block_group_cache_tree_search(info, bytenr, 1);
315
316 return cache;
317}
318
319static inline void put_block_group(struct btrfs_block_group_cache *cache)
320{
321 if (atomic_dec_and_test(&cache->count))
322 kfree(cache);
323}
324
325static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
326 u64 flags)
327{
328 struct list_head *head = &info->space_info;
329 struct list_head *cur;
330 struct btrfs_space_info *found;
331 list_for_each(cur, head) {
332 found = list_entry(cur, struct btrfs_space_info, list);
333 if (found->flags == flags)
334 return found;
335 }
336 return NULL;
337}
338
339static u64 div_factor(u64 num, int factor)
340{
341 if (factor == 10)
342 return num;
343 num *= factor;
344 do_div(num, 10);
345 return num;
346}
347
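/*
 * div_factor(num, 9) is num * 9 / 10, so btrfs_find_block_group()
 * below first looks for groups whose used + pinned + reserved space is
 * under 90% of their size, and only accepts any group with free space
 * at all (factor 10) on a full search.
 */
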
348u64 btrfs_find_block_group(struct btrfs_root *root,
349 u64 search_start, u64 search_hint, int owner)
350{
351 struct btrfs_block_group_cache *cache;
352 u64 used;
353 u64 last = max(search_hint, search_start);
354 u64 group_start = 0;
355 int full_search = 0;
356 int factor = 9;
357 int wrapped = 0;
358again:
359 while (1) {
360 cache = btrfs_lookup_first_block_group(root->fs_info, last);
361 if (!cache)
362 break;
363
364 spin_lock(&cache->lock);
365 last = cache->key.objectid + cache->key.offset;
366 used = btrfs_block_group_used(&cache->item);
367
368 if ((full_search || !cache->ro) &&
369 block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
370 if (used + cache->pinned + cache->reserved <
371 div_factor(cache->key.offset, factor)) {
372 group_start = cache->key.objectid;
373 spin_unlock(&cache->lock);
374 put_block_group(cache);
375 goto found;
376 }
377 }
378 spin_unlock(&cache->lock);
379 put_block_group(cache);
380 cond_resched();
381 }
382 if (!wrapped) {
383 last = search_start;
384 wrapped = 1;
385 goto again;
386 }
387 if (!full_search && factor < 10) {
388 last = search_start;
389 full_search = 1;
390 factor = 10;
391 goto again;
392 }
393found:
394 return group_start;
395}
396
397/* simple helper to search for an existing extent at a given offset */
398int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
399{
400 int ret;
401 struct btrfs_key key;
402 struct btrfs_path *path;
403
404 path = btrfs_alloc_path();
405 BUG_ON(!path);
406 key.objectid = start;
407 key.offset = len;
408 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
409 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
410 0, 0);
411 btrfs_free_path(path);
412 return ret;
413}
414
415/*
416 * Back reference rules. Back refs have three main goals:
417 *
418 * 1) Differentiate between all holders of references to an extent so that
419 * when a reference is dropped we can make sure it was a valid reference
420 * before freeing the extent.
421 *
422 * 2) Provide enough information to quickly find the holders of an extent
423 * if we notice a given block is corrupted or bad.
424 *
425 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
426 * maintenance. This is actually the same as #2, but with a slightly
427 * different use case.
428 *
429 * File extents can be referenced by:
430 *
431 * - multiple snapshots, subvolumes, or different generations in one subvol
432 * - different files inside a single subvolume
433 * - different offsets inside a file (bookend extents in file.c)
434 *
435 * The extent ref structure has fields for:
436 *
437 * - Objectid of the subvolume root
438 * - Generation number of the tree holding the reference
439 * - objectid of the file holding the reference
440 * - number of references held by the parent node (always 1 for tree blocks)
441 *
442 * A btree leaf may hold multiple references to a file extent. In most cases,
443 * these references are from the same file and the corresponding offsets inside
444 * the file are close together.
445 *
446 * When a file extent is allocated the fields are filled in:
447 * (root_key.objectid, trans->transid, inode objectid, 1)
448 *
449 * When a leaf is cow'd, new references are added for every file extent found
450 * in the leaf. It looks similar to the create case, but trans->transid will
451 * be different when the block is cow'd.
452 *
453 * (root_key.objectid, trans->transid, inode objectid,
454 * number of references in the leaf)
455 *
456 * When a file extent is removed either during snapshot deletion or
457 * file truncation, we find the corresponding back reference and check
458 * the following fields:
459 *
460 * (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
461 * inode objectid)
462 *
463 * Btree extents can be referenced by:
464 *
465 * - Different subvolumes
466 * - Different generations of the same subvolume
467 *
468 * When a tree block is created, back references are inserted:
469 *
470 * (root->root_key.objectid, trans->transid, level, 1)
471 *
472 * When a tree block is cow'd, new back references are added for all the
473 * blocks it points to. If the tree block isn't in a reference counted root,
474 * the old back references are removed. These new back references are of
475 * the form (trans->transid will have increased since creation):
476 *
477 * (root->root_key.objectid, trans->transid, level, 1)
478 *
479 * When a backref is being deleted, the following fields are checked:
480 *
481 * if backref was for a tree root:
482 * (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
483 * else
484 * (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
485 *
486 * Back reference key composition:
487 *
488 * The key objectid corresponds to the first byte in the extent, the key
489 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
490 * byte of the parent extent. If an extent is a tree root, the key offset
491 * is set to the key objectid.
492 */
493
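/*
 * A minimal sketch of the key composition described above; this helper is
 * purely illustrative and is not used elsewhere in this file.  It assumes
 * the convention that a parent of 0 denotes a tree root, whereas the real
 * callers simply pass the appropriate parent (or self) start byte.
 */
static inline void compose_backref_key(struct btrfs_key *key,
				       u64 extent_start, u64 parent_start)
{
	key->objectid = extent_start;		/* first byte of the extent */
	key->type = BTRFS_EXTENT_REF_KEY;
	/* a tree root references itself: offset == objectid */
	key->offset = parent_start ? parent_start : extent_start;
}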
494static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans,
495 struct btrfs_root *root,
496 struct btrfs_path *path,
497 u64 bytenr, u64 parent,
498 u64 ref_root, u64 ref_generation,
499 u64 owner_objectid, int del)
500{
501 struct btrfs_key key;
502 struct btrfs_extent_ref *ref;
503 struct extent_buffer *leaf;
504 u64 ref_objectid;
505 int ret;
506
507 key.objectid = bytenr;
508 key.type = BTRFS_EXTENT_REF_KEY;
509 key.offset = parent;
510
511 ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
512 if (ret < 0)
513 goto out;
514 if (ret > 0) {
515 ret = -ENOENT;
516 goto out;
517 }
518
519 leaf = path->nodes[0];
520 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
521 ref_objectid = btrfs_ref_objectid(leaf, ref);
522 if (btrfs_ref_root(leaf, ref) != ref_root ||
523 btrfs_ref_generation(leaf, ref) != ref_generation ||
524 (ref_objectid != owner_objectid &&
525 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
526 ret = -EIO;
527 WARN_ON(1);
528 goto out;
529 }
530 ret = 0;
531out:
532 return ret;
533}
534
535/*
536 * updates all the backrefs that are pending on update_list for the
537 * extent_root
538 */
539static noinline int update_backrefs(struct btrfs_trans_handle *trans,
540 struct btrfs_root *extent_root,
541 struct btrfs_path *path,
542 struct list_head *update_list)
543{
544 struct btrfs_key key;
545 struct btrfs_extent_ref *ref;
546 struct btrfs_fs_info *info = extent_root->fs_info;
547 struct pending_extent_op *op;
548 struct extent_buffer *leaf;
549 int ret = 0;
550 struct list_head *cur = update_list->next;
551 u64 ref_objectid;
552 u64 ref_root = extent_root->root_key.objectid;
553
554 op = list_entry(cur, struct pending_extent_op, list);
555
556search:
557 key.objectid = op->bytenr;
558 key.type = BTRFS_EXTENT_REF_KEY;
559 key.offset = op->orig_parent;
560
561 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
562 BUG_ON(ret);
563
564 leaf = path->nodes[0];
565
566loop:
567 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
568
569 ref_objectid = btrfs_ref_objectid(leaf, ref);
570
571 if (btrfs_ref_root(leaf, ref) != ref_root ||
572 btrfs_ref_generation(leaf, ref) != op->orig_generation ||
573 (ref_objectid != op->level &&
574 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
575 printk(KERN_ERR "btrfs couldn't find %llu, parent %llu, "
576 "root %llu, owner %u\n",
577 (unsigned long long)op->bytenr,
578 (unsigned long long)op->orig_parent,
579 (unsigned long long)ref_root, op->level);
580 btrfs_print_leaf(extent_root, leaf);
581 BUG();
582 }
583
584 key.objectid = op->bytenr;
585 key.offset = op->parent;
586 key.type = BTRFS_EXTENT_REF_KEY;
587 ret = btrfs_set_item_key_safe(trans, extent_root, path, &key);
588 BUG_ON(ret);
589 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
590 btrfs_set_ref_generation(leaf, ref, op->generation);
591
592 cur = cur->next;
593
594 list_del_init(&op->list);
595 unlock_extent(&info->extent_ins, op->bytenr,
596 op->bytenr + op->num_bytes - 1, GFP_NOFS);
597 kfree(op);
598
599 if (cur == update_list) {
600 btrfs_mark_buffer_dirty(path->nodes[0]);
601 btrfs_release_path(extent_root, path);
602 goto out;
603 }
604
605 op = list_entry(cur, struct pending_extent_op, list);
606
607 path->slots[0]++;
608 while (path->slots[0] < btrfs_header_nritems(leaf)) {
609 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
610 if (key.objectid == op->bytenr &&
611 key.type == BTRFS_EXTENT_REF_KEY)
612 goto loop;
613 path->slots[0]++;
614 }
615
616 btrfs_mark_buffer_dirty(path->nodes[0]);
617 btrfs_release_path(extent_root, path);
618 goto search;
619
620out:
621 return 0;
622}
623
624static noinline int insert_extents(struct btrfs_trans_handle *trans,
625 struct btrfs_root *extent_root,
626 struct btrfs_path *path,
627 struct list_head *insert_list, int nr)
628{
629 struct btrfs_key *keys;
630 u32 *data_size;
631 struct pending_extent_op *op;
632 struct extent_buffer *leaf;
633 struct list_head *cur = insert_list->next;
634 struct btrfs_fs_info *info = extent_root->fs_info;
635 u64 ref_root = extent_root->root_key.objectid;
636 int i = 0, last = 0, ret;
637 int total = nr * 2;
638
639 if (!nr)
640 return 0;
641
642 keys = kzalloc(total * sizeof(struct btrfs_key), GFP_NOFS);
643 if (!keys)
644 return -ENOMEM;
645
646 data_size = kzalloc(total * sizeof(u32), GFP_NOFS);
647 if (!data_size) {
648 kfree(keys);
649 return -ENOMEM;
650 }
651
652 list_for_each_entry(op, insert_list, list) {
653 keys[i].objectid = op->bytenr;
654 keys[i].offset = op->num_bytes;
655 keys[i].type = BTRFS_EXTENT_ITEM_KEY;
656 data_size[i] = sizeof(struct btrfs_extent_item);
657 i++;
658
659 keys[i].objectid = op->bytenr;
660 keys[i].offset = op->parent;
661 keys[i].type = BTRFS_EXTENT_REF_KEY;
662 data_size[i] = sizeof(struct btrfs_extent_ref);
663 i++;
664 }
665
666 op = list_entry(cur, struct pending_extent_op, list);
667 i = 0;
668 while (i < total) {
669 int c;
670 ret = btrfs_insert_some_items(trans, extent_root, path,
671 keys+i, data_size+i, total-i);
672 BUG_ON(ret < 0);
673
674 if (last && ret > 1)
675 BUG();
676
677 leaf = path->nodes[0];
678 for (c = 0; c < ret; c++) {
679 int ref_first = keys[i].type == BTRFS_EXTENT_REF_KEY;
680
681 /*
682 * if the first item we inserted was a backref, then
683 * the EXTENT_ITEM will be the odd c's, else it will
684 * be the even c's
685 */
686 if ((ref_first && (c % 2)) ||
687 (!ref_first && !(c % 2))) {
688 struct btrfs_extent_item *itm;
689
690 itm = btrfs_item_ptr(leaf, path->slots[0] + c,
691 struct btrfs_extent_item);
692 btrfs_set_extent_refs(path->nodes[0], itm, 1);
693 op->del++;
694 } else {
695 struct btrfs_extent_ref *ref;
696
697 ref = btrfs_item_ptr(leaf, path->slots[0] + c,
698 struct btrfs_extent_ref);
699 btrfs_set_ref_root(leaf, ref, ref_root);
700 btrfs_set_ref_generation(leaf, ref,
701 op->generation);
702 btrfs_set_ref_objectid(leaf, ref, op->level);
703 btrfs_set_ref_num_refs(leaf, ref, 1);
704 op->del++;
705 }
706
707 /*
708 * we use del to track when it's ok to free up the
709 * pending_extent_op. In the case where we insert the
710 * last item on the list in order to help do batching,
711 * we must not free the extent op until we actually
712 * insert the extent_item
713 */
714 if (op->del == 2) {
715 unlock_extent(&info->extent_ins, op->bytenr,
716 op->bytenr + op->num_bytes - 1,
717 GFP_NOFS);
718 cur = cur->next;
719 list_del_init(&op->list);
720 kfree(op);
721 if (cur != insert_list)
722 op = list_entry(cur,
723 struct pending_extent_op,
724 list);
725 }
726 }
727 btrfs_mark_buffer_dirty(leaf);
728 btrfs_release_path(extent_root, path);
729
730 /*
731 * Ok, backrefs and items usually go right next to each other,
732 * but if we could only insert 1 item that means that we
733 * inserted on the end of a leaf, and we have no idea what may
734 * be on the next leaf so we just play it safe. In order to
735 * try and help this case we insert the last thing on our
736 * insert list so hopefully it will end up being the last
737 * thing on the leaf and everything else will be before it,
738 * which will let us insert a whole bunch of items at the same
739 * time.
740 */
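		/*
		 * e.g. with total == 6, if the first insert only manages
		 * ret == 1 we park the resume point in last (= 1), jump to
		 * the final key (i = total - 1 = 5) and insert it on its
		 * own; once that succeeds the else-if below restores
		 * i = last, drops total to 5 and carries on batching
		 */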
741 if (ret == 1 && !last && (i + ret < total)) {
742 /*
743 * last: where we will pick up the next time around
744 * i: our current key to insert, will be total - 1
745 * cur: the current op we are screwing with
746 * op: duh
747 */
748 last = i + ret;
749 i = total - 1;
750 cur = insert_list->prev;
751 op = list_entry(cur, struct pending_extent_op, list);
752 } else if (last) {
753 /*
754 * ok we successfully inserted the last item on the
755 * list, let's reset everything
756 *
757 * i: our current key to insert, so where we left off
758 * last time
759 * last: done with this
760 * cur: the op we are messing with
761 * op: duh
762 * total: since we inserted the last key, we need to
763 * decrement total so we don't overflow
764 */
765 i = last;
766 last = 0;
767 total--;
768 if (i < total) {
769 cur = insert_list->next;
770 op = list_entry(cur, struct pending_extent_op,
771 list);
772 }
773 } else {
774 i += ret;
775 }
776
777 cond_resched();
778 }
779 ret = 0;
780 kfree(keys);
781 kfree(data_size);
782 return ret;
783}
784
785static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
786 struct btrfs_root *root,
787 struct btrfs_path *path,
788 u64 bytenr, u64 parent,
789 u64 ref_root, u64 ref_generation,
790 u64 owner_objectid)
791{
792 struct btrfs_key key;
793 struct extent_buffer *leaf;
794 struct btrfs_extent_ref *ref;
795 u32 num_refs;
796 int ret;
797
798 key.objectid = bytenr;
799 key.type = BTRFS_EXTENT_REF_KEY;
800 key.offset = parent;
801
802 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
803 if (ret == 0) {
804 leaf = path->nodes[0];
805 ref = btrfs_item_ptr(leaf, path->slots[0],
806 struct btrfs_extent_ref);
807 btrfs_set_ref_root(leaf, ref, ref_root);
808 btrfs_set_ref_generation(leaf, ref, ref_generation);
809 btrfs_set_ref_objectid(leaf, ref, owner_objectid);
810 btrfs_set_ref_num_refs(leaf, ref, 1);
811 } else if (ret == -EEXIST) {
812 u64 existing_owner;
813 BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
814 leaf = path->nodes[0];
815 ref = btrfs_item_ptr(leaf, path->slots[0],
816 struct btrfs_extent_ref);
817 if (btrfs_ref_root(leaf, ref) != ref_root ||
818 btrfs_ref_generation(leaf, ref) != ref_generation) {
819 ret = -EIO;
820 WARN_ON(1);
821 goto out;
822 }
823
824 num_refs = btrfs_ref_num_refs(leaf, ref);
825 BUG_ON(num_refs == 0);
826 btrfs_set_ref_num_refs(leaf, ref, num_refs + 1);
827
828 existing_owner = btrfs_ref_objectid(leaf, ref);
829 if (existing_owner != owner_objectid &&
830 existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
831 btrfs_set_ref_objectid(leaf, ref,
832 BTRFS_MULTIPLE_OBJECTIDS);
833 }
834 ret = 0;
835 } else {
836 goto out;
837 }
838 btrfs_mark_buffer_dirty(path->nodes[0]);
839out:
840 btrfs_release_path(root, path);
841 return ret;
842}
843
844static noinline int remove_extent_backref(struct btrfs_trans_handle *trans,
845 struct btrfs_root *root,
846 struct btrfs_path *path)
847{
848 struct extent_buffer *leaf;
849 struct btrfs_extent_ref *ref;
850 u32 num_refs;
851 int ret = 0;
852
853 leaf = path->nodes[0];
854 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
855 num_refs = btrfs_ref_num_refs(leaf, ref);
856 BUG_ON(num_refs == 0);
857 num_refs -= 1;
858 if (num_refs == 0) {
859 ret = btrfs_del_item(trans, root, path);
860 } else {
861 btrfs_set_ref_num_refs(leaf, ref, num_refs);
862 btrfs_mark_buffer_dirty(leaf);
863 }
864 btrfs_release_path(root, path);
865 return ret;
866}
867
868#ifdef BIO_RW_DISCARD
869static void btrfs_issue_discard(struct block_device *bdev,
870 u64 start, u64 len)
871{
872 blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
873}
874#endif
875
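/*
 * map a logical byte range to its physical stripes via btrfs_map_block()
 * and issue a discard for each stripe; this compiles to a no-op when the
 * block layer has no BIO_RW_DISCARD support
 */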
876static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
877 u64 num_bytes)
878{
879#ifdef BIO_RW_DISCARD
880 int ret;
881 u64 map_length = num_bytes;
882 struct btrfs_multi_bio *multi = NULL;
883
884 /* Tell the block device(s) that the sectors can be discarded */
885 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
886 bytenr, &map_length, &multi, 0);
887 if (!ret) {
888 struct btrfs_bio_stripe *stripe = multi->stripes;
889 int i;
890
891 if (map_length > num_bytes)
892 map_length = num_bytes;
893
894 for (i = 0; i < multi->num_stripes; i++, stripe++) {
895 btrfs_issue_discard(stripe->dev->bdev,
896 stripe->physical,
897 map_length);
898 }
899 kfree(multi);
900 }
901
902 return ret;
903#else
904 return 0;
905#endif
906}
907
908static noinline int free_extents(struct btrfs_trans_handle *trans,
909 struct btrfs_root *extent_root,
910 struct list_head *del_list)
911{
912 struct btrfs_fs_info *info = extent_root->fs_info;
913 struct btrfs_path *path;
914 struct btrfs_key key, found_key;
915 struct extent_buffer *leaf;
916 struct list_head *cur;
917 struct pending_extent_op *op;
918 struct btrfs_extent_item *ei;
919 int ret, num_to_del, extent_slot = 0, found_extent = 0;
920 u32 refs;
921 u64 bytes_freed = 0;
922
923 path = btrfs_alloc_path();
924 if (!path)
925 return -ENOMEM;
926 path->reada = 1;
927
928search:
929 /* search for the backref for the current ref we want to delete */
930 cur = del_list->next;
931 op = list_entry(cur, struct pending_extent_op, list);
932 ret = lookup_extent_backref(trans, extent_root, path, op->bytenr,
933 op->orig_parent,
934 extent_root->root_key.objectid,
935 op->orig_generation, op->level, 1);
936 if (ret) {
937 printk(KERN_ERR "btrfs unable to find backref byte nr %llu "
938 "root %llu gen %llu owner %u\n",
939 (unsigned long long)op->bytenr,
940 (unsigned long long)extent_root->root_key.objectid,
941 (unsigned long long)op->orig_generation, op->level);
942 btrfs_print_leaf(extent_root, path->nodes[0]);
943 WARN_ON(1);
944 goto out;
945 }
946
947 extent_slot = path->slots[0];
948 num_to_del = 1;
949 found_extent = 0;
950
951 /*
952 * if we aren't the first item on the leaf we can move back one and see
953 * if our ref is right next to our extent item
954 */
955 if (likely(extent_slot)) {
956 extent_slot--;
957 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
958 extent_slot);
959 if (found_key.objectid == op->bytenr &&
960 found_key.type == BTRFS_EXTENT_ITEM_KEY &&
961 found_key.offset == op->num_bytes) {
962 num_to_del++;
963 found_extent = 1;
964 }
965 }
966
967 /*
968 * if we didn't find the extent we need to delete the backref and then
969 * search for the extent item key so we can update its ref count
970 */
971 if (!found_extent) {
972 key.objectid = op->bytenr;
973 key.type = BTRFS_EXTENT_ITEM_KEY;
974 key.offset = op->num_bytes;
975
976 ret = remove_extent_backref(trans, extent_root, path);
977 BUG_ON(ret);
978 btrfs_release_path(extent_root, path);
979 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
980 BUG_ON(ret);
981 extent_slot = path->slots[0];
982 }
983
984 /* this is where we update the ref count for the extent */
985 leaf = path->nodes[0];
986 ei = btrfs_item_ptr(leaf, extent_slot, struct btrfs_extent_item);
987 refs = btrfs_extent_refs(leaf, ei);
988 BUG_ON(refs == 0);
989 refs--;
990 btrfs_set_extent_refs(leaf, ei, refs);
991
992 btrfs_mark_buffer_dirty(leaf);
993
994 /*
995 * This extent needs deleting. The reason cur_slot is extent_slot +
996 * num_to_del is because extent_slot points to the slot where the extent
997 * is, and if the backref was not right next to the extent we will be
998 * deleting at least 1 item, and will want to start searching at the
999 * slot directly next to extent_slot. However if we did find the
1000 * backref next to the extent item then we will be deleting at least 2
1001 * items and will want to start searching directly after the ref slot
1002 */
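	/*
	 * e.g. if the backref sat directly after the extent item then
	 * extent_slot points at the extent item, num_to_del == 2 and
	 * cur_slot lands just past the pair; otherwise the backref has
	 * already been removed, num_to_del == 1 and cur_slot is the slot
	 * directly after the extent item
	 */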
1003 if (!refs) {
1004 struct list_head *pos, *n, *end;
1005 int cur_slot = extent_slot+num_to_del;
1006 u64 super_used;
1007 u64 root_used;
1008
1009 path->slots[0] = extent_slot;
1010 bytes_freed = op->num_bytes;
1011
1012 mutex_lock(&info->pinned_mutex);
1013 ret = pin_down_bytes(trans, extent_root, op->bytenr,
1014 op->num_bytes, op->level >=
1015 BTRFS_FIRST_FREE_OBJECTID);
1016 mutex_unlock(&info->pinned_mutex);
1017 BUG_ON(ret < 0);
1018 op->del = ret;
1019
1020 /*
1021 * we need to see if we can delete multiple things at once, so
1022 * start looping through the list of extents we want to
1023 * delete and see if their extent/backrefs are right next to
1024 * each other and the extents only have 1 ref
1025 */
1026 for (pos = cur->next; pos != del_list; pos = pos->next) {
1027 struct pending_extent_op *tmp;
1028
1029 tmp = list_entry(pos, struct pending_extent_op, list);
1030
1031 /* we only want to delete extent+ref at this stage */
1032 if (cur_slot >= btrfs_header_nritems(leaf) - 1)
1033 break;
1034
1035 btrfs_item_key_to_cpu(leaf, &found_key, cur_slot);
1036 if (found_key.objectid != tmp->bytenr ||
1037 found_key.type != BTRFS_EXTENT_ITEM_KEY ||
1038 found_key.offset != tmp->num_bytes)
1039 break;
1040
1041 /* check to make sure this extent only has one ref */
1042 ei = btrfs_item_ptr(leaf, cur_slot,
1043 struct btrfs_extent_item);
1044 if (btrfs_extent_refs(leaf, ei) != 1)
1045 break;
1046
1047 btrfs_item_key_to_cpu(leaf, &found_key, cur_slot+1);
1048 if (found_key.objectid != tmp->bytenr ||
1049 found_key.type != BTRFS_EXTENT_REF_KEY ||
1050 found_key.offset != tmp->orig_parent)
1051 break;
1052
1053 /*
1054 * the ref is right next to the extent, we can set the
1055 * ref count to 0 since we will delete them both now
1056 */
1057 btrfs_set_extent_refs(leaf, ei, 0);
1058
1059 /* pin down the bytes for this extent */
1060 mutex_lock(&info->pinned_mutex);
1061 ret = pin_down_bytes(trans, extent_root, tmp->bytenr,
1062 tmp->num_bytes, tmp->level >=
1063 BTRFS_FIRST_FREE_OBJECTID);
1064 mutex_unlock(&info->pinned_mutex);
1065 BUG_ON(ret < 0);
1066
1067 /*
1068 * use the del field to tell whether we need to go ahead
1069 * and free up the extent when we delete the item.
1070 */
1071 tmp->del = ret;
1072 bytes_freed += tmp->num_bytes;
1073
1074 num_to_del += 2;
1075 cur_slot += 2;
1076 }
1077 end = pos;
1078
1079 /* update the free space counters */
1080 spin_lock(&info->delalloc_lock);
1081 super_used = btrfs_super_bytes_used(&info->super_copy);
1082 btrfs_set_super_bytes_used(&info->super_copy,
1083 super_used - bytes_freed);
1084
1085 root_used = btrfs_root_used(&extent_root->root_item);
1086 btrfs_set_root_used(&extent_root->root_item,
1087 root_used - bytes_freed);
1088 spin_unlock(&info->delalloc_lock);
1089
1090 /* delete the items */
1091 ret = btrfs_del_items(trans, extent_root, path,
1092 path->slots[0], num_to_del);
1093 BUG_ON(ret);
1094
1095 /*
1096 * loop through the extents we deleted and do the cleanup work
1097 * on them
1098 */
1099 for (pos = cur, n = pos->next; pos != end;
1100 pos = n, n = pos->next) {
1101 struct pending_extent_op *tmp;
1102 tmp = list_entry(pos, struct pending_extent_op, list);
1103
1104 /*
1105 * remember tmp->del tells us whether or not we pinned
1106 * down the extent
1107 */
1108 ret = update_block_group(trans, extent_root,
1109 tmp->bytenr, tmp->num_bytes, 0,
1110 tmp->del);
1111 BUG_ON(ret);
1112
1113 list_del_init(&tmp->list);
1114 unlock_extent(&info->extent_ins, tmp->bytenr,
1115 tmp->bytenr + tmp->num_bytes - 1,
1116 GFP_NOFS);
1117 kfree(tmp);
1118 }
1119 } else if (refs && found_extent) {
1120 /*
1121 * the ref and extent were right next to each other, but the
1122 * extent still has a ref, so just free the backref and keep
1123 * going
1124 */
1125 ret = remove_extent_backref(trans, extent_root, path);
1126 BUG_ON(ret);
1127
1128 list_del_init(&op->list);
1129 unlock_extent(&info->extent_ins, op->bytenr,
1130 op->bytenr + op->num_bytes - 1, GFP_NOFS);
1131 kfree(op);
1132 } else {
1133 /*
1134 * the extent has multiple refs and the backref we were looking
1135 * for was not right next to it, so just unlock it and move on,
1136 * we're good to go
1137 */
1138 list_del_init(&op->list);
1139 unlock_extent(&info->extent_ins, op->bytenr,
1140 op->bytenr + op->num_bytes - 1, GFP_NOFS);
1141 kfree(op);
1142 }
1143
1144 btrfs_release_path(extent_root, path);
1145 if (!list_empty(del_list))
1146 goto search;
1147
1148out:
1149 btrfs_free_path(path);
1150 return ret;
1151}
1152
1153static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
1154 struct btrfs_root *root, u64 bytenr,
1155 u64 orig_parent, u64 parent,
1156 u64 orig_root, u64 ref_root,
1157 u64 orig_generation, u64 ref_generation,
1158 u64 owner_objectid)
1159{
1160 int ret;
1161 struct btrfs_root *extent_root = root->fs_info->extent_root;
1162 struct btrfs_path *path;
1163
1164 if (root == root->fs_info->extent_root) {
1165 struct pending_extent_op *extent_op;
1166 u64 num_bytes;
1167
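		/*
		 * for tree blocks the owner_objectid is really the level,
		 * which is why it must stay below BTRFS_MAX_LEVEL and can
		 * be fed straight to btrfs_level_size()
		 */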
1168 BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL);
1169 num_bytes = btrfs_level_size(root, (int)owner_objectid);
1170 mutex_lock(&root->fs_info->extent_ins_mutex);
1171 if (test_range_bit(&root->fs_info->extent_ins, bytenr,
1172 bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
1173 u64 priv;
1174 ret = get_state_private(&root->fs_info->extent_ins,
1175 bytenr, &priv);
1176 BUG_ON(ret);
1177 extent_op = (struct pending_extent_op *)
1178 (unsigned long)priv;
1179 BUG_ON(extent_op->parent != orig_parent);
1180 BUG_ON(extent_op->generation != orig_generation);
1181
1182 extent_op->parent = parent;
1183 extent_op->generation = ref_generation;
1184 } else {
1185 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
1186 BUG_ON(!extent_op);
1187
1188 extent_op->type = PENDING_BACKREF_UPDATE;
1189 extent_op->bytenr = bytenr;
1190 extent_op->num_bytes = num_bytes;
1191 extent_op->parent = parent;
1192 extent_op->orig_parent = orig_parent;
1193 extent_op->generation = ref_generation;
1194 extent_op->orig_generation = orig_generation;
1195 extent_op->level = (int)owner_objectid;
1196 INIT_LIST_HEAD(&extent_op->list);
1197 extent_op->del = 0;
1198
1199 set_extent_bits(&root->fs_info->extent_ins,
1200 bytenr, bytenr + num_bytes - 1,
1201 EXTENT_WRITEBACK, GFP_NOFS);
1202 set_state_private(&root->fs_info->extent_ins,
1203 bytenr, (unsigned long)extent_op);
1204 }
1205 mutex_unlock(&root->fs_info->extent_ins_mutex);
1206 return 0;
1207 }
1208
1209 path = btrfs_alloc_path();
1210 if (!path)
1211 return -ENOMEM;
1212 ret = lookup_extent_backref(trans, extent_root, path,
1213 bytenr, orig_parent, orig_root,
1214 orig_generation, owner_objectid, 1);
1215 if (ret)
1216 goto out;
1217 ret = remove_extent_backref(trans, extent_root, path);
1218 if (ret)
1219 goto out;
1220 ret = insert_extent_backref(trans, extent_root, path, bytenr,
1221 parent, ref_root, ref_generation,
1222 owner_objectid);
1223 BUG_ON(ret);
1224 finish_current_insert(trans, extent_root, 0);
1225 del_pending_extents(trans, extent_root, 0);
1226out:
1227 btrfs_free_path(path);
1228 return ret;
1229}
1230
1231int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
1232 struct btrfs_root *root, u64 bytenr,
1233 u64 orig_parent, u64 parent,
1234 u64 ref_root, u64 ref_generation,
1235 u64 owner_objectid)
1236{
1237 int ret;
1238 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
1239 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
1240 return 0;
1241 ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent,
1242 parent, ref_root, ref_root,
1243 ref_generation, ref_generation,
1244 owner_objectid);
1245 return ret;
1246}
1247
1248static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1249 struct btrfs_root *root, u64 bytenr,
1250 u64 orig_parent, u64 parent,
1251 u64 orig_root, u64 ref_root,
1252 u64 orig_generation, u64 ref_generation,
1253 u64 owner_objectid)
1254{
1255 struct btrfs_path *path;
1256 int ret;
1257 struct btrfs_key key;
1258 struct extent_buffer *l;
1259 struct btrfs_extent_item *item;
1260 u32 refs;
1261
1262 path = btrfs_alloc_path();
1263 if (!path)
1264 return -ENOMEM;
1265
1266 path->reada = 1;
1267 key.objectid = bytenr;
1268 key.type = BTRFS_EXTENT_ITEM_KEY;
1269 key.offset = (u64)-1;
1270
1271 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
1272 0, 1);
1273	if (ret < 0) {
		btrfs_free_path(path);	/* don't leak the path on error */
1274		return ret;
	}
1275 BUG_ON(ret == 0 || path->slots[0] == 0);
1276
1277 path->slots[0]--;
1278 l = path->nodes[0];
1279
1280 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1281 if (key.objectid != bytenr) {
1282 btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
1283 printk(KERN_ERR "btrfs wanted %llu found %llu\n",
1284 (unsigned long long)bytenr,
1285 (unsigned long long)key.objectid);
1286 BUG();
1287 }
1288 BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
1289
1290 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
1291 refs = btrfs_extent_refs(l, item);
1292 btrfs_set_extent_refs(l, item, refs + 1);
1293 btrfs_mark_buffer_dirty(path->nodes[0]);
1294
1295 btrfs_release_path(root->fs_info->extent_root, path);
1296
1297 path->reada = 1;
1298 ret = insert_extent_backref(trans, root->fs_info->extent_root,
1299 path, bytenr, parent,
1300 ref_root, ref_generation,
1301 owner_objectid);
1302 BUG_ON(ret);
1303 finish_current_insert(trans, root->fs_info->extent_root, 0);
1304 del_pending_extents(trans, root->fs_info->extent_root, 0);
1305
1306 btrfs_free_path(path);
1307 return 0;
1308}
1309
1310int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1311 struct btrfs_root *root,
1312 u64 bytenr, u64 num_bytes, u64 parent,
1313 u64 ref_root, u64 ref_generation,
1314 u64 owner_objectid)
1315{
1316 int ret;
1317 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
1318 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
1319 return 0;
1320 ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent,
1321 0, ref_root, 0, ref_generation,
1322 owner_objectid);
1323 return ret;
1324}
1325
1326int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
1327 struct btrfs_root *root)
1328{
1329 finish_current_insert(trans, root->fs_info->extent_root, 1);
1330 del_pending_extents(trans, root->fs_info->extent_root, 1);
1331 return 0;
1332}
1333
1334int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
1335 struct btrfs_root *root, u64 bytenr,
1336 u64 num_bytes, u32 *refs)
1337{
1338 struct btrfs_path *path;
1339 int ret;
1340 struct btrfs_key key;
1341 struct extent_buffer *l;
1342 struct btrfs_extent_item *item;
1343
1344 WARN_ON(num_bytes < root->sectorsize);
1345	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
1346 path->reada = 1;
1347 key.objectid = bytenr;
1348 key.offset = num_bytes;
1349 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1350 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
1351 0, 0);
1352 if (ret < 0)
1353 goto out;
1354 if (ret != 0) {
1355 btrfs_print_leaf(root, path->nodes[0]);
1356 printk(KERN_INFO "btrfs failed to find block number %llu\n",
1357 (unsigned long long)bytenr);
1358 BUG();
1359 }
1360 l = path->nodes[0];
1361 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
1362 *refs = btrfs_extent_refs(l, item);
1363out:
1364 btrfs_free_path(path);
1365 return 0;
1366}
1367
1368int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
1369 struct btrfs_root *root, u64 objectid, u64 bytenr)
1370{
1371 struct btrfs_root *extent_root = root->fs_info->extent_root;
1372 struct btrfs_path *path;
1373 struct extent_buffer *leaf;
1374 struct btrfs_extent_ref *ref_item;
1375 struct btrfs_key key;
1376 struct btrfs_key found_key;
1377 u64 ref_root;
1378 u64 last_snapshot;
1379 u32 nritems;
1380 int ret;
1381
1382 key.objectid = bytenr;
1383 key.offset = (u64)-1;
1384 key.type = BTRFS_EXTENT_ITEM_KEY;
1385
1386	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
1387 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1388 if (ret < 0)
1389 goto out;
1390 BUG_ON(ret == 0);
1391
1392 ret = -ENOENT;
1393 if (path->slots[0] == 0)
1394 goto out;
1395
1396 path->slots[0]--;
1397 leaf = path->nodes[0];
1398 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1399
1400 if (found_key.objectid != bytenr ||
1401 found_key.type != BTRFS_EXTENT_ITEM_KEY)
1402 goto out;
1403
1404 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1405 while (1) {
1406 leaf = path->nodes[0];
1407 nritems = btrfs_header_nritems(leaf);
1408 if (path->slots[0] >= nritems) {
1409 ret = btrfs_next_leaf(extent_root, path);
1410 if (ret < 0)
1411 goto out;
1412 if (ret == 0)
1413 continue;
1414 break;
1415 }
1416 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1417 if (found_key.objectid != bytenr)
1418 break;
1419
1420 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
1421 path->slots[0]++;
1422 continue;
1423 }
1424
1425 ref_item = btrfs_item_ptr(leaf, path->slots[0],
1426 struct btrfs_extent_ref);
1427 ref_root = btrfs_ref_root(leaf, ref_item);
1428 if ((ref_root != root->root_key.objectid &&
1429 ref_root != BTRFS_TREE_LOG_OBJECTID) ||
1430 objectid != btrfs_ref_objectid(leaf, ref_item)) {
1431 ret = 1;
1432 goto out;
1433 }
1434 if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {
1435 ret = 1;
1436 goto out;
1437 }
1438
1439 path->slots[0]++;
1440 }
1441 ret = 0;
1442out:
1443 btrfs_free_path(path);
1444 return ret;
1445}
1446
1447int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1448 struct extent_buffer *buf, u32 nr_extents)
1449{
1450 struct btrfs_key key;
1451 struct btrfs_file_extent_item *fi;
1452 u64 root_gen;
1453 u32 nritems;
1454 int i;
1455 int level;
1456 int ret = 0;
1457 int shared = 0;
1458
1459 if (!root->ref_cows)
1460 return 0;
1461
1462 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1463 shared = 0;
1464 root_gen = root->root_key.offset;
1465 } else {
1466 shared = 1;
1467 root_gen = trans->transid - 1;
1468 }
1469
1470 level = btrfs_header_level(buf);
1471 nritems = btrfs_header_nritems(buf);
1472
1473 if (level == 0) {
1474 struct btrfs_leaf_ref *ref;
1475 struct btrfs_extent_info *info;
1476
1477 ref = btrfs_alloc_leaf_ref(root, nr_extents);
1478 if (!ref) {
1479 ret = -ENOMEM;
1480 goto out;
1481 }
1482
1483 ref->root_gen = root_gen;
1484 ref->bytenr = buf->start;
1485 ref->owner = btrfs_header_owner(buf);
1486 ref->generation = btrfs_header_generation(buf);
1487 ref->nritems = nr_extents;
1488 info = ref->extents;
1489
1490 for (i = 0; nr_extents > 0 && i < nritems; i++) {
1491 u64 disk_bytenr;
1492 btrfs_item_key_to_cpu(buf, &key, i);
1493 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1494 continue;
1495 fi = btrfs_item_ptr(buf, i,
1496 struct btrfs_file_extent_item);
1497 if (btrfs_file_extent_type(buf, fi) ==
1498 BTRFS_FILE_EXTENT_INLINE)
1499 continue;
1500 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1501 if (disk_bytenr == 0)
1502 continue;
1503
1504 info->bytenr = disk_bytenr;
1505 info->num_bytes =
1506 btrfs_file_extent_disk_num_bytes(buf, fi);
1507 info->objectid = key.objectid;
1508 info->offset = key.offset;
1509 info++;
1510 }
1511
1512 ret = btrfs_add_leaf_ref(root, ref, shared);
1513 if (ret == -EEXIST && shared) {
1514 struct btrfs_leaf_ref *old;
1515 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
1516 BUG_ON(!old);
1517 btrfs_remove_leaf_ref(root, old);
1518 btrfs_free_leaf_ref(root, old);
1519 ret = btrfs_add_leaf_ref(root, ref, shared);
1520 }
1521 WARN_ON(ret);
1522 btrfs_free_leaf_ref(root, ref);
1523 }
1524out:
1525 return ret;
1526}
1527
1528int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1529 struct extent_buffer *orig_buf, struct extent_buffer *buf,
1530 u32 *nr_extents)
1531{
1532 u64 bytenr;
1533 u64 ref_root;
1534 u64 orig_root;
1535 u64 ref_generation;
1536 u64 orig_generation;
1537 u32 nritems;
1538 u32 nr_file_extents = 0;
1539 struct btrfs_key key;
1540 struct btrfs_file_extent_item *fi;
1541 int i;
1542 int level;
1543 int ret = 0;
1544 int faili = 0;
1545 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
1546 u64, u64, u64, u64, u64, u64, u64, u64);
1547
1548 ref_root = btrfs_header_owner(buf);
1549 ref_generation = btrfs_header_generation(buf);
1550 orig_root = btrfs_header_owner(orig_buf);
1551 orig_generation = btrfs_header_generation(orig_buf);
1552
1553 nritems = btrfs_header_nritems(buf);
1554 level = btrfs_header_level(buf);
1555
1556 if (root->ref_cows) {
1557 process_func = __btrfs_inc_extent_ref;
1558 } else {
1559 if (level == 0 &&
1560 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1561 goto out;
1562 if (level != 0 &&
1563 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1564 goto out;
1565 process_func = __btrfs_update_extent_ref;
1566 }
1567
1568 for (i = 0; i < nritems; i++) {
1569 cond_resched();
1570 if (level == 0) {
1571 btrfs_item_key_to_cpu(buf, &key, i);
1572 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1573 continue;
1574 fi = btrfs_item_ptr(buf, i,
1575 struct btrfs_file_extent_item);
1576 if (btrfs_file_extent_type(buf, fi) ==
1577 BTRFS_FILE_EXTENT_INLINE)
1578 continue;
1579 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1580 if (bytenr == 0)
1581 continue;
1582
1583 nr_file_extents++;
1584
1585 ret = process_func(trans, root, bytenr,
1586 orig_buf->start, buf->start,
1587 orig_root, ref_root,
1588 orig_generation, ref_generation,
1589 key.objectid);
1590
1591 if (ret) {
1592 faili = i;
1593 WARN_ON(1);
1594 goto fail;
1595 }
1596 } else {
1597 bytenr = btrfs_node_blockptr(buf, i);
1598 ret = process_func(trans, root, bytenr,
1599 orig_buf->start, buf->start,
1600 orig_root, ref_root,
1601 orig_generation, ref_generation,
1602 level - 1);
1603 if (ret) {
1604 faili = i;
1605 WARN_ON(1);
1606 goto fail;
1607 }
1608 }
1609 }
1610out:
1611 if (nr_extents) {
1612 if (level == 0)
1613 *nr_extents = nr_file_extents;
1614 else
1615 *nr_extents = nritems;
1616 }
1617 return 0;
1618fail:
1619 WARN_ON(1);
1620 return ret;
1621}
1622
1623int btrfs_update_ref(struct btrfs_trans_handle *trans,
1624 struct btrfs_root *root, struct extent_buffer *orig_buf,
1625 struct extent_buffer *buf, int start_slot, int nr)
1627{
1628 u64 bytenr;
1629 u64 ref_root;
1630 u64 orig_root;
1631 u64 ref_generation;
1632 u64 orig_generation;
1633 struct btrfs_key key;
1634 struct btrfs_file_extent_item *fi;
1635 int i;
1636 int ret;
1637 int slot;
1638 int level;
1639
1640 BUG_ON(start_slot < 0);
1641 BUG_ON(start_slot + nr > btrfs_header_nritems(buf));
1642
1643 ref_root = btrfs_header_owner(buf);
1644 ref_generation = btrfs_header_generation(buf);
1645 orig_root = btrfs_header_owner(orig_buf);
1646 orig_generation = btrfs_header_generation(orig_buf);
1647 level = btrfs_header_level(buf);
1648
1649 if (!root->ref_cows) {
1650 if (level == 0 &&
1651 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1652 return 0;
1653 if (level != 0 &&
1654 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1655 return 0;
1656 }
1657
1658 for (i = 0, slot = start_slot; i < nr; i++, slot++) {
1659 cond_resched();
1660 if (level == 0) {
1661 btrfs_item_key_to_cpu(buf, &key, slot);
1662 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1663 continue;
1664 fi = btrfs_item_ptr(buf, slot,
1665 struct btrfs_file_extent_item);
1666 if (btrfs_file_extent_type(buf, fi) ==
1667 BTRFS_FILE_EXTENT_INLINE)
1668 continue;
1669 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1670 if (bytenr == 0)
1671 continue;
1672 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1673 orig_buf->start, buf->start,
1674 orig_root, ref_root,
1675 orig_generation, ref_generation,
1676 key.objectid);
1677 if (ret)
1678 goto fail;
1679 } else {
1680 bytenr = btrfs_node_blockptr(buf, slot);
1681 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1682 orig_buf->start, buf->start,
1683 orig_root, ref_root,
1684 orig_generation, ref_generation,
1685 level - 1);
1686 if (ret)
1687 goto fail;
1688 }
1689 }
1690 return 0;
1691fail:
1692 WARN_ON(1);
1693 return -1;
1694}
1695
1696static int write_one_cache_group(struct btrfs_trans_handle *trans,
1697 struct btrfs_root *root,
1698 struct btrfs_path *path,
1699 struct btrfs_block_group_cache *cache)
1700{
1701 int ret;
1702 int pending_ret;
1703 struct btrfs_root *extent_root = root->fs_info->extent_root;
1704 unsigned long bi;
1705 struct extent_buffer *leaf;
1706
1707 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
1708 if (ret < 0)
1709 goto fail;
1710 BUG_ON(ret);
1711
1712 leaf = path->nodes[0];
1713 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
1714 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
1715 btrfs_mark_buffer_dirty(leaf);
1716 btrfs_release_path(extent_root, path);
1717fail:
1718 finish_current_insert(trans, extent_root, 0);
1719 pending_ret = del_pending_extents(trans, extent_root, 0);
1720 if (ret)
1721 return ret;
1722 if (pending_ret)
1723 return pending_ret;
1724 return 0;
1726}
1727
1728int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1729 struct btrfs_root *root)
1730{
1731 struct btrfs_block_group_cache *cache, *entry;
1732 struct rb_node *n;
1733 int err = 0;
1734 int werr = 0;
1735 struct btrfs_path *path;
1736 u64 last = 0;
1737
1738 path = btrfs_alloc_path();
1739 if (!path)
1740 return -ENOMEM;
1741
1742 while (1) {
1743 cache = NULL;
1744 spin_lock(&root->fs_info->block_group_cache_lock);
1745 for (n = rb_first(&root->fs_info->block_group_cache_tree);
1746 n; n = rb_next(n)) {
1747 entry = rb_entry(n, struct btrfs_block_group_cache,
1748 cache_node);
1749 if (entry->dirty) {
1750 cache = entry;
1751 break;
1752 }
1753 }
1754 spin_unlock(&root->fs_info->block_group_cache_lock);
1755
1756 if (!cache)
1757 break;
1758
1759 cache->dirty = 0;
1760 last += cache->key.offset;
1761
1762 err = write_one_cache_group(trans, root,
1763 path, cache);
1764 /*
1765 * if we fail to write the cache group, we want
1766 * to keep it marked dirty in hopes that a later
1767 * write will work
1768 */
1769 if (err) {
1770 werr = err;
1771 continue;
1772 }
1773 }
1774 btrfs_free_path(path);
1775 return werr;
1776}
1777
1778int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
1779{
1780 struct btrfs_block_group_cache *block_group;
1781 int readonly = 0;
1782
1783 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
1784 if (!block_group || block_group->ro)
1785 readonly = 1;
1786 if (block_group)
1787 put_block_group(block_group);
1788 return readonly;
1789}
1790
1791static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1792 u64 total_bytes, u64 bytes_used,
1793 struct btrfs_space_info **space_info)
1794{
1795 struct btrfs_space_info *found;
1796
1797 found = __find_space_info(info, flags);
1798 if (found) {
1799 spin_lock(&found->lock);
1800 found->total_bytes += total_bytes;
1801 found->bytes_used += bytes_used;
1802 found->full = 0;
1803 spin_unlock(&found->lock);
1804 *space_info = found;
1805 return 0;
1806 }
1807 found = kzalloc(sizeof(*found), GFP_NOFS);
1808 if (!found)
1809 return -ENOMEM;
1810
1811 list_add(&found->list, &info->space_info);
1812 INIT_LIST_HEAD(&found->block_groups);
1813 init_rwsem(&found->groups_sem);
1814 spin_lock_init(&found->lock);
1815 found->flags = flags;
1816 found->total_bytes = total_bytes;
1817 found->bytes_used = bytes_used;
1818 found->bytes_pinned = 0;
1819 found->bytes_reserved = 0;
1820 found->bytes_readonly = 0;
1821 found->full = 0;
1822 found->force_alloc = 0;
1823 *space_info = found;
1824 return 0;
1825}
1826
1827static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1828{
1829 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
1830 BTRFS_BLOCK_GROUP_RAID1 |
1831 BTRFS_BLOCK_GROUP_RAID10 |
1832 BTRFS_BLOCK_GROUP_DUP);
1833 if (extra_flags) {
1834 if (flags & BTRFS_BLOCK_GROUP_DATA)
1835 fs_info->avail_data_alloc_bits |= extra_flags;
1836 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1837 fs_info->avail_metadata_alloc_bits |= extra_flags;
1838 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1839 fs_info->avail_system_alloc_bits |= extra_flags;
1840 }
1841}
1842
1843static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
1844{
1845 spin_lock(&cache->space_info->lock);
1846 spin_lock(&cache->lock);
1847 if (!cache->ro) {
1848 cache->space_info->bytes_readonly += cache->key.offset -
1849 btrfs_block_group_used(&cache->item);
1850 cache->ro = 1;
1851 }
1852 spin_unlock(&cache->lock);
1853 spin_unlock(&cache->space_info->lock);
1854}
1855
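/*
 * strip the RAID bits that the current number of rw devices cannot
 * satisfy and collapse redundant combinations (DUP loses to RAID1/RAID10,
 * RAID1 to RAID10, RAID0 to any mirrored profile) so that a single
 * coherent allocation profile remains
 */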
1856u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1857{
1858 u64 num_devices = root->fs_info->fs_devices->rw_devices;
1859
1860 if (num_devices == 1)
1861 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
1862 if (num_devices < 4)
1863 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
1864
1865 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
1866 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
1867 BTRFS_BLOCK_GROUP_RAID10))) {
1868 flags &= ~BTRFS_BLOCK_GROUP_DUP;
1869 }
1870
1871 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
1872 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
1873 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
1874 }
1875
1876 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
1877 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
1878 (flags & BTRFS_BLOCK_GROUP_RAID10) |
1879 (flags & BTRFS_BLOCK_GROUP_DUP)))
1880 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
1881 return flags;
1882}
1883
1884static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1885 struct btrfs_root *extent_root, u64 alloc_bytes,
1886 u64 flags, int force)
1887{
1888 struct btrfs_space_info *space_info;
1889 u64 thresh;
1890 int ret = 0;
1891
1892 mutex_lock(&extent_root->fs_info->chunk_mutex);
1893
1894 flags = btrfs_reduce_alloc_profile(extent_root, flags);
1895
1896 space_info = __find_space_info(extent_root->fs_info, flags);
1897 if (!space_info) {
1898 ret = update_space_info(extent_root->fs_info, flags,
1899 0, 0, &space_info);
1900 BUG_ON(ret);
1901 }
1902 BUG_ON(!space_info);
1903
1904 spin_lock(&space_info->lock);
1905 if (space_info->force_alloc) {
1906 force = 1;
1907 space_info->force_alloc = 0;
1908 }
1909 if (space_info->full) {
1910 spin_unlock(&space_info->lock);
1911 goto out;
1912 }
1913
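	/*
	 * only allocate a new chunk if this allocation would push usage
	 * past 60% (div_factor(..., 6)) of the writable space in this
	 * space_info, unless the caller forces it
	 */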
1914 thresh = space_info->total_bytes - space_info->bytes_readonly;
1915 thresh = div_factor(thresh, 6);
1916 if (!force &&
1917 (space_info->bytes_used + space_info->bytes_pinned +
1918 space_info->bytes_reserved + alloc_bytes) < thresh) {
1919 spin_unlock(&space_info->lock);
1920 goto out;
1921 }
1922 spin_unlock(&space_info->lock);
1923
1924 ret = btrfs_alloc_chunk(trans, extent_root, flags);
1925 if (ret)
1926 space_info->full = 1;
1927out:
1928 mutex_unlock(&extent_root->fs_info->chunk_mutex);
1929 return ret;
1930}
1931
1932static int update_block_group(struct btrfs_trans_handle *trans,
1933 struct btrfs_root *root,
1934 u64 bytenr, u64 num_bytes, int alloc,
1935 int mark_free)
1936{
1937 struct btrfs_block_group_cache *cache;
1938 struct btrfs_fs_info *info = root->fs_info;
1939 u64 total = num_bytes;
1940 u64 old_val;
1941 u64 byte_in_group;
1942
1943 while (total) {
1944 cache = btrfs_lookup_block_group(info, bytenr);
1945 if (!cache)
1946 return -1;
1947 byte_in_group = bytenr - cache->key.objectid;
1948 WARN_ON(byte_in_group > cache->key.offset);
1949
1950 spin_lock(&cache->space_info->lock);
1951 spin_lock(&cache->lock);
1952 cache->dirty = 1;
1953 old_val = btrfs_block_group_used(&cache->item);
1954 num_bytes = min(total, cache->key.offset - byte_in_group);
1955 if (alloc) {
1956 old_val += num_bytes;
1957 cache->space_info->bytes_used += num_bytes;
1958 if (cache->ro)
1959 cache->space_info->bytes_readonly -= num_bytes;
1960 btrfs_set_block_group_used(&cache->item, old_val);
1961 spin_unlock(&cache->lock);
1962 spin_unlock(&cache->space_info->lock);
1963 } else {
1964 old_val -= num_bytes;
1965 cache->space_info->bytes_used -= num_bytes;
1966 if (cache->ro)
1967 cache->space_info->bytes_readonly += num_bytes;
1968 btrfs_set_block_group_used(&cache->item, old_val);
1969 spin_unlock(&cache->lock);
1970 spin_unlock(&cache->space_info->lock);
1971 if (mark_free) {
1972 int ret;
1973
1974 ret = btrfs_discard_extent(root, bytenr,
1975 num_bytes);
1976 WARN_ON(ret);
1977
1978 ret = btrfs_add_free_space(cache, bytenr,
1979 num_bytes);
1980 WARN_ON(ret);
1981 }
1982 }
1983 put_block_group(cache);
1984 total -= num_bytes;
1985 bytenr += num_bytes;
1986 }
1987 return 0;
1988}
1989
1990static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
1991{
1992 struct btrfs_block_group_cache *cache;
1993 u64 bytenr;
1994
1995 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
1996 if (!cache)
1997 return 0;
1998
1999 bytenr = cache->key.objectid;
2000 put_block_group(cache);
2001
2002 return bytenr;
2003}
2004
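/*
 * pin or unpin a byte range: mirror the EXTENT_DIRTY bits in
 * fs_info->pinned_extents, keep the per-block-group and per-space-info
 * pinned counters in sync, and return unpinned space to the free space
 * cache once the block group has been cached
 */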
2005int btrfs_update_pinned_extents(struct btrfs_root *root,
2006 u64 bytenr, u64 num, int pin)
2007{
2008 u64 len;
2009 struct btrfs_block_group_cache *cache;
2010 struct btrfs_fs_info *fs_info = root->fs_info;
2011
2012 WARN_ON(!mutex_is_locked(&root->fs_info->pinned_mutex));
2013 if (pin) {
2014 set_extent_dirty(&fs_info->pinned_extents,
2015 bytenr, bytenr + num - 1, GFP_NOFS);
2016 } else {
2017 clear_extent_dirty(&fs_info->pinned_extents,
2018 bytenr, bytenr + num - 1, GFP_NOFS);
2019 }
2020 while (num > 0) {
2021 cache = btrfs_lookup_block_group(fs_info, bytenr);
2022 BUG_ON(!cache);
2023 len = min(num, cache->key.offset -
2024 (bytenr - cache->key.objectid));
2025 if (pin) {
2026 spin_lock(&cache->space_info->lock);
2027 spin_lock(&cache->lock);
2028 cache->pinned += len;
2029 cache->space_info->bytes_pinned += len;
2030 spin_unlock(&cache->lock);
2031 spin_unlock(&cache->space_info->lock);
2032 fs_info->total_pinned += len;
2033 } else {
2034 spin_lock(&cache->space_info->lock);
2035 spin_lock(&cache->lock);
2036 cache->pinned -= len;
2037 cache->space_info->bytes_pinned -= len;
2038 spin_unlock(&cache->lock);
2039 spin_unlock(&cache->space_info->lock);
2040 fs_info->total_pinned -= len;
2041 if (cache->cached)
2042 btrfs_add_free_space(cache, bytenr, len);
2043 }
2044 put_block_group(cache);
2045 bytenr += len;
2046 num -= len;
2047 }
2048 return 0;
2049}
2050
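/*
 * walk every block group a byte range touches and move that many bytes
 * into or out of the reserved accounting, under the space_info and
 * block group locks
 */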
2051static int update_reserved_extents(struct btrfs_root *root,
2052 u64 bytenr, u64 num, int reserve)
2053{
2054 u64 len;
2055 struct btrfs_block_group_cache *cache;
2056 struct btrfs_fs_info *fs_info = root->fs_info;
2057
2058 while (num > 0) {
2059 cache = btrfs_lookup_block_group(fs_info, bytenr);
2060 BUG_ON(!cache);
2061 len = min(num, cache->key.offset -
2062 (bytenr - cache->key.objectid));
2063
2064 spin_lock(&cache->space_info->lock);
2065 spin_lock(&cache->lock);
2066 if (reserve) {
2067 cache->reserved += len;
2068 cache->space_info->bytes_reserved += len;
2069 } else {
2070 cache->reserved -= len;
2071 cache->space_info->bytes_reserved -= len;
2072 }
2073 spin_unlock(&cache->lock);
2074 spin_unlock(&cache->space_info->lock);
2075 put_block_group(cache);
2076 bytenr += len;
2077 num -= len;
2078 }
2079 return 0;
2080}
2081
2082int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
2083{
2084 u64 last = 0;
2085 u64 start;
2086 u64 end;
2087 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
2088 int ret;
2089
2090 mutex_lock(&root->fs_info->pinned_mutex);
2091 while (1) {
2092 ret = find_first_extent_bit(pinned_extents, last,
2093 &start, &end, EXTENT_DIRTY);
2094 if (ret)
2095 break;
2096 set_extent_dirty(copy, start, end, GFP_NOFS);
2097 last = end + 1;
2098 }
2099 mutex_unlock(&root->fs_info->pinned_mutex);
2100 return 0;
2101}
2102
2103int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
2104 struct btrfs_root *root,
2105 struct extent_io_tree *unpin)
2106{
2107 u64 start;
2108 u64 end;
2109 int ret;
2110
2111 mutex_lock(&root->fs_info->pinned_mutex);
2112 while (1) {
2113 ret = find_first_extent_bit(unpin, 0, &start, &end,
2114 EXTENT_DIRTY);
2115 if (ret)
2116 break;
2117
2118 ret = btrfs_discard_extent(root, start, end + 1 - start);
2119
2120 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
2121 clear_extent_dirty(unpin, start, end, GFP_NOFS);
2122
2123 if (need_resched()) {
2124 mutex_unlock(&root->fs_info->pinned_mutex);
2125 cond_resched();
2126 mutex_lock(&root->fs_info->pinned_mutex);
2127 }
2128 }
2129 mutex_unlock(&root->fs_info->pinned_mutex);
2130 return ret;
2131}
2132
2133static int finish_current_insert(struct btrfs_trans_handle *trans,
2134 struct btrfs_root *extent_root, int all)
2135{
2136 u64 start;
2137 u64 end;
2138 u64 priv;
2139 u64 search = 0;
2140 u64 skipped = 0;
2141 struct btrfs_fs_info *info = extent_root->fs_info;
2142 struct btrfs_path *path;
2143 struct pending_extent_op *extent_op, *tmp;
2144 struct list_head insert_list, update_list;
2145 int ret;
2146 int num_inserts = 0, max_inserts;
2147
2148	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
2149 INIT_LIST_HEAD(&insert_list);
2150 INIT_LIST_HEAD(&update_list);
2151
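	/*
	 * each pending insert becomes two items, the extent item plus its
	 * backref, and each of those costs a key, an item header and the
	 * item body, hence the pairs of sizeofs in the divisor below
	 */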
2152 max_inserts = extent_root->leafsize /
2153 (2 * sizeof(struct btrfs_key) + 2 * sizeof(struct btrfs_item) +
2154 sizeof(struct btrfs_extent_ref) +
2155 sizeof(struct btrfs_extent_item));
2156again:
2157 mutex_lock(&info->extent_ins_mutex);
2158 while (1) {
2159 ret = find_first_extent_bit(&info->extent_ins, search, &start,
2160 &end, EXTENT_WRITEBACK);
2161 if (ret) {
2162 if (skipped && all && !num_inserts) {
2163 skipped = 0;
2164 search = 0;
2165 continue;
2166 }
2167 mutex_unlock(&info->extent_ins_mutex);
2168 break;
2169 }
2170
2171 ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
2172 if (!ret) {
2173 skipped = 1;
2174 search = end + 1;
2175 if (need_resched()) {
2176 mutex_unlock(&info->extent_ins_mutex);
2177 cond_resched();
2178 mutex_lock(&info->extent_ins_mutex);
2179 }
2180 continue;
2181 }
2182
2183 ret = get_state_private(&info->extent_ins, start, &priv);
2184 BUG_ON(ret);
2185 extent_op = (struct pending_extent_op *)(unsigned long) priv;
2186
2187 if (extent_op->type == PENDING_EXTENT_INSERT) {
2188 num_inserts++;
2189 list_add_tail(&extent_op->list, &insert_list);
2190 search = end + 1;
2191 if (num_inserts == max_inserts) {
2192 mutex_unlock(&info->extent_ins_mutex);
2193 break;
2194 }
2195 } else if (extent_op->type == PENDING_BACKREF_UPDATE) {
2196 list_add_tail(&extent_op->list, &update_list);
2197 search = end + 1;
2198 } else {
2199 BUG();
2200 }
2201 }
2202
2203 /*
2204 * process the update list, clear the writeback bit for it, and if
2205 * somebody marked this thing for deletion then just unlock it and be
2206 * done; free_extents will handle it
2207 */
2208 mutex_lock(&info->extent_ins_mutex);
2209 list_for_each_entry_safe(extent_op, tmp, &update_list, list) {
2210 clear_extent_bits(&info->extent_ins, extent_op->bytenr,
2211 extent_op->bytenr + extent_op->num_bytes - 1,
2212 EXTENT_WRITEBACK, GFP_NOFS);
2213 if (extent_op->del) {
2214 list_del_init(&extent_op->list);
2215 unlock_extent(&info->extent_ins, extent_op->bytenr,
2216 extent_op->bytenr + extent_op->num_bytes
2217 - 1, GFP_NOFS);
2218 kfree(extent_op);
2219 }
2220 }
2221 mutex_unlock(&info->extent_ins_mutex);
2222
2223 /*
2224 * still have things left on the update list, go ahead and update
2225 * everything
2226 */
2227 if (!list_empty(&update_list)) {
2228 ret = update_backrefs(trans, extent_root, path, &update_list);
2229 BUG_ON(ret);
2230 }
2231
2232 /*
2233 * if no inserts need to be done, but we skipped some extents and we
2234 * need to make sure everything is cleaned, then reset everything and
2235 * go back to the beginning
2236 */
2237 if (!num_inserts && all && skipped) {
2238 search = 0;
2239 skipped = 0;
2240 INIT_LIST_HEAD(&update_list);
2241 INIT_LIST_HEAD(&insert_list);
2242 goto again;
2243 } else if (!num_inserts) {
2244 goto out;
2245 }
2246
2247 /*
2248 * process the insert extents list. Again if we are deleting this
2249 * extent, then just unlock it, pin down the bytes if need be, and be
2250 * done with it. Saves us from having to actually insert the extent
2251 * into the tree and then subsequently come along and delete it
2252 */
2253 mutex_lock(&info->extent_ins_mutex);
2254 list_for_each_entry_safe(extent_op, tmp, &insert_list, list) {
2255 clear_extent_bits(&info->extent_ins, extent_op->bytenr,
2256 extent_op->bytenr + extent_op->num_bytes - 1,
2257 EXTENT_WRITEBACK, GFP_NOFS);
2258 if (extent_op->del) {
2259 u64 used;
2260 list_del_init(&extent_op->list);
2261 unlock_extent(&info->extent_ins, extent_op->bytenr,
2262 extent_op->bytenr + extent_op->num_bytes
2263 - 1, GFP_NOFS);
2264
2265 mutex_lock(&extent_root->fs_info->pinned_mutex);
2266 ret = pin_down_bytes(trans, extent_root,
2267 extent_op->bytenr,
2268 extent_op->num_bytes, 0);
2269 mutex_unlock(&extent_root->fs_info->pinned_mutex);
2270
2271 spin_lock(&info->delalloc_lock);
2272 used = btrfs_super_bytes_used(&info->super_copy);
2273 btrfs_set_super_bytes_used(&info->super_copy,
2274 used - extent_op->num_bytes);
2275 used = btrfs_root_used(&extent_root->root_item);
2276 btrfs_set_root_used(&extent_root->root_item,
2277 used - extent_op->num_bytes);
2278 spin_unlock(&info->delalloc_lock);
2279
2280 ret = update_block_group(trans, extent_root,
2281 extent_op->bytenr,
2282 extent_op->num_bytes,
2283 0, ret > 0);
2284 BUG_ON(ret);
2285 kfree(extent_op);
2286 num_inserts--;
2287 }
2288 }
2289 mutex_unlock(&info->extent_ins_mutex);
2290
2291 ret = insert_extents(trans, extent_root, path, &insert_list,
2292 num_inserts);
2293 BUG_ON(ret);
2294
2295 /*
2296 * if we broke out of the loop in order to insert stuff because we hit
2297 * the maximum number of inserts at a time we can handle, then loop
2298 * back and pick up where we left off
2299 */
2300 if (num_inserts == max_inserts) {
2301 INIT_LIST_HEAD(&insert_list);
2302 INIT_LIST_HEAD(&update_list);
2303 num_inserts = 0;
2304 goto again;
2305 }
2306
2307 /*
2308 * again, if we need to make absolutely sure there are no more pending
2309 * extent operations left and we know that we skipped some, go back to
2310 * the beginning and do it all again
2311 */
2312 if (all && skipped) {
2313 INIT_LIST_HEAD(&insert_list);
2314 INIT_LIST_HEAD(&update_list);
2315 search = 0;
2316 skipped = 0;
2317 num_inserts = 0;
2318 goto again;
2319 }
2320out:
2321 btrfs_free_path(path);
2322 return 0;
2323}
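
/*
 * Illustrative sketch (standalone, not from extent-tree.c; names made
 * up): the scan in finish_current_insert() above, and the one in
 * del_pending_extents() below, follow the same pattern -- walk the
 * range, try-lock each item, note when something had to be skipped,
 * and when the caller passed 'all', restart from the beginning until
 * a pass completes with nothing skipped.  Reduced to its skeleton
 * over a plain array of mutexes:
 */
#include <pthread.h>
#include <stdio.h>

#define NITEMS 8

static pthread_mutex_t locks[NITEMS] = {
	[0 ... NITEMS - 1] = PTHREAD_MUTEX_INITIALIZER	/* GNU range init */
};

static void process_all(void)
{
	int skipped;

	do {
		skipped = 0;
		for (int i = 0; i < NITEMS; i++) {
			/* trylock stands in for try_lock_extent() */
			if (pthread_mutex_trylock(&locks[i])) {
				skipped = 1;	/* busy, retry next pass */
				continue;
			}
			printf("processed item %d\n", i);
			pthread_mutex_unlock(&locks[i]);
		}
	} while (skipped);	/* the 'all' case: loop until a clean pass */
}

int main(void)
{
	process_all();
	return 0;
}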
2324
2325static int pin_down_bytes(struct btrfs_trans_handle *trans,
2326 struct btrfs_root *root,
2327 u64 bytenr, u64 num_bytes, int is_data)
2328{
2329 int err = 0;
2330 struct extent_buffer *buf;
2331
2332 if (is_data)
2333 goto pinit;
2334
2335 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
2336 if (!buf)
2337 goto pinit;
2338
2339 /* we can reuse a block if it hasn't been written
2340 * and it is from this transaction. We can't
2341 * reuse anything from the tree log root because
2342 * it has tiny sub-transactions.
2343 */
2344 if (btrfs_buffer_uptodate(buf, 0) &&
2345 btrfs_try_tree_lock(buf)) {
2346 u64 header_owner = btrfs_header_owner(buf);
2347 u64 header_transid = btrfs_header_generation(buf);
2348 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
2349 header_owner != BTRFS_TREE_RELOC_OBJECTID &&
2350 header_transid == trans->transid &&
2351 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
2352 clean_tree_block(NULL, root, buf);
2353 btrfs_tree_unlock(buf);
2354 free_extent_buffer(buf);
2355 return 1;
2356 }
2357 btrfs_tree_unlock(buf);
2358 }
2359 free_extent_buffer(buf);
2360pinit:
2361 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2362
2363 BUG_ON(err < 0);
2364 return 0;
2365}
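
/*
 * Worked example of the reuse test above: a btree node allocated in
 * the current transaction (header generation equals trans->transid),
 * never written out, and not owned by the log or relocation trees can
 * simply be cleaned and reused right away; anything else must stay
 * pinned until the transaction commits.
 */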
2366
2367/*
2368 * remove an extent from the root, returns 0 on success
2369 */
2370static int __free_extent(struct btrfs_trans_handle *trans,
2371 struct btrfs_root *root,
2372 u64 bytenr, u64 num_bytes, u64 parent,
2373 u64 root_objectid, u64 ref_generation,
2374 u64 owner_objectid, int pin, int mark_free)
2375{
2376 struct btrfs_path *path;
2377 struct btrfs_key key;
2378 struct btrfs_fs_info *info = root->fs_info;
2379 struct btrfs_root *extent_root = info->extent_root;
2380 struct extent_buffer *leaf;
2381 int ret;
2382 int extent_slot = 0;
2383 int found_extent = 0;
2384 int num_to_del = 1;
2385 struct btrfs_extent_item *ei;
2386 u32 refs;
2387
2388 key.objectid = bytenr;
2389 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
2390 key.offset = num_bytes;
2391 path = btrfs_alloc_path();
2392 if (!path)
2393 return -ENOMEM;
2394
2395 path->reada = 1;
2396 ret = lookup_extent_backref(trans, extent_root, path,
2397 bytenr, parent, root_objectid,
2398 ref_generation, owner_objectid, 1);
2399 if (ret == 0) {
2400 struct btrfs_key found_key;
2401 extent_slot = path->slots[0];
2402 while (extent_slot > 0) {
2403 extent_slot--;
2404 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2405 extent_slot);
2406 if (found_key.objectid != bytenr)
2407 break;
2408 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
2409 found_key.offset == num_bytes) {
2410 found_extent = 1;
2411 break;
2412 }
2413 if (path->slots[0] - extent_slot > 5)
2414 break;
2415 }
2416 if (!found_extent) {
2417 ret = remove_extent_backref(trans, extent_root, path);
2418 BUG_ON(ret);
2419 btrfs_release_path(extent_root, path);
2420 ret = btrfs_search_slot(trans, extent_root,
2421 &key, path, -1, 1);
2422 if (ret) {
2423 printk(KERN_ERR "umm, got %d back from search"
2424 ", was looking for %llu\n", ret,
2425 (unsigned long long)bytenr);
2426 btrfs_print_leaf(extent_root, path->nodes[0]);
2427 }
2428 BUG_ON(ret);
2429 extent_slot = path->slots[0];
2430 }
2431 } else {
2432 btrfs_print_leaf(extent_root, path->nodes[0]);
2433 WARN_ON(1);
2434 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
2435 "root %llu gen %llu owner %llu\n",
2436 (unsigned long long)bytenr,
2437 (unsigned long long)root_objectid,
2438 (unsigned long long)ref_generation,
2439 (unsigned long long)owner_objectid);
2440 }
2441
2442 leaf = path->nodes[0];
2443 ei = btrfs_item_ptr(leaf, extent_slot,
2444 struct btrfs_extent_item);
2445 refs = btrfs_extent_refs(leaf, ei);
2446 BUG_ON(refs == 0);
2447 refs -= 1;
2448 btrfs_set_extent_refs(leaf, ei, refs);
2449
2450 btrfs_mark_buffer_dirty(leaf);
2451
2452 if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
2453 struct btrfs_extent_ref *ref;
2454 ref = btrfs_item_ptr(leaf, path->slots[0],
2455 struct btrfs_extent_ref);
2456 BUG_ON(btrfs_ref_num_refs(leaf, ref) != 1);
2457 /* if the back ref and the extent are next to each other
2458 * they get deleted below in one shot
2459 */
2460 path->slots[0] = extent_slot;
2461 num_to_del = 2;
2462 } else if (found_extent) {
2463 /* otherwise delete the extent back ref */
2464 ret = remove_extent_backref(trans, extent_root, path);
2465 BUG_ON(ret);
2466 /* if refs are 0, we need to setup the path for deletion */
2467 if (refs == 0) {
2468 btrfs_release_path(extent_root, path);
2469 ret = btrfs_search_slot(trans, extent_root, &key, path,
2470 -1, 1);
2471 BUG_ON(ret);
2472 }
2473 }
2474
2475 if (refs == 0) {
2476 u64 super_used;
2477 u64 root_used;
2478
2479 if (pin) {
2480 mutex_lock(&root->fs_info->pinned_mutex);
2481 ret = pin_down_bytes(trans, root, bytenr, num_bytes,
2482 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID);
2483 mutex_unlock(&root->fs_info->pinned_mutex);
2484 if (ret > 0)
2485 mark_free = 1;
2486 BUG_ON(ret < 0);
2487 }
2488 /* block accounting for super block */
2489 spin_lock(&info->delalloc_lock);
2490 super_used = btrfs_super_bytes_used(&info->super_copy);
2491 btrfs_set_super_bytes_used(&info->super_copy,
2492 super_used - num_bytes);
2493
2494 /* block accounting for root item */
2495 root_used = btrfs_root_used(&root->root_item);
2496 btrfs_set_root_used(&root->root_item,
2497 root_used - num_bytes);
2498 spin_unlock(&info->delalloc_lock);
2499 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
2500 num_to_del);
2501 BUG_ON(ret);
2502 btrfs_release_path(extent_root, path);
2503
2504 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
2505 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
2506 BUG_ON(ret);
2507 }
2508
2509 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
2510 mark_free);
2511 BUG_ON(ret);
2512 }
2513 btrfs_free_path(path);
2514 finish_current_insert(trans, extent_root, 0);
2515 return ret;
2516}
2517
2518/*
2519 * find all the blocks marked as pending in the radix tree and remove
2520 * them from the extent map
2521 */
2522static int del_pending_extents(struct btrfs_trans_handle *trans,
2523 struct btrfs_root *extent_root, int all)
2524{
2525 int ret;
2526 int err = 0;
2527 u64 start;
2528 u64 end;
2529 u64 priv;
2530 u64 search = 0;
2531 int nr = 0, skipped = 0;
2532 struct extent_io_tree *pending_del;
2533 struct extent_io_tree *extent_ins;
2534 struct pending_extent_op *extent_op;
2535 struct btrfs_fs_info *info = extent_root->fs_info;
2536 struct list_head delete_list;
2537
2538 INIT_LIST_HEAD(&delete_list);
2539 extent_ins = &extent_root->fs_info->extent_ins;
2540 pending_del = &extent_root->fs_info->pending_del;
2541
2542again:
2543 mutex_lock(&info->extent_ins_mutex);
2544 while (1) {
2545 ret = find_first_extent_bit(pending_del, search, &start, &end,
2546 EXTENT_WRITEBACK);
2547 if (ret) {
2548 if (all && skipped && !nr) {
2549 search = 0;
2550 continue;
2551 }
2552 mutex_unlock(&info->extent_ins_mutex);
2553 break;
2554 }
2555
2556 ret = try_lock_extent(extent_ins, start, end, GFP_NOFS);
2557 if (!ret) {
2558 search = end+1;
2559 skipped = 1;
2560
2561 if (need_resched()) {
2562 mutex_unlock(&info->extent_ins_mutex);
2563 cond_resched();
2564 mutex_lock(&info->extent_ins_mutex);
2565 }
2566
2567 continue;
2568 }
2569 BUG_ON(ret < 0);
2570
2571 ret = get_state_private(pending_del, start, &priv);
2572 BUG_ON(ret);
2573 extent_op = (struct pending_extent_op *)(unsigned long)priv;
2574
2575 clear_extent_bits(pending_del, start, end, EXTENT_WRITEBACK,
2576 GFP_NOFS);
2577 if (!test_range_bit(extent_ins, start, end,
2578 EXTENT_WRITEBACK, 0)) {
2579 list_add_tail(&extent_op->list, &delete_list);
2580 nr++;
2581 } else {
2582 kfree(extent_op);
2583
2584 ret = get_state_private(&info->extent_ins, start,
2585 &priv);
2586 BUG_ON(ret);
2587 extent_op = (struct pending_extent_op *)
2588 (unsigned long)priv;
2589
2590 clear_extent_bits(&info->extent_ins, start, end,
2591 EXTENT_WRITEBACK, GFP_NOFS);
2592
2593 if (extent_op->type == PENDING_BACKREF_UPDATE) {
2594 list_add_tail(&extent_op->list, &delete_list);
2595 search = end + 1;
2596 nr++;
2597 continue;
2598 }
2599
2600 mutex_lock(&extent_root->fs_info->pinned_mutex);
2601 ret = pin_down_bytes(trans, extent_root, start,
2602 end + 1 - start, 0);
2603 mutex_unlock(&extent_root->fs_info->pinned_mutex);
2604
2605 ret = update_block_group(trans, extent_root, start,
2606 end + 1 - start, 0, ret > 0);
2607
2608 unlock_extent(extent_ins, start, end, GFP_NOFS);
2609 BUG_ON(ret);
2610 kfree(extent_op);
2611 }
2612 if (ret)
2613 err = ret;
2614
2615 search = end + 1;
2616
2617 if (need_resched()) {
2618 mutex_unlock(&info->extent_ins_mutex);
2619 cond_resched();
2620 mutex_lock(&info->extent_ins_mutex);
2621 }
2622 }
2623
2624 if (nr) {
2625 ret = free_extents(trans, extent_root, &delete_list);
2626 BUG_ON(ret);
2627 }
2628
2629 if (all && skipped) {
2630 INIT_LIST_HEAD(&delete_list);
2631 search = 0;
2632 nr = 0;
2633 goto again;
2634 }
2635
2636 return err;
2637}
2638
2639/*
2640 * remove an extent from the root, returns 0 on success
2641 */
2642static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2643 struct btrfs_root *root,
2644 u64 bytenr, u64 num_bytes, u64 parent,
2645 u64 root_objectid, u64 ref_generation,
2646 u64 owner_objectid, int pin)
2647{
2648 struct btrfs_root *extent_root = root->fs_info->extent_root;
2649 int pending_ret;
2650 int ret;
2651
2652 WARN_ON(num_bytes < root->sectorsize);
2653 if (root == extent_root) {
2654 struct pending_extent_op *extent_op = NULL;
2655
2656 mutex_lock(&root->fs_info->extent_ins_mutex);
2657 if (test_range_bit(&root->fs_info->extent_ins, bytenr,
2658 bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
2659 u64 priv;
2660 ret = get_state_private(&root->fs_info->extent_ins,
2661 bytenr, &priv);
2662 BUG_ON(ret);
2663 extent_op = (struct pending_extent_op *)
2664 (unsigned long)priv;
2665
2666 extent_op->del = 1;
2667 if (extent_op->type == PENDING_EXTENT_INSERT) {
2668 mutex_unlock(&root->fs_info->extent_ins_mutex);
2669 return 0;
2670 }
2671 }
2672
2673 if (extent_op) {
2674 ref_generation = extent_op->orig_generation;
2675 parent = extent_op->orig_parent;
2676 }
2677
2678 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2679 BUG_ON(!extent_op);
2680
2681 extent_op->type = PENDING_EXTENT_DELETE;
2682 extent_op->bytenr = bytenr;
2683 extent_op->num_bytes = num_bytes;
2684 extent_op->parent = parent;
2685 extent_op->orig_parent = parent;
2686 extent_op->generation = ref_generation;
2687 extent_op->orig_generation = ref_generation;
2688 extent_op->level = (int)owner_objectid;
2689 INIT_LIST_HEAD(&extent_op->list);
2690 extent_op->del = 0;
2691
2692 set_extent_bits(&root->fs_info->pending_del,
2693 bytenr, bytenr + num_bytes - 1,
2694 EXTENT_WRITEBACK, GFP_NOFS);
2695 set_state_private(&root->fs_info->pending_del,
2696 bytenr, (unsigned long)extent_op);
2697 mutex_unlock(&root->fs_info->extent_ins_mutex);
2698 return 0;
2699 }
2700 /* if metadata always pin */
2701 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
2702 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2703 struct btrfs_block_group_cache *cache;
2704
2705 /* btrfs_free_reserved_extent */
2706 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
2707 BUG_ON(!cache);
2708 btrfs_add_free_space(cache, bytenr, num_bytes);
2709 put_block_group(cache);
2710 update_reserved_extents(root, bytenr, num_bytes, 0);
2711 return 0;
2712 }
2713 pin = 1;
2714 }
2715
2716 /* if data pin when any transaction has committed this */
2717 if (ref_generation != trans->transid)
2718 pin = 1;
2719
2720 ret = __free_extent(trans, root, bytenr, num_bytes, parent,
2721 root_objectid, ref_generation,
2722 owner_objectid, pin, pin == 0);
2723
2724 finish_current_insert(trans, root->fs_info->extent_root, 0);
2725 pending_ret = del_pending_extents(trans, root->fs_info->extent_root, 0);
2726 return ret ? ret : pending_ret;
2727}
2728
2729int btrfs_free_extent(struct btrfs_trans_handle *trans,
2730 struct btrfs_root *root,
2731 u64 bytenr, u64 num_bytes, u64 parent,
2732 u64 root_objectid, u64 ref_generation,
2733 u64 owner_objectid, int pin)
2734{
2735 int ret;
2736
2737 ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent,
2738 root_objectid, ref_generation,
2739 owner_objectid, pin);
2740 return ret;
2741}
2742
2743static u64 stripe_align(struct btrfs_root *root, u64 val)
2744{
2745 u64 mask = ((u64)root->stripesize - 1);
2746 u64 ret = (val + mask) & ~mask;
2747 return ret;
2748}
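
/*
 * Illustrative sketch (standalone, not part of extent-tree.c):
 * stripe_align() above is the usual round-up-with-mask idiom, which is
 * only correct when root->stripesize is a power of two.  The 64K
 * stripe size below is picked arbitrarily for the example:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t align_up(uint64_t val, uint64_t stripesize)
{
	uint64_t mask = stripesize - 1;	/* power-of-two size assumed */

	return (val + mask) & ~mask;
}

int main(void)
{
	/* 100000 rounded up to a 64K boundary is 131072 */
	printf("%llu\n", (unsigned long long)align_up(100000, 65536));
	return 0;
}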
2749
2750/*
2751 * walks the btree of allocated extents and finds a hole of a given size.
2752 * The key ins is changed to record the hole:
2753 * ins->objectid == block start
2754 * ins->flags = BTRFS_EXTENT_ITEM_KEY
2755 * ins->offset == number of blocks
2756 * Any available blocks before search_start are skipped.
2757 */
2758static noinline int find_free_extent(struct btrfs_trans_handle *trans,
2759 struct btrfs_root *orig_root,
2760 u64 num_bytes, u64 empty_size,
2761 u64 search_start, u64 search_end,
2762 u64 hint_byte, struct btrfs_key *ins,
2763 u64 exclude_start, u64 exclude_nr,
2764 int data)
2765{
2766 int ret = 0;
2767 struct btrfs_root *root = orig_root->fs_info->extent_root;
2768 u64 total_needed = num_bytes;
2769 u64 *last_ptr = NULL;
2770 u64 last_wanted = 0;
2771 struct btrfs_block_group_cache *block_group = NULL;
2772 int chunk_alloc_done = 0;
2773 int empty_cluster = 2 * 1024 * 1024;
2774 int allowed_chunk_alloc = 0;
2775 struct list_head *head = NULL, *cur = NULL;
2776 int loop = 0;
2777 int extra_loop = 0;
2778 struct btrfs_space_info *space_info;
2779
2780 WARN_ON(num_bytes < root->sectorsize);
2781 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
2782 ins->objectid = 0;
2783 ins->offset = 0;
2784
2785 if (orig_root->ref_cows || empty_size)
2786 allowed_chunk_alloc = 1;
2787
2788 if (data & BTRFS_BLOCK_GROUP_METADATA) {
2789 last_ptr = &root->fs_info->last_alloc;
2790 empty_cluster = 64 * 1024;
2791 }
2792
2793 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
2794 last_ptr = &root->fs_info->last_data_alloc;
2795
2796 if (last_ptr) {
2797 if (*last_ptr) {
2798 hint_byte = *last_ptr;
2799 last_wanted = *last_ptr;
2800 } else
2801 empty_size += empty_cluster;
2802 } else {
2803 empty_cluster = 0;
2804 }
2805 search_start = max(search_start, first_logical_byte(root, 0));
2806 search_start = max(search_start, hint_byte);
2807
2808 if (last_wanted && search_start != last_wanted) {
2809 last_wanted = 0;
2810 empty_size += empty_cluster;
2811 }
2812
2813 total_needed += empty_size;
2814 block_group = btrfs_lookup_block_group(root->fs_info, search_start);
2815 if (!block_group)
2816 block_group = btrfs_lookup_first_block_group(root->fs_info,
2817 search_start);
2818 space_info = __find_space_info(root->fs_info, data);
2819
2820 down_read(&space_info->groups_sem);
2821 while (1) {
2822 struct btrfs_free_space *free_space;
2823 /*
2824		 * the only way this happens is if our hint points to a block
2825		 * group that's not of the proper type; while looping this
2826		 * should never happen
2827 */
2828 if (empty_size)
2829 extra_loop = 1;
2830
2831 if (!block_group)
2832 goto new_group_no_lock;
2833
2834 if (unlikely(!block_group->cached)) {
2835 mutex_lock(&block_group->cache_mutex);
2836 ret = cache_block_group(root, block_group);
2837 mutex_unlock(&block_group->cache_mutex);
2838 if (ret)
2839 break;
2840 }
2841
2842 mutex_lock(&block_group->alloc_mutex);
2843 if (unlikely(!block_group_bits(block_group, data)))
2844 goto new_group;
2845
2846 if (unlikely(block_group->ro))
2847 goto new_group;
2848
2849 free_space = btrfs_find_free_space(block_group, search_start,
2850 total_needed);
2851 if (free_space) {
2852 u64 start = block_group->key.objectid;
2853 u64 end = block_group->key.objectid +
2854 block_group->key.offset;
2855
2856 search_start = stripe_align(root, free_space->offset);
2857
2858 /* move on to the next group */
2859 if (search_start + num_bytes >= search_end)
2860 goto new_group;
2861
2862 /* move on to the next group */
2863 if (search_start + num_bytes > end)
2864 goto new_group;
2865
2866 if (last_wanted && search_start != last_wanted) {
2867 total_needed += empty_cluster;
2868 empty_size += empty_cluster;
2869 last_wanted = 0;
2870 /*
2871 * if search_start is still in this block group
2872 * then we just re-search this block group
2873 */
2874 if (search_start >= start &&
2875 search_start < end) {
2876 mutex_unlock(&block_group->alloc_mutex);
2877 continue;
2878 }
2879
2880 /* else we go to the next block group */
2881 goto new_group;
2882 }
2883
2884 if (exclude_nr > 0 &&
2885 (search_start + num_bytes > exclude_start &&
2886 search_start < exclude_start + exclude_nr)) {
2887 search_start = exclude_start + exclude_nr;
2888 /*
2889 * if search_start is still in this block group
2890 * then we just re-search this block group
2891 */
2892 if (search_start >= start &&
2893 search_start < end) {
2894 mutex_unlock(&block_group->alloc_mutex);
2895 last_wanted = 0;
2896 continue;
2897 }
2898
2899 /* else we go to the next block group */
2900 goto new_group;
2901 }
2902
2903 ins->objectid = search_start;
2904 ins->offset = num_bytes;
2905
2906 btrfs_remove_free_space_lock(block_group, search_start,
2907 num_bytes);
2908			/* we are all good, let's return */
2909 mutex_unlock(&block_group->alloc_mutex);
2910 break;
2911 }
2912new_group:
2913 mutex_unlock(&block_group->alloc_mutex);
2914 put_block_group(block_group);
2915 block_group = NULL;
2916new_group_no_lock:
2917 /* don't try to compare new allocations against the
2918 * last allocation any more
2919 */
2920 last_wanted = 0;
2921
2922 /*
2923 * Here's how this works.
2924 * loop == 0: we were searching a block group via a hint
2925 * and didn't find anything, so we start at
2926 * the head of the block groups and keep searching
2927 * loop == 1: we're searching through all of the block groups
2928 * if we hit the head again we have searched
2929 * all of the block groups for this space and we
2930	 *			  need to try to allocate, and if we can't, error out.
2931 * loop == 2: we allocated more space and are looping through
2932 * all of the block groups again.
2933 */
2934 if (loop == 0) {
2935 head = &space_info->block_groups;
2936 cur = head->next;
2937 loop++;
2938 } else if (loop == 1 && cur == head) {
2939 int keep_going;
2940
2941 /* at this point we give up on the empty_size
2942 * allocations and just try to allocate the min
2943 * space.
2944 *
2945 * The extra_loop field was set if an empty_size
2946			 * allocation was attempted above, and if it was,
2947			 * we need to try the loop again without
2948 * the additional empty_size.
2949 */
2950 total_needed -= empty_size;
2951 empty_size = 0;
2952 keep_going = extra_loop;
2953 loop++;
2954
2955 if (allowed_chunk_alloc && !chunk_alloc_done) {
2956 up_read(&space_info->groups_sem);
2957 ret = do_chunk_alloc(trans, root, num_bytes +
2958 2 * 1024 * 1024, data, 1);
2959 down_read(&space_info->groups_sem);
2960 if (ret < 0)
2961 goto loop_check;
2962 head = &space_info->block_groups;
2963 /*
2964 * we've allocated a new chunk, keep
2965 * trying
2966 */
2967 keep_going = 1;
2968 chunk_alloc_done = 1;
2969 } else if (!allowed_chunk_alloc) {
2970 space_info->force_alloc = 1;
2971 }
2972loop_check:
2973 if (keep_going) {
2974 cur = head->next;
2975 extra_loop = 0;
2976 } else {
2977 break;
2978 }
2979 } else if (cur == head) {
2980 break;
2981 }
2982
2983 block_group = list_entry(cur, struct btrfs_block_group_cache,
2984 list);
2985 atomic_inc(&block_group->count);
2986
2987 search_start = block_group->key.objectid;
2988 cur = cur->next;
2989 }
2990
2991 /* we found what we needed */
2992 if (ins->objectid) {
2993 if (!(data & BTRFS_BLOCK_GROUP_DATA))
2994 trans->block_group = block_group->key.objectid;
2995
2996 if (last_ptr)
2997 *last_ptr = ins->objectid + ins->offset;
2998 ret = 0;
2999 } else if (!ret) {
3000 printk(KERN_ERR "btrfs searching for %llu bytes, "
3001 "num_bytes %llu, loop %d, allowed_alloc %d\n",
3002 (unsigned long long)total_needed,
3003 (unsigned long long)num_bytes,
3004 loop, allowed_chunk_alloc);
3005 ret = -ENOSPC;
3006 }
3007 if (block_group)
3008 put_block_group(block_group);
3009
3010 up_read(&space_info->groups_sem);
3011 return ret;
3012}
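
/*
 * Illustrative sketch (standalone, not part of extent-tree.c; names
 * made up): the loop above is a three-pass search -- pass 0 starts
 * from the hinted block group, pass 1 walks every group and may
 * allocate a new chunk when it wraps around, pass 2 rescans the
 * groups after that allocation.  Stripped of the btrfs specifics:
 */
#include <stdbool.h>
#include <stdio.h>

/* stands in for btrfs_find_free_space(); hypothetical predicate */
static bool group_has_space(int group)
{
	return group == 2;
}

static int find_free(int ngroups)
{
	bool allocated_chunk = false;

	for (int pass = 0; pass <= 2; pass++) {
		for (int g = 0; g < ngroups; g++)
			if (group_has_space(g))
				return g;	/* found what we needed */
		if (pass == 1 && !allocated_chunk)
			allocated_chunk = true;	/* do_chunk_alloc() here */
		else if (pass >= 1)
			return -1;		/* maps to -ENOSPC */
	}
	return -1;
}

int main(void)
{
	printf("allocated from group %d\n", find_free(4));
	return 0;
}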
3013
3014static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3015{
3016 struct btrfs_block_group_cache *cache;
3017 struct list_head *l;
3018
3019 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
3020 (unsigned long long)(info->total_bytes - info->bytes_used -
3021 info->bytes_pinned - info->bytes_reserved),
3022 (info->full) ? "" : "not ");
3023
3024 down_read(&info->groups_sem);
3025 list_for_each(l, &info->block_groups) {
3026 cache = list_entry(l, struct btrfs_block_group_cache, list);
3027 spin_lock(&cache->lock);
3028 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
3029 "%llu pinned %llu reserved\n",
3030 (unsigned long long)cache->key.objectid,
3031 (unsigned long long)cache->key.offset,
3032 (unsigned long long)btrfs_block_group_used(&cache->item),
3033 (unsigned long long)cache->pinned,
3034 (unsigned long long)cache->reserved);
3035 btrfs_dump_free_space(cache, bytes);
3036 spin_unlock(&cache->lock);
3037 }
3038 up_read(&info->groups_sem);
3039}
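
/*
 * Worked example for the message above (values hypothetical): a
 * space_info with total_bytes = 10 GiB, bytes_used = 6 GiB,
 * bytes_pinned = 1 GiB and bytes_reserved = 1 GiB reports
 * 10 - 6 - 1 - 1 = 2 GiB free.
 */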
3040
3041static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3042 struct btrfs_root *root,
3043 u64 num_bytes, u64 min_alloc_size,
3044 u64 empty_size, u64 hint_byte,
3045 u64 search_end, struct btrfs_key *ins,
3046 u64 data)
3047{
3048 int ret;
3049 u64 search_start = 0;
3050 u64 alloc_profile;
3051 struct btrfs_fs_info *info = root->fs_info;
3052
3053 if (data) {
3054 alloc_profile = info->avail_data_alloc_bits &
3055 info->data_alloc_profile;
3056 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
3057 } else if (root == root->fs_info->chunk_root) {
3058 alloc_profile = info->avail_system_alloc_bits &
3059 info->system_alloc_profile;
3060 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
3061 } else {
3062 alloc_profile = info->avail_metadata_alloc_bits &
3063 info->metadata_alloc_profile;
3064 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
3065 }
3066again:
3067 data = btrfs_reduce_alloc_profile(root, data);
3068 /*
3069 * the only place that sets empty_size is btrfs_realloc_node, which
3070 * is not called recursively on allocations
3071 */
3072 if (empty_size || root->ref_cows) {
3073 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
3074 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3075 2 * 1024 * 1024,
3076 BTRFS_BLOCK_GROUP_METADATA |
3077 (info->metadata_alloc_profile &
3078 info->avail_metadata_alloc_bits), 0);
3079 }
3080 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3081 num_bytes + 2 * 1024 * 1024, data, 0);
3082 }
3083
3084 WARN_ON(num_bytes < root->sectorsize);
3085 ret = find_free_extent(trans, root, num_bytes, empty_size,
3086 search_start, search_end, hint_byte, ins,
3087 trans->alloc_exclude_start,
3088 trans->alloc_exclude_nr, data);
3089
3090 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
3091 num_bytes = num_bytes >> 1;
3092 num_bytes = num_bytes & ~(root->sectorsize - 1);
3093 num_bytes = max(num_bytes, min_alloc_size);
3094 do_chunk_alloc(trans, root->fs_info->extent_root,
3095 num_bytes, data, 1);
3096 goto again;
3097 }
3098 if (ret) {
3099 struct btrfs_space_info *sinfo;
3100
3101 sinfo = __find_space_info(root->fs_info, data);
3102 printk(KERN_ERR "btrfs allocation failed flags %llu, "
3103 "wanted %llu\n", (unsigned long long)data,
3104 (unsigned long long)num_bytes);
3105 dump_space_info(sinfo, num_bytes);
3106 BUG();
3107 }
3108
3109 return ret;
3110}
3111
3112int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
3113{
3114 struct btrfs_block_group_cache *cache;
3115 int ret = 0;
3116
3117 cache = btrfs_lookup_block_group(root->fs_info, start);
3118 if (!cache) {
3119 printk(KERN_ERR "Unable to find block group for %llu\n",
3120 (unsigned long long)start);
3121 return -ENOSPC;
3122 }
3123
3124 ret = btrfs_discard_extent(root, start, len);
3125
3126 btrfs_add_free_space(cache, start, len);
3127 put_block_group(cache);
3128 update_reserved_extents(root, start, len, 0);
3129
3130 return ret;
3131}
3132
3133int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3134 struct btrfs_root *root,
3135 u64 num_bytes, u64 min_alloc_size,
3136 u64 empty_size, u64 hint_byte,
3137 u64 search_end, struct btrfs_key *ins,
3138 u64 data)
3139{
3140 int ret;
3141 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
3142 empty_size, hint_byte, search_end, ins,
3143 data);
3144 update_reserved_extents(root, ins->objectid, ins->offset, 1);
3145 return ret;
3146}
3147
3148static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3149 struct btrfs_root *root, u64 parent,
3150 u64 root_objectid, u64 ref_generation,
3151 u64 owner, struct btrfs_key *ins)
3152{
3153 int ret;
3154 int pending_ret;
3155 u64 super_used;
3156 u64 root_used;
3157 u64 num_bytes = ins->offset;
3158 u32 sizes[2];
3159 struct btrfs_fs_info *info = root->fs_info;
3160 struct btrfs_root *extent_root = info->extent_root;
3161 struct btrfs_extent_item *extent_item;
3162 struct btrfs_extent_ref *ref;
3163 struct btrfs_path *path;
3164 struct btrfs_key keys[2];
3165
3166 if (parent == 0)
3167 parent = ins->objectid;
3168
3169 /* block accounting for super block */
3170 spin_lock(&info->delalloc_lock);
3171 super_used = btrfs_super_bytes_used(&info->super_copy);
3172 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
3173
3174 /* block accounting for root item */
3175 root_used = btrfs_root_used(&root->root_item);
3176 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
3177 spin_unlock(&info->delalloc_lock);
3178
3179 if (root == extent_root) {
3180 struct pending_extent_op *extent_op;
3181
3182 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
3183 BUG_ON(!extent_op);
3184
3185 extent_op->type = PENDING_EXTENT_INSERT;
3186 extent_op->bytenr = ins->objectid;
3187 extent_op->num_bytes = ins->offset;
3188 extent_op->parent = parent;
3189 extent_op->orig_parent = 0;
3190 extent_op->generation = ref_generation;
3191 extent_op->orig_generation = 0;
3192 extent_op->level = (int)owner;
3193 INIT_LIST_HEAD(&extent_op->list);
3194 extent_op->del = 0;
3195
3196 mutex_lock(&root->fs_info->extent_ins_mutex);
3197 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
3198 ins->objectid + ins->offset - 1,
3199 EXTENT_WRITEBACK, GFP_NOFS);
3200 set_state_private(&root->fs_info->extent_ins,
3201 ins->objectid, (unsigned long)extent_op);
3202 mutex_unlock(&root->fs_info->extent_ins_mutex);
3203 goto update_block;
3204 }
3205
3206 memcpy(&keys[0], ins, sizeof(*ins));
3207 keys[1].objectid = ins->objectid;
3208 keys[1].type = BTRFS_EXTENT_REF_KEY;
3209 keys[1].offset = parent;
3210 sizes[0] = sizeof(*extent_item);
3211 sizes[1] = sizeof(*ref);
3212
3213 path = btrfs_alloc_path();
3214 BUG_ON(!path);
3215
3216 ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
3217 sizes, 2);
3218 BUG_ON(ret);
3219
3220 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3221 struct btrfs_extent_item);
3222 btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
3223 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3224 struct btrfs_extent_ref);
3225
3226 btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
3227 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
3228 btrfs_set_ref_objectid(path->nodes[0], ref, owner);
3229 btrfs_set_ref_num_refs(path->nodes[0], ref, 1);
3230
3231 btrfs_mark_buffer_dirty(path->nodes[0]);
3232
3233 trans->alloc_exclude_start = 0;
3234 trans->alloc_exclude_nr = 0;
3235 btrfs_free_path(path);
3236 finish_current_insert(trans, extent_root, 0);
3237 pending_ret = del_pending_extents(trans, extent_root, 0);
3238
3239 if (ret)
3240 goto out;
3241 if (pending_ret) {
3242 ret = pending_ret;
3243 goto out;
3244 }
3245
3246update_block:
3247 ret = update_block_group(trans, root, ins->objectid,
3248 ins->offset, 1, 0);
3249 if (ret) {
3250 printk(KERN_ERR "btrfs update block group failed for %llu "
3251 "%llu\n", (unsigned long long)ins->objectid,
3252 (unsigned long long)ins->offset);
3253 BUG();
3254 }
3255out:
3256 return ret;
3257}
3258
3259int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3260 struct btrfs_root *root, u64 parent,
3261 u64 root_objectid, u64 ref_generation,
3262 u64 owner, struct btrfs_key *ins)
3263{
3264 int ret;
3265
3266 if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
3267 return 0;
3268 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
3269 ref_generation, owner, ins);
3270 update_reserved_extents(root, ins->objectid, ins->offset, 0);
3271 return ret;
3272}
3273
3274/*
3275 * this is used by the tree logging recovery code. It records that
3276 * an extent has been allocated and makes sure to clear the free
3277 * space cache bits as well
3278 */
3279int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
3280 struct btrfs_root *root, u64 parent,
3281 u64 root_objectid, u64 ref_generation,
3282 u64 owner, struct btrfs_key *ins)
3283{
3284 int ret;
3285 struct btrfs_block_group_cache *block_group;
3286
3287 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
3288 mutex_lock(&block_group->cache_mutex);
3289 cache_block_group(root, block_group);
3290 mutex_unlock(&block_group->cache_mutex);
3291
3292 ret = btrfs_remove_free_space(block_group, ins->objectid,
3293 ins->offset);
3294 BUG_ON(ret);
3295 put_block_group(block_group);
3296 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
3297 ref_generation, owner, ins);
3298 return ret;
3299}
3300
3301/*
3302 * finds a free extent and does all the dirty work required for allocation.
3303 * returns the key for the extent through ins, and a tree buffer for
3304 * the first block of the extent through buf.
3305 *
3306 * returns 0 if everything worked, non-zero otherwise.
3307 */
3308int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
3309 struct btrfs_root *root,
3310 u64 num_bytes, u64 parent, u64 min_alloc_size,
3311 u64 root_objectid, u64 ref_generation,
3312 u64 owner_objectid, u64 empty_size, u64 hint_byte,
3313 u64 search_end, struct btrfs_key *ins, u64 data)
3314{
3315 int ret;
3316
3317 ret = __btrfs_reserve_extent(trans, root, num_bytes,
3318 min_alloc_size, empty_size, hint_byte,
3319 search_end, ins, data);
3320 BUG_ON(ret);
3321 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
3322 ret = __btrfs_alloc_reserved_extent(trans, root, parent,
3323 root_objectid, ref_generation,
3324 owner_objectid, ins);
3325 BUG_ON(ret);
3326
3327 } else {
3328 update_reserved_extents(root, ins->objectid, ins->offset, 1);
3329 }
3330 return ret;
3331}
3332
3333struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
3334 struct btrfs_root *root,
3335 u64 bytenr, u32 blocksize)
3336{
3337 struct extent_buffer *buf;
3338
3339 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
3340 if (!buf)
3341 return ERR_PTR(-ENOMEM);
3342 btrfs_set_header_generation(buf, trans->transid);
3343 btrfs_tree_lock(buf);
3344 clean_tree_block(trans, root, buf);
3345 btrfs_set_buffer_uptodate(buf);
3346 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
3347 set_extent_dirty(&root->dirty_log_pages, buf->start,
3348 buf->start + buf->len - 1, GFP_NOFS);
3349 } else {
3350 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
3351 buf->start + buf->len - 1, GFP_NOFS);
3352 }
3353 trans->blocks_used++;
3354 return buf;
3355}
3356
3357/*
3358 * helper function to allocate a block for a given tree
3359 * returns the tree buffer or NULL.
3360 */
3361struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
3362 struct btrfs_root *root,
3363 u32 blocksize, u64 parent,
3364 u64 root_objectid,
3365 u64 ref_generation,
3366 int level,
3367 u64 hint,
3368 u64 empty_size)
3369{
3370 struct btrfs_key ins;
3371 int ret;
3372 struct extent_buffer *buf;
3373
3374 ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
3375 root_objectid, ref_generation, level,
3376 empty_size, hint, (u64)-1, &ins, 0);
3377 if (ret) {
3378 BUG_ON(ret > 0);
3379 return ERR_PTR(ret);
3380 }
3381
3382 buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
3383 return buf;
3384}
3385
3386int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3387 struct btrfs_root *root, struct extent_buffer *leaf)
3388{
3389 u64 leaf_owner;
3390 u64 leaf_generation;
3391 struct btrfs_key key;
3392 struct btrfs_file_extent_item *fi;
3393 int i;
3394 int nritems;
3395 int ret;
3396
3397 BUG_ON(!btrfs_is_leaf(leaf));
3398 nritems = btrfs_header_nritems(leaf);
3399 leaf_owner = btrfs_header_owner(leaf);
3400 leaf_generation = btrfs_header_generation(leaf);
3401
3402 for (i = 0; i < nritems; i++) {
3403 u64 disk_bytenr;
3404 cond_resched();
3405
3406 btrfs_item_key_to_cpu(leaf, &key, i);
3407 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3408 continue;
3409 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3410 if (btrfs_file_extent_type(leaf, fi) ==
3411 BTRFS_FILE_EXTENT_INLINE)
3412 continue;
3413 /*
3414 * FIXME make sure to insert a trans record that
3415 * repeats the snapshot del on crash
3416 */
3417 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
3418 if (disk_bytenr == 0)
3419 continue;
3420
3421 ret = __btrfs_free_extent(trans, root, disk_bytenr,
3422 btrfs_file_extent_disk_num_bytes(leaf, fi),
3423 leaf->start, leaf_owner, leaf_generation,
3424 key.objectid, 0);
3425 BUG_ON(ret);
3426
3427 atomic_inc(&root->fs_info->throttle_gen);
3428 wake_up(&root->fs_info->transaction_throttle);
3429 cond_resched();
3430 }
3431 return 0;
3432}
3433
3434static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3435 struct btrfs_root *root,
3436 struct btrfs_leaf_ref *ref)
3437{
3438 int i;
3439 int ret;
3440 struct btrfs_extent_info *info = ref->extents;
3441
3442 for (i = 0; i < ref->nritems; i++) {
3443 ret = __btrfs_free_extent(trans, root, info->bytenr,
3444 info->num_bytes, ref->bytenr,
3445 ref->owner, ref->generation,
3446 info->objectid, 0);
3447
3448 atomic_inc(&root->fs_info->throttle_gen);
3449 wake_up(&root->fs_info->transaction_throttle);
3450 cond_resched();
3451
3452 BUG_ON(ret);
3453 info++;
3454 }
3455
3456 return 0;
3457}
3458
3459static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
3460 u64 len, u32 *refs)
3461{
3462 int ret;
3463
3464 ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
3465 BUG_ON(ret);
3466
3467#if 0 /* some debugging code in case we see problems here */
3468 /* if the refs count is one, it won't get increased again. But
3469 * if the ref count is > 1, someone may be decreasing it at
3470 * the same time we are.
3471 */
3472 if (*refs != 1) {
3473 struct extent_buffer *eb = NULL;
3474 eb = btrfs_find_create_tree_block(root, start, len);
3475 if (eb)
3476 btrfs_tree_lock(eb);
3477
3478 mutex_lock(&root->fs_info->alloc_mutex);
3479 ret = lookup_extent_ref(NULL, root, start, len, refs);
3480 BUG_ON(ret);
3481 mutex_unlock(&root->fs_info->alloc_mutex);
3482
3483 if (eb) {
3484 btrfs_tree_unlock(eb);
3485 free_extent_buffer(eb);
3486 }
3487 if (*refs == 1) {
3488 printk(KERN_ERR "btrfs block %llu went down to one "
3489 "during drop_snap\n", (unsigned long long)start);
3490 }
3491
3492 }
3493#endif
3494
3495 cond_resched();
3496 return ret;
3497}
3498
3499/*
3500 * helper function for drop_snapshot, this walks down the tree dropping ref
3501 * counts as it goes.
3502 */
3503static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3504 struct btrfs_root *root,
3505 struct btrfs_path *path, int *level)
3506{
3507 u64 root_owner;
3508 u64 root_gen;
3509 u64 bytenr;
3510 u64 ptr_gen;
3511 struct extent_buffer *next;
3512 struct extent_buffer *cur;
3513 struct extent_buffer *parent;
3514 struct btrfs_leaf_ref *ref;
3515 u32 blocksize;
3516 int ret;
3517 u32 refs;
3518
3519 WARN_ON(*level < 0);
3520 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3521 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
3522 path->nodes[*level]->len, &refs);
3523 BUG_ON(ret);
3524 if (refs > 1)
3525 goto out;
3526
3527 /*
3528 * walk down to the last node level and free all the leaves
3529 */
3530 while (*level >= 0) {
3531 WARN_ON(*level < 0);
3532 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3533 cur = path->nodes[*level];
3534
3535 if (btrfs_header_level(cur) != *level)
3536 WARN_ON(1);
3537
3538 if (path->slots[*level] >=
3539 btrfs_header_nritems(cur))
3540 break;
3541 if (*level == 0) {
3542 ret = btrfs_drop_leaf_ref(trans, root, cur);
3543 BUG_ON(ret);
3544 break;
3545 }
3546 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3547 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3548 blocksize = btrfs_level_size(root, *level - 1);
3549
3550 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
3551 BUG_ON(ret);
3552 if (refs != 1) {
3553 parent = path->nodes[*level];
3554 root_owner = btrfs_header_owner(parent);
3555 root_gen = btrfs_header_generation(parent);
3556 path->slots[*level]++;
3557
3558 ret = __btrfs_free_extent(trans, root, bytenr,
3559 blocksize, parent->start,
3560 root_owner, root_gen,
3561 *level - 1, 1);
3562 BUG_ON(ret);
3563
3564 atomic_inc(&root->fs_info->throttle_gen);
3565 wake_up(&root->fs_info->transaction_throttle);
3566 cond_resched();
3567
3568 continue;
3569 }
3570 /*
3571 * at this point, we have a single ref, and since the
3572		 * only place referencing this extent is a dead root,
3573 * the reference count should never go higher.
3574 * So, we don't need to check it again
3575 */
3576 if (*level == 1) {
3577 ref = btrfs_lookup_leaf_ref(root, bytenr);
3578 if (ref && ref->generation != ptr_gen) {
3579 btrfs_free_leaf_ref(root, ref);
3580 ref = NULL;
3581 }
3582 if (ref) {
3583 ret = cache_drop_leaf_ref(trans, root, ref);
3584 BUG_ON(ret);
3585 btrfs_remove_leaf_ref(root, ref);
3586 btrfs_free_leaf_ref(root, ref);
3587 *level = 0;
3588 break;
3589 }
3590 }
3591 next = btrfs_find_tree_block(root, bytenr, blocksize);
3592 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
3593 free_extent_buffer(next);
3594
3595 next = read_tree_block(root, bytenr, blocksize,
3596 ptr_gen);
3597 cond_resched();
3598#if 0
3599 /*
3600			 * this is a debugging check and can go away;
3601 * the ref should never go all the way down to 1
3602 * at this point
3603 */
3604 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
3605 &refs);
3606 BUG_ON(ret);
3607 WARN_ON(refs != 1);
3608#endif
3609 }
3610 WARN_ON(*level <= 0);
3611 if (path->nodes[*level-1])
3612 free_extent_buffer(path->nodes[*level-1]);
3613 path->nodes[*level-1] = next;
3614 *level = btrfs_header_level(next);
3615 path->slots[*level] = 0;
3616 cond_resched();
3617 }
3618out:
3619 WARN_ON(*level < 0);
3620 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3621
3622 if (path->nodes[*level] == root->node) {
3623 parent = path->nodes[*level];
3624 bytenr = path->nodes[*level]->start;
3625 } else {
3626 parent = path->nodes[*level + 1];
3627 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
3628 }
3629
3630 blocksize = btrfs_level_size(root, *level);
3631 root_owner = btrfs_header_owner(parent);
3632 root_gen = btrfs_header_generation(parent);
3633
3634 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
3635 parent->start, root_owner, root_gen,
3636 *level, 1);
3637 free_extent_buffer(path->nodes[*level]);
3638 path->nodes[*level] = NULL;
3639 *level += 1;
3640 BUG_ON(ret);
3641
3642 cond_resched();
3643 return 0;
3644}
3645
3646/*
3647 * helper function for drop_subtree, this function is similar to
3648 * walk_down_tree. The main difference is that it checks reference
3649 * counts while tree blocks are locked.
3650 */
3651static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
3652 struct btrfs_root *root,
3653 struct btrfs_path *path, int *level)
3654{
3655 struct extent_buffer *next;
3656 struct extent_buffer *cur;
3657 struct extent_buffer *parent;
3658 u64 bytenr;
3659 u64 ptr_gen;
3660 u32 blocksize;
3661 u32 refs;
3662 int ret;
3663
3664 cur = path->nodes[*level];
3665 ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len,
3666 &refs);
3667 BUG_ON(ret);
3668 if (refs > 1)
3669 goto out;
3670
3671 while (*level >= 0) {
3672 cur = path->nodes[*level];
3673 if (*level == 0) {
3674 ret = btrfs_drop_leaf_ref(trans, root, cur);
3675 BUG_ON(ret);
3676 clean_tree_block(trans, root, cur);
3677 break;
3678 }
3679 if (path->slots[*level] >= btrfs_header_nritems(cur)) {
3680 clean_tree_block(trans, root, cur);
3681 break;
3682 }
3683
3684 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3685 blocksize = btrfs_level_size(root, *level - 1);
3686 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3687
3688 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3689 btrfs_tree_lock(next);
3690
3691 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
3692 &refs);
3693 BUG_ON(ret);
3694 if (refs > 1) {
3695 parent = path->nodes[*level];
3696 ret = btrfs_free_extent(trans, root, bytenr,
3697 blocksize, parent->start,
3698 btrfs_header_owner(parent),
3699 btrfs_header_generation(parent),
3700 *level - 1, 1);
3701 BUG_ON(ret);
3702 path->slots[*level]++;
3703 btrfs_tree_unlock(next);
3704 free_extent_buffer(next);
3705 continue;
3706 }
3707
3708 *level = btrfs_header_level(next);
3709 path->nodes[*level] = next;
3710 path->slots[*level] = 0;
3711 path->locks[*level] = 1;
3712 cond_resched();
3713 }
3714out:
3715 parent = path->nodes[*level + 1];
3716 bytenr = path->nodes[*level]->start;
3717 blocksize = path->nodes[*level]->len;
3718
3719 ret = btrfs_free_extent(trans, root, bytenr, blocksize,
3720 parent->start, btrfs_header_owner(parent),
3721 btrfs_header_generation(parent), *level, 1);
3722 BUG_ON(ret);
3723
3724 if (path->locks[*level]) {
3725 btrfs_tree_unlock(path->nodes[*level]);
3726 path->locks[*level] = 0;
3727 }
3728 free_extent_buffer(path->nodes[*level]);
3729 path->nodes[*level] = NULL;
3730 *level += 1;
3731 cond_resched();
3732 return 0;
3733}
3734
3735/*
3736 * helper for dropping snapshots. This walks back up the tree in the path
3737 * to find the first node higher up where we haven't yet gone through
3738 * all the slots
3739 */
3740static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
3741 struct btrfs_root *root,
3742 struct btrfs_path *path,
3743 int *level, int max_level)
3744{
3745 u64 root_owner;
3746 u64 root_gen;
3747 struct btrfs_root_item *root_item = &root->root_item;
3748 int i;
3749 int slot;
3750 int ret;
3751
3752 for (i = *level; i < max_level && path->nodes[i]; i++) {
3753 slot = path->slots[i];
3754 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
3755 struct extent_buffer *node;
3756 struct btrfs_disk_key disk_key;
3757 node = path->nodes[i];
3758 path->slots[i]++;
3759 *level = i;
3760 WARN_ON(*level == 0);
3761 btrfs_node_key(node, &disk_key, path->slots[i]);
3762 memcpy(&root_item->drop_progress,
3763 &disk_key, sizeof(disk_key));
3764 root_item->drop_level = i;
3765 return 0;
3766 } else {
3767 struct extent_buffer *parent;
3768 if (path->nodes[*level] == root->node)
3769 parent = path->nodes[*level];
3770 else
3771 parent = path->nodes[*level + 1];
3772
3773 root_owner = btrfs_header_owner(parent);
3774 root_gen = btrfs_header_generation(parent);
3775
3776 clean_tree_block(trans, root, path->nodes[*level]);
3777 ret = btrfs_free_extent(trans, root,
3778 path->nodes[*level]->start,
3779 path->nodes[*level]->len,
3780 parent->start, root_owner,
3781 root_gen, *level, 1);
3782 BUG_ON(ret);
3783 if (path->locks[*level]) {
3784 btrfs_tree_unlock(path->nodes[*level]);
3785 path->locks[*level] = 0;
3786 }
3787 free_extent_buffer(path->nodes[*level]);
3788 path->nodes[*level] = NULL;
3789 *level = i + 1;
3790 }
3791 }
3792 return 1;
3793}
3794
3795/*
3796 * drop the reference count on the tree rooted at 'root'. This traverses
3797 * the tree freeing any blocks that have a ref count of zero after being
3798 * decremented.
3799 */
3800int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
3801 *root)
3802{
3803 int ret = 0;
3804 int wret;
3805 int level;
3806 struct btrfs_path *path;
3807 int i;
3808 int orig_level;
3809 struct btrfs_root_item *root_item = &root->root_item;
3810
3811 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
3812 path = btrfs_alloc_path();
3813 BUG_ON(!path);
3814
3815 level = btrfs_header_level(root->node);
3816 orig_level = level;
3817 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
3818 path->nodes[level] = root->node;
3819 extent_buffer_get(root->node);
3820 path->slots[level] = 0;
3821 } else {
3822 struct btrfs_key key;
3823 struct btrfs_disk_key found_key;
3824 struct extent_buffer *node;
3825
3826 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
3827 level = root_item->drop_level;
3828 path->lowest_level = level;
3829 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3830 if (wret < 0) {
3831 ret = wret;
3832 goto out;
3833 }
3834 node = path->nodes[level];
3835 btrfs_node_key(node, &found_key, path->slots[level]);
3836 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
3837 sizeof(found_key)));
3838 /*
3839 * unlock our path, this is safe because only this
3840 * function is allowed to delete this snapshot
3841 */
3842 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3843 if (path->nodes[i] && path->locks[i]) {
3844 path->locks[i] = 0;
3845 btrfs_tree_unlock(path->nodes[i]);
3846 }
3847 }
3848 }
3849 while (1) {
3850 wret = walk_down_tree(trans, root, path, &level);
3851 if (wret > 0)
3852 break;
3853 if (wret < 0)
3854 ret = wret;
3855
3856 wret = walk_up_tree(trans, root, path, &level,
3857 BTRFS_MAX_LEVEL);
3858 if (wret > 0)
3859 break;
3860 if (wret < 0)
3861 ret = wret;
3862 if (trans->transaction->in_commit) {
3863 ret = -EAGAIN;
3864 break;
3865 }
3866 atomic_inc(&root->fs_info->throttle_gen);
3867 wake_up(&root->fs_info->transaction_throttle);
3868 }
3869 for (i = 0; i <= orig_level; i++) {
3870 if (path->nodes[i]) {
3871 free_extent_buffer(path->nodes[i]);
3872 path->nodes[i] = NULL;
3873 }
3874 }
3875out:
3876 btrfs_free_path(path);
3877 return ret;
3878}
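
/*
 * Illustrative sketch (standalone, not part of extent-tree.c; names
 * made up): drop_progress/drop_level act as a persistent cursor, so
 * btrfs_drop_snapshot() above can bail out with -EAGAIN at a commit
 * and resume later from the saved key.  The same resumable-walk shape
 * over an array, with an integer cursor standing in for the disk key:
 */
#include <stdio.h>

struct cursor {
	int pos;		/* plays the role of drop_progress */
};

/* drop a bounded batch per call, persisting the position in between */
static int drop_some(const int *items, int n, struct cursor *cur, int batch)
{
	int end = cur->pos + batch < n ? cur->pos + batch : n;

	while (cur->pos < end) {
		printf("dropped %d\n", items[cur->pos]);
		cur->pos++;
	}
	return cur->pos < n ? -1 : 0;	/* -1 plays the role of -EAGAIN */
}

int main(void)
{
	int items[] = { 10, 20, 30, 40, 50 };
	struct cursor cur = { 0 };

	while (drop_some(items, 5, &cur, 2))
		;	/* a new "transaction" would start here */
	return 0;
}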
3879
3880int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
3881 struct btrfs_root *root,
3882 struct extent_buffer *node,
3883 struct extent_buffer *parent)
3884{
3885 struct btrfs_path *path;
3886 int level;
3887 int parent_level;
3888 int ret = 0;
3889 int wret;
3890
3891 path = btrfs_alloc_path();
3892 BUG_ON(!path);
3893
3894 BUG_ON(!btrfs_tree_locked(parent));
3895 parent_level = btrfs_header_level(parent);
3896 extent_buffer_get(parent);
3897 path->nodes[parent_level] = parent;
3898 path->slots[parent_level] = btrfs_header_nritems(parent);
3899
3900 BUG_ON(!btrfs_tree_locked(node));
3901 level = btrfs_header_level(node);
3902 extent_buffer_get(node);
3903 path->nodes[level] = node;
3904 path->slots[level] = 0;
3905
3906 while (1) {
3907 wret = walk_down_subtree(trans, root, path, &level);
3908 if (wret < 0)
3909 ret = wret;
3910 if (wret != 0)
3911 break;
3912
3913 wret = walk_up_tree(trans, root, path, &level, parent_level);
3914 if (wret < 0)
3915 ret = wret;
3916 if (wret != 0)
3917 break;
3918 }
3919
3920 btrfs_free_path(path);
3921 return ret;
3922}
3923
3924static unsigned long calc_ra(unsigned long start, unsigned long last,
3925 unsigned long nr)
3926{
3927 return min(last, start + nr - 1);
3928}
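
/*
 * Worked example for calc_ra() above: it clamps the end of an nr-page
 * readahead window starting at 'start' so it never runs past 'last'.
 * With last = 100, start = 96 and nr = 32, the window end is
 * min(100, 96 + 32 - 1) = 100, i.e. readahead stops at the final page.
 */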
3929
3930static noinline int relocate_inode_pages(struct inode *inode, u64 start,
3931 u64 len)
3932{
3933 u64 page_start;
3934 u64 page_end;
3935 unsigned long first_index;
3936 unsigned long last_index;
3937 unsigned long i;
3938 struct page *page;
3939 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3940 struct file_ra_state *ra;
3941 struct btrfs_ordered_extent *ordered;
3942 unsigned int total_read = 0;
3943 unsigned int total_dirty = 0;
3944 int ret = 0;
3945
3946 ra = kzalloc(sizeof(*ra), GFP_NOFS);
3947
3948 mutex_lock(&inode->i_mutex);
3949 first_index = start >> PAGE_CACHE_SHIFT;
3950 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
3951
3952	/* make sure the dirty trick played by the caller works */
3953 ret = invalidate_inode_pages2_range(inode->i_mapping,
3954 first_index, last_index);
3955 if (ret)
3956 goto out_unlock;
3957
3958 file_ra_state_init(ra, inode->i_mapping);
3959
3960 for (i = first_index ; i <= last_index; i++) {
3961 if (total_read % ra->ra_pages == 0) {
3962 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
3963 calc_ra(i, last_index, ra->ra_pages));
3964 }
3965 total_read++;
3966again:
3967 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
3968 BUG_ON(1);
3969 page = grab_cache_page(inode->i_mapping, i);
3970 if (!page) {
3971 ret = -ENOMEM;
3972 goto out_unlock;
3973 }
3974 if (!PageUptodate(page)) {
3975 btrfs_readpage(NULL, page);
3976 lock_page(page);
3977 if (!PageUptodate(page)) {
3978 unlock_page(page);
3979 page_cache_release(page);
3980 ret = -EIO;
3981 goto out_unlock;
3982 }
3983 }
3984 wait_on_page_writeback(page);
3985
3986 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
3987 page_end = page_start + PAGE_CACHE_SIZE - 1;
3988 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3989
3990 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3991 if (ordered) {
3992 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3993 unlock_page(page);
3994 page_cache_release(page);
3995 btrfs_start_ordered_extent(inode, ordered, 1);
3996 btrfs_put_ordered_extent(ordered);
3997 goto again;
3998 }
3999 set_page_extent_mapped(page);
4000
4001 if (i == first_index)
4002 set_extent_bits(io_tree, page_start, page_end,
4003 EXTENT_BOUNDARY, GFP_NOFS);
4004 btrfs_set_extent_delalloc(inode, page_start, page_end);
4005
4006 set_page_dirty(page);
4007 total_dirty++;
4008
4009 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4010 unlock_page(page);
4011 page_cache_release(page);
4012 }
4013
4014out_unlock:
4015 kfree(ra);
4016 mutex_unlock(&inode->i_mutex);
4017 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
4018 return ret;
4019}
4020
4021static noinline int relocate_data_extent(struct inode *reloc_inode,
4022 struct btrfs_key *extent_key,
4023 u64 offset)
4024{
4025 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
4026 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
4027 struct extent_map *em;
4028 u64 start = extent_key->objectid - offset;
4029 u64 end = start + extent_key->offset - 1;
4030
4031 em = alloc_extent_map(GFP_NOFS);
4032 BUG_ON(!em || IS_ERR(em));
4033
4034 em->start = start;
4035 em->len = extent_key->offset;
4036 em->block_len = extent_key->offset;
4037 em->block_start = extent_key->objectid;
4038 em->bdev = root->fs_info->fs_devices->latest_bdev;
4039 set_bit(EXTENT_FLAG_PINNED, &em->flags);
4040
4041 /* setup extent map to cheat btrfs_readpage */
4042 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4043 while (1) {
4044 int ret;
4045 spin_lock(&em_tree->lock);
4046 ret = add_extent_mapping(em_tree, em);
4047 spin_unlock(&em_tree->lock);
4048 if (ret != -EEXIST) {
4049 free_extent_map(em);
4050 break;
4051 }
4052 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
4053 }
4054 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4055
4056 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
4057}
4058
4059struct btrfs_ref_path {
4060 u64 extent_start;
4061 u64 nodes[BTRFS_MAX_LEVEL];
4062 u64 root_objectid;
4063 u64 root_generation;
4064 u64 owner_objectid;
4065 u32 num_refs;
4066 int lowest_level;
4067 int current_level;
4068 int shared_level;
4069
4070 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
4071 u64 new_nodes[BTRFS_MAX_LEVEL];
4072};
4073
4074struct disk_extent {
4075 u64 ram_bytes;
4076 u64 disk_bytenr;
4077 u64 disk_num_bytes;
4078 u64 offset;
4079 u64 num_bytes;
4080 u8 compression;
4081 u8 encryption;
4082 u16 other_encoding;
4083};
4084
4085static int is_cowonly_root(u64 root_objectid)
4086{
4087 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
4088 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
4089 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
4090 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
4091 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
4092 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
4093 return 1;
4094 return 0;
4095}
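
/*
 * Usage note: in __next_ref_path() below, a backref whose root is one
 * of these COW-only trees (or whose key references itself) terminates
 * the walk up, since such trees are not themselves reference counted.
 */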
4096
4097static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
4098 struct btrfs_root *extent_root,
4099 struct btrfs_ref_path *ref_path,
4100 int first_time)
4101{
4102 struct extent_buffer *leaf;
4103 struct btrfs_path *path;
4104 struct btrfs_extent_ref *ref;
4105 struct btrfs_key key;
4106 struct btrfs_key found_key;
4107 u64 bytenr;
4108 u32 nritems;
4109 int level;
4110 int ret = 1;
4111
4112 path = btrfs_alloc_path();
4113 if (!path)
4114 return -ENOMEM;
4115
4116 if (first_time) {
4117 ref_path->lowest_level = -1;
4118 ref_path->current_level = -1;
4119 ref_path->shared_level = -1;
4120 goto walk_up;
4121 }
4122walk_down:
4123 level = ref_path->current_level - 1;
4124 while (level >= -1) {
4125 u64 parent;
4126 if (level < ref_path->lowest_level)
4127 break;
4128
4129 if (level >= 0)
4130 bytenr = ref_path->nodes[level];
4131 else
4132 bytenr = ref_path->extent_start;
4133 BUG_ON(bytenr == 0);
4134
4135 parent = ref_path->nodes[level + 1];
4136 ref_path->nodes[level + 1] = 0;
4137 ref_path->current_level = level;
4138 BUG_ON(parent == 0);
4139
4140 key.objectid = bytenr;
4141 key.offset = parent + 1;
4142 key.type = BTRFS_EXTENT_REF_KEY;
4143
4144 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4145 if (ret < 0)
4146 goto out;
4147 BUG_ON(ret == 0);
4148
4149 leaf = path->nodes[0];
4150 nritems = btrfs_header_nritems(leaf);
4151 if (path->slots[0] >= nritems) {
4152 ret = btrfs_next_leaf(extent_root, path);
4153 if (ret < 0)
4154 goto out;
4155 if (ret > 0)
4156 goto next;
4157 leaf = path->nodes[0];
4158 }
4159
4160 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4161 if (found_key.objectid == bytenr &&
4162 found_key.type == BTRFS_EXTENT_REF_KEY) {
4163 if (level < ref_path->shared_level)
4164 ref_path->shared_level = level;
4165 goto found;
4166 }
4167next:
4168 level--;
4169 btrfs_release_path(extent_root, path);
4170 cond_resched();
4171 }
4172 /* reached lowest level */
4173 ret = 1;
4174 goto out;
4175walk_up:
4176 level = ref_path->current_level;
4177 while (level < BTRFS_MAX_LEVEL - 1) {
4178 u64 ref_objectid;
4179
4180 if (level >= 0)
4181 bytenr = ref_path->nodes[level];
4182 else
4183 bytenr = ref_path->extent_start;
4184
4185 BUG_ON(bytenr == 0);
4186
4187 key.objectid = bytenr;
4188 key.offset = 0;
4189 key.type = BTRFS_EXTENT_REF_KEY;
4190
4191 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4192 if (ret < 0)
4193 goto out;
4194
4195 leaf = path->nodes[0];
4196 nritems = btrfs_header_nritems(leaf);
4197 if (path->slots[0] >= nritems) {
4198 ret = btrfs_next_leaf(extent_root, path);
4199 if (ret < 0)
4200 goto out;
4201 if (ret > 0) {
4202 /* the extent was freed by someone */
4203 if (ref_path->lowest_level == level)
4204 goto out;
4205 btrfs_release_path(extent_root, path);
4206 goto walk_down;
4207 }
4208 leaf = path->nodes[0];
4209 }
4210
4211 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4212 if (found_key.objectid != bytenr ||
4213 found_key.type != BTRFS_EXTENT_REF_KEY) {
4214 /* the extent was freed by someone */
4215 if (ref_path->lowest_level == level) {
4216 ret = 1;
4217 goto out;
4218 }
4219 btrfs_release_path(extent_root, path);
4220 goto walk_down;
4221 }
4222found:
4223 ref = btrfs_item_ptr(leaf, path->slots[0],
4224 struct btrfs_extent_ref);
4225 ref_objectid = btrfs_ref_objectid(leaf, ref);
4226 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4227 if (first_time) {
4228 level = (int)ref_objectid;
4229 BUG_ON(level >= BTRFS_MAX_LEVEL);
4230 ref_path->lowest_level = level;
4231 ref_path->current_level = level;
4232 ref_path->nodes[level] = bytenr;
4233 } else {
4234 WARN_ON(ref_objectid != level);
4235 }
4236 } else {
4237 WARN_ON(level != -1);
4238 }
4239 first_time = 0;
4240
4241 if (ref_path->lowest_level == level) {
4242 ref_path->owner_objectid = ref_objectid;
4243 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
4244 }
4245
4246 /*
4247		 * the block is a tree root, or the block isn't in a
4248		 * reference counted tree.
4249 */
4250 if (found_key.objectid == found_key.offset ||
4251 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
4252 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4253 ref_path->root_generation =
4254 btrfs_ref_generation(leaf, ref);
4255 if (level < 0) {
4256 /* special reference from the tree log */
4257 ref_path->nodes[0] = found_key.offset;
4258 ref_path->current_level = 0;
4259 }
4260 ret = 0;
4261 goto out;
4262 }
4263
4264 level++;
4265 BUG_ON(ref_path->nodes[level] != 0);
4266 ref_path->nodes[level] = found_key.offset;
4267 ref_path->current_level = level;
4268
4269 /*
4270 * the reference was created in the running transaction,
4271 * no need to continue walking up.
4272 */
4273 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
4274 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4275 ref_path->root_generation =
4276 btrfs_ref_generation(leaf, ref);
4277 ret = 0;
4278 goto out;
4279 }
4280
4281 btrfs_release_path(extent_root, path);
4282 cond_resched();
4283 }
4284 /* reached max tree level, but no tree root found. */
4285 BUG();
4286out:
4287 btrfs_free_path(path);
4288 return ret;
4289}
4290
4291static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
4292 struct btrfs_root *extent_root,
4293 struct btrfs_ref_path *ref_path,
4294 u64 extent_start)
4295{
4296 memset(ref_path, 0, sizeof(*ref_path));
4297 ref_path->extent_start = extent_start;
4298
4299 return __next_ref_path(trans, extent_root, ref_path, 1);
4300}
4301
4302static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
4303 struct btrfs_root *extent_root,
4304 struct btrfs_ref_path *ref_path)
4305{
4306 return __next_ref_path(trans, extent_root, ref_path, 0);
4307}
4308
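/*
 * Collect the new file extents in reloc_inode that cover the relocated
 * extent described by extent_key.  On success the extents are returned
 * in *extents / *nr_extents; with no_fragment set, needing more than
 * one replacement extent is treated as failure (ret == 1).
 */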
4309static noinline int get_new_locations(struct inode *reloc_inode,
4310 struct btrfs_key *extent_key,
4311 u64 offset, int no_fragment,
4312 struct disk_extent **extents,
4313 int *nr_extents)
4314{
4315 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
4316 struct btrfs_path *path;
4317 struct btrfs_file_extent_item *fi;
4318 struct extent_buffer *leaf;
4319 struct disk_extent *exts = *extents;
4320 struct btrfs_key found_key;
4321 u64 cur_pos;
4322 u64 last_byte;
4323 u32 nritems;
4324 int nr = 0;
4325 int max = *nr_extents;
4326 int ret;
4327
4328 WARN_ON(!no_fragment && *extents);
4329 if (!exts) {
4330 max = 1;
4331 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
4332 if (!exts)
4333 return -ENOMEM;
4334 }
4335
4336 path = btrfs_alloc_path();
4337 BUG_ON(!path);
4338
4339 cur_pos = extent_key->objectid - offset;
4340 last_byte = extent_key->objectid + extent_key->offset;
4341 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
4342 cur_pos, 0);
4343 if (ret < 0)
4344 goto out;
4345 if (ret > 0) {
4346 ret = -ENOENT;
4347 goto out;
4348 }
4349
4350 while (1) {
4351 leaf = path->nodes[0];
4352 nritems = btrfs_header_nritems(leaf);
4353 if (path->slots[0] >= nritems) {
4354 ret = btrfs_next_leaf(root, path);
4355 if (ret < 0)
4356 goto out;
4357 if (ret > 0)
4358 break;
4359 leaf = path->nodes[0];
4360 }
4361
4362 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4363 if (found_key.offset != cur_pos ||
4364 found_key.type != BTRFS_EXTENT_DATA_KEY ||
4365 found_key.objectid != reloc_inode->i_ino)
4366 break;
4367
4368 fi = btrfs_item_ptr(leaf, path->slots[0],
4369 struct btrfs_file_extent_item);
4370 if (btrfs_file_extent_type(leaf, fi) !=
4371 BTRFS_FILE_EXTENT_REG ||
4372 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4373 break;
4374
4375		if (nr == max) {
4376			struct disk_extent *old = exts;
4377			max *= 2;
4378			exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
			if (!exts) {
				exts = old;
				ret = -ENOMEM;
				goto out;
			}
4379			memcpy(exts, old, sizeof(*exts) * nr);
4380			if (old != *extents)
4381				kfree(old);
4382		}
4383
4384 exts[nr].disk_bytenr =
4385 btrfs_file_extent_disk_bytenr(leaf, fi);
4386 exts[nr].disk_num_bytes =
4387 btrfs_file_extent_disk_num_bytes(leaf, fi);
4388 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
4389 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4390 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
4391 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
4392 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
4393 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
4394 fi);
4395 BUG_ON(exts[nr].offset > 0);
4396 BUG_ON(exts[nr].compression || exts[nr].encryption);
4397 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
4398
4399 cur_pos += exts[nr].num_bytes;
4400 nr++;
4401
4402 if (cur_pos + offset >= last_byte)
4403 break;
4404
4405 if (no_fragment) {
4406 ret = 1;
4407 goto out;
4408 }
4409 path->slots[0]++;
4410 }
4411
4412 BUG_ON(cur_pos + offset > last_byte);
4413 if (cur_pos + offset < last_byte) {
4414 ret = -ENOENT;
4415 goto out;
4416 }
4417 ret = 0;
4418out:
4419 btrfs_free_path(path);
4420 if (ret) {
4421 if (exts != *extents)
4422 kfree(exts);
4423 } else {
4424 *extents = exts;
4425 *nr_extents = nr;
4426 }
4427 return ret;
4428}
4429
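/*
 * Rewrite every file extent item in this reference path that points at
 * the relocated extent so it points at new_extents instead.  The file
 * range is locked (and ordered IO waited on) while each pointer is
 * switched, so concurrent writers never see a stale extent pointer.
 */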
4430static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
4431 struct btrfs_root *root,
4432 struct btrfs_path *path,
4433 struct btrfs_key *extent_key,
4434 struct btrfs_key *leaf_key,
4435 struct btrfs_ref_path *ref_path,
4436 struct disk_extent *new_extents,
4437 int nr_extents)
4438{
4439 struct extent_buffer *leaf;
4440 struct btrfs_file_extent_item *fi;
4441 struct inode *inode = NULL;
4442 struct btrfs_key key;
4443 u64 lock_start = 0;
4444 u64 lock_end = 0;
4445 u64 num_bytes;
4446 u64 ext_offset;
4447 u64 first_pos;
4448 u32 nritems;
4449	int nr_scanned = 0;
4450 int extent_locked = 0;
4451 int extent_type;
4452 int ret;
4453
4454 memcpy(&key, leaf_key, sizeof(key));
4455 first_pos = INT_LIMIT(loff_t) - extent_key->offset;
4456 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4457 if (key.objectid < ref_path->owner_objectid ||
4458 (key.objectid == ref_path->owner_objectid &&
4459 key.type < BTRFS_EXTENT_DATA_KEY)) {
4460 key.objectid = ref_path->owner_objectid;
4461 key.type = BTRFS_EXTENT_DATA_KEY;
4462 key.offset = 0;
4463 }
4464 }
4465
4466 while (1) {
4467 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4468 if (ret < 0)
4469 goto out;
4470
4471 leaf = path->nodes[0];
4472 nritems = btrfs_header_nritems(leaf);
4473next:
4474 if (extent_locked && ret > 0) {
4475 /*
4476 * the file extent item was modified by someone
4477 * before the extent got locked.
4478 */
4479 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4480 lock_end, GFP_NOFS);
4481 extent_locked = 0;
4482 }
4483
4484 if (path->slots[0] >= nritems) {
4485			if (++nr_scanned > 2)
4486 break;
4487
4488 BUG_ON(extent_locked);
4489 ret = btrfs_next_leaf(root, path);
4490 if (ret < 0)
4491 goto out;
4492 if (ret > 0)
4493 break;
4494 leaf = path->nodes[0];
4495 nritems = btrfs_header_nritems(leaf);
4496 }
4497
4498 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4499
4500 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4501 if ((key.objectid > ref_path->owner_objectid) ||
4502 (key.objectid == ref_path->owner_objectid &&
4503 key.type > BTRFS_EXTENT_DATA_KEY) ||
4504 (key.offset >= first_pos + extent_key->offset))
4505 break;
4506 }
4507
4508 if (inode && key.objectid != inode->i_ino) {
4509 BUG_ON(extent_locked);
4510 btrfs_release_path(root, path);
4511 mutex_unlock(&inode->i_mutex);
4512 iput(inode);
4513 inode = NULL;
4514 continue;
4515 }
4516
4517 if (key.type != BTRFS_EXTENT_DATA_KEY) {
4518 path->slots[0]++;
4519 ret = 1;
4520 goto next;
4521 }
4522 fi = btrfs_item_ptr(leaf, path->slots[0],
4523 struct btrfs_file_extent_item);
4524 extent_type = btrfs_file_extent_type(leaf, fi);
4525 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
4526 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
4527 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
4528 extent_key->objectid)) {
4529 path->slots[0]++;
4530 ret = 1;
4531 goto next;
4532 }
4533
4534 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4535 ext_offset = btrfs_file_extent_offset(leaf, fi);
4536
4537 if (first_pos > key.offset - ext_offset)
4538 first_pos = key.offset - ext_offset;
4539
4540 if (!extent_locked) {
4541 lock_start = key.offset;
4542 lock_end = lock_start + num_bytes - 1;
4543 } else {
4544 if (lock_start > key.offset ||
4545 lock_end + 1 < key.offset + num_bytes) {
4546 unlock_extent(&BTRFS_I(inode)->io_tree,
4547 lock_start, lock_end, GFP_NOFS);
4548 extent_locked = 0;
4549 }
4550 }
4551
4552 if (!inode) {
4553 btrfs_release_path(root, path);
4554
4555			inode = btrfs_iget_locked(root->fs_info->sb,
4556						  key.objectid, root);
			if (!inode) {
				key.offset = (u64)-1;
				goto skip;
			}
4557			if (inode->i_state & I_NEW) {
4558 BTRFS_I(inode)->root = root;
4559 BTRFS_I(inode)->location.objectid =
4560 key.objectid;
4561 BTRFS_I(inode)->location.type =
4562 BTRFS_INODE_ITEM_KEY;
4563 BTRFS_I(inode)->location.offset = 0;
4564 btrfs_read_locked_inode(inode);
4565 unlock_new_inode(inode);
4566 }
4567 /*
4568		 * some code calls btrfs_commit_transaction while
4569 * holding the i_mutex, so we can't use mutex_lock
4570 * here.
4571 */
4572 if (is_bad_inode(inode) ||
4573 !mutex_trylock(&inode->i_mutex)) {
4574 iput(inode);
4575 inode = NULL;
4576 key.offset = (u64)-1;
4577 goto skip;
4578 }
4579 }
4580
4581 if (!extent_locked) {
4582 struct btrfs_ordered_extent *ordered;
4583
4584 btrfs_release_path(root, path);
4585
4586 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4587 lock_end, GFP_NOFS);
4588 ordered = btrfs_lookup_first_ordered_extent(inode,
4589 lock_end);
4590 if (ordered &&
4591 ordered->file_offset <= lock_end &&
4592 ordered->file_offset + ordered->len > lock_start) {
4593 unlock_extent(&BTRFS_I(inode)->io_tree,
4594 lock_start, lock_end, GFP_NOFS);
4595 btrfs_start_ordered_extent(inode, ordered, 1);
4596 btrfs_put_ordered_extent(ordered);
4597 key.offset += num_bytes;
4598 goto skip;
4599 }
4600 if (ordered)
4601 btrfs_put_ordered_extent(ordered);
4602
4603 extent_locked = 1;
4604 continue;
4605 }
4606
4607 if (nr_extents == 1) {
4608 /* update extent pointer in place */
4609 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4610 new_extents[0].disk_bytenr);
4611 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4612 new_extents[0].disk_num_bytes);
4613 btrfs_mark_buffer_dirty(leaf);
4614
4615 btrfs_drop_extent_cache(inode, key.offset,
4616 key.offset + num_bytes - 1, 0);
4617
4618 ret = btrfs_inc_extent_ref(trans, root,
4619 new_extents[0].disk_bytenr,
4620 new_extents[0].disk_num_bytes,
4621 leaf->start,
4622 root->root_key.objectid,
4623 trans->transid,
4624 key.objectid);
4625 BUG_ON(ret);
4626
4627 ret = btrfs_free_extent(trans, root,
4628 extent_key->objectid,
4629 extent_key->offset,
4630 leaf->start,
4631 btrfs_header_owner(leaf),
4632 btrfs_header_generation(leaf),
4633 key.objectid, 0);
4634 BUG_ON(ret);
4635
4636 btrfs_release_path(root, path);
4637 key.offset += num_bytes;
4638 } else {
4639 BUG_ON(1);
4640#if 0
4641 u64 alloc_hint;
4642 u64 extent_len;
4643 int i;
4644 /*
4645			 * drop the old extent pointer first, then insert the
4646			 * new pointers one by one
4647 */
4648 btrfs_release_path(root, path);
4649 ret = btrfs_drop_extents(trans, root, inode, key.offset,
4650 key.offset + num_bytes,
4651 key.offset, &alloc_hint);
4652 BUG_ON(ret);
4653
4654 for (i = 0; i < nr_extents; i++) {
4655 if (ext_offset >= new_extents[i].num_bytes) {
4656 ext_offset -= new_extents[i].num_bytes;
4657 continue;
4658 }
4659 extent_len = min(new_extents[i].num_bytes -
4660 ext_offset, num_bytes);
4661
4662 ret = btrfs_insert_empty_item(trans, root,
4663 path, &key,
4664 sizeof(*fi));
4665 BUG_ON(ret);
4666
4667 leaf = path->nodes[0];
4668 fi = btrfs_item_ptr(leaf, path->slots[0],
4669 struct btrfs_file_extent_item);
4670 btrfs_set_file_extent_generation(leaf, fi,
4671 trans->transid);
4672 btrfs_set_file_extent_type(leaf, fi,
4673 BTRFS_FILE_EXTENT_REG);
4674 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4675 new_extents[i].disk_bytenr);
4676 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4677 new_extents[i].disk_num_bytes);
4678 btrfs_set_file_extent_ram_bytes(leaf, fi,
4679 new_extents[i].ram_bytes);
4680
4681 btrfs_set_file_extent_compression(leaf, fi,
4682 new_extents[i].compression);
4683 btrfs_set_file_extent_encryption(leaf, fi,
4684 new_extents[i].encryption);
4685 btrfs_set_file_extent_other_encoding(leaf, fi,
4686 new_extents[i].other_encoding);
4687
4688 btrfs_set_file_extent_num_bytes(leaf, fi,
4689 extent_len);
4690 ext_offset += new_extents[i].offset;
4691 btrfs_set_file_extent_offset(leaf, fi,
4692 ext_offset);
4693 btrfs_mark_buffer_dirty(leaf);
4694
4695 btrfs_drop_extent_cache(inode, key.offset,
4696 key.offset + extent_len - 1, 0);
4697
4698 ret = btrfs_inc_extent_ref(trans, root,
4699 new_extents[i].disk_bytenr,
4700 new_extents[i].disk_num_bytes,
4701 leaf->start,
4702 root->root_key.objectid,
4703 trans->transid, key.objectid);
4704 BUG_ON(ret);
4705 btrfs_release_path(root, path);
4706
4707 inode_add_bytes(inode, extent_len);
4708
4709 ext_offset = 0;
4710 num_bytes -= extent_len;
4711 key.offset += extent_len;
4712
4713 if (num_bytes == 0)
4714 break;
4715 }
4716 BUG_ON(i >= nr_extents);
4717#endif
4718 }
4719
4720 if (extent_locked) {
4721 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4722 lock_end, GFP_NOFS);
4723 extent_locked = 0;
4724 }
4725skip:
4726 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
4727 key.offset >= first_pos + extent_key->offset)
4728 break;
4729
4730 cond_resched();
4731 }
4732 ret = 0;
4733out:
4734 btrfs_release_path(root, path);
4735 if (inode) {
4736 mutex_unlock(&inode->i_mutex);
4737 if (extent_locked) {
4738 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4739 lock_end, GFP_NOFS);
4740 }
4741 iput(inode);
4742 }
4743 return ret;
4744}
4745
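/*
 * For leaves only: duplicate the cached leaf ref of the block at
 * orig_start for the freshly COWed reloc tree block 'buf', so the
 * leaf ref cache stays usable after relocation copies a leaf.
 */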
4746int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
4747 struct btrfs_root *root,
4748 struct extent_buffer *buf, u64 orig_start)
4749{
4750 int level;
4751 int ret;
4752
4753 BUG_ON(btrfs_header_generation(buf) != trans->transid);
4754 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
4755
4756 level = btrfs_header_level(buf);
4757 if (level == 0) {
4758 struct btrfs_leaf_ref *ref;
4759 struct btrfs_leaf_ref *orig_ref;
4760
4761 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
4762 if (!orig_ref)
4763 return -ENOENT;
4764
4765 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
4766 if (!ref) {
4767 btrfs_free_leaf_ref(root, orig_ref);
4768 return -ENOMEM;
4769 }
4770
4771 ref->nritems = orig_ref->nritems;
4772 memcpy(ref->extents, orig_ref->extents,
4773 sizeof(ref->extents[0]) * ref->nritems);
4774
4775 btrfs_free_leaf_ref(root, orig_ref);
4776
4777 ref->root_gen = trans->transid;
4778 ref->bytenr = buf->start;
4779 ref->owner = btrfs_header_owner(buf);
4780 ref->generation = btrfs_header_generation(buf);
4781 ret = btrfs_add_leaf_ref(root, ref, 0);
4782 WARN_ON(ret);
4783 btrfs_free_leaf_ref(root, ref);
4784 }
4785 return 0;
4786}
4787
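/*
 * Drop the cached extent mappings in target_root for every regular
 * file extent referenced by 'leaf', so readers go back to disk and
 * pick up the relocated extent locations.
 */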
4788static noinline int invalidate_extent_cache(struct btrfs_root *root,
4789 struct extent_buffer *leaf,
4790 struct btrfs_block_group_cache *group,
4791 struct btrfs_root *target_root)
4792{
4793 struct btrfs_key key;
4794 struct inode *inode = NULL;
4795 struct btrfs_file_extent_item *fi;
4796 u64 num_bytes;
4797 u64 skip_objectid = 0;
4798 u32 nritems;
4799 u32 i;
4800
4801 nritems = btrfs_header_nritems(leaf);
4802 for (i = 0; i < nritems; i++) {
4803 btrfs_item_key_to_cpu(leaf, &key, i);
4804 if (key.objectid == skip_objectid ||
4805 key.type != BTRFS_EXTENT_DATA_KEY)
4806 continue;
4807 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4808 if (btrfs_file_extent_type(leaf, fi) ==
4809 BTRFS_FILE_EXTENT_INLINE)
4810 continue;
4811 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4812 continue;
4813 if (!inode || inode->i_ino != key.objectid) {
4814 iput(inode);
4815 inode = btrfs_ilookup(target_root->fs_info->sb,
4816 key.objectid, target_root, 1);
4817 }
4818 if (!inode) {
4819 skip_objectid = key.objectid;
4820 continue;
4821 }
4822 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4823
4824 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4825 key.offset + num_bytes - 1, GFP_NOFS);
4826 btrfs_drop_extent_cache(inode, key.offset,
4827 key.offset + num_bytes - 1, 1);
4828 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4829 key.offset + num_bytes - 1, GFP_NOFS);
4830 cond_resched();
4831 }
4832 iput(inode);
4833 return 0;
4834}
4835
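/*
 * For each file extent in 'leaf' that falls inside the block group
 * being relocated, look up its new location in reloc_inode and switch
 * the extent pointer (and the cached leaf ref) over to it, taking a
 * reference on the new extent and dropping the old one.
 */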
4836static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
4837 struct btrfs_root *root,
4838 struct extent_buffer *leaf,
4839 struct btrfs_block_group_cache *group,
4840 struct inode *reloc_inode)
4841{
4842 struct btrfs_key key;
4843 struct btrfs_key extent_key;
4844 struct btrfs_file_extent_item *fi;
4845 struct btrfs_leaf_ref *ref;
4846 struct disk_extent *new_extent;
4847 u64 bytenr;
4848 u64 num_bytes;
4849 u32 nritems;
4850 u32 i;
4851 int ext_index;
4852 int nr_extent;
4853 int ret;
4854
4855 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
4856 BUG_ON(!new_extent);
4857
4858 ref = btrfs_lookup_leaf_ref(root, leaf->start);
4859 BUG_ON(!ref);
4860
4861 ext_index = -1;
4862 nritems = btrfs_header_nritems(leaf);
4863 for (i = 0; i < nritems; i++) {
4864 btrfs_item_key_to_cpu(leaf, &key, i);
4865 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
4866 continue;
4867 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4868 if (btrfs_file_extent_type(leaf, fi) ==
4869 BTRFS_FILE_EXTENT_INLINE)
4870 continue;
4871 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
4872 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
4873 if (bytenr == 0)
4874 continue;
4875
4876 ext_index++;
4877 if (bytenr >= group->key.objectid + group->key.offset ||
4878 bytenr + num_bytes <= group->key.objectid)
4879 continue;
4880
4881 extent_key.objectid = bytenr;
4882 extent_key.offset = num_bytes;
4883 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
4884 nr_extent = 1;
4885 ret = get_new_locations(reloc_inode, &extent_key,
4886 group->key.objectid, 1,
4887 &new_extent, &nr_extent);
4888 if (ret > 0)
4889 continue;
4890 BUG_ON(ret < 0);
4891
4892 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
4893 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
4894 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
4895 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
4896
4897 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4898 new_extent->disk_bytenr);
4899 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4900 new_extent->disk_num_bytes);
4901 btrfs_mark_buffer_dirty(leaf);
4902
4903 ret = btrfs_inc_extent_ref(trans, root,
4904 new_extent->disk_bytenr,
4905 new_extent->disk_num_bytes,
4906 leaf->start,
4907 root->root_key.objectid,
4908 trans->transid, key.objectid);
4909 BUG_ON(ret);
4910 ret = btrfs_free_extent(trans, root,
4911 bytenr, num_bytes, leaf->start,
4912 btrfs_header_owner(leaf),
4913 btrfs_header_generation(leaf),
4914 key.objectid, 0);
4915 BUG_ON(ret);
4916 cond_resched();
4917 }
4918 kfree(new_extent);
4919 BUG_ON(ext_index + 1 != ref->nritems);
4920 btrfs_free_leaf_ref(root, ref);
4921 return 0;
4922}
4923
4924int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
4925 struct btrfs_root *root)
4926{
4927 struct btrfs_root *reloc_root;
4928 int ret;
4929
4930 if (root->reloc_root) {
4931 reloc_root = root->reloc_root;
4932 root->reloc_root = NULL;
4933 list_add(&reloc_root->dead_list,
4934 &root->fs_info->dead_reloc_roots);
4935
4936 btrfs_set_root_bytenr(&reloc_root->root_item,
4937 reloc_root->node->start);
4938		btrfs_set_root_level(&reloc_root->root_item,
4939 btrfs_header_level(reloc_root->node));
4940 memset(&reloc_root->root_item.drop_progress, 0,
4941 sizeof(struct btrfs_disk_key));
4942 reloc_root->root_item.drop_level = 0;
4943
4944 ret = btrfs_update_root(trans, root->fs_info->tree_root,
4945 &reloc_root->root_key,
4946 &reloc_root->root_item);
4947 BUG_ON(ret);
4948 }
4949 return 0;
4950}
4951
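/*
 * Walk the list of dead reloc roots and drop each snapshot; the
 * -EAGAIN loop ends the transaction periodically so a large drop
 * doesn't pin a single transaction for too long.
 */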
4952int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
4953{
4954 struct btrfs_trans_handle *trans;
4955 struct btrfs_root *reloc_root;
4956 struct btrfs_root *prev_root = NULL;
4957 struct list_head dead_roots;
4958 int ret;
4959 unsigned long nr;
4960
4961 INIT_LIST_HEAD(&dead_roots);
4962 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
4963
4964 while (!list_empty(&dead_roots)) {
4965 reloc_root = list_entry(dead_roots.prev,
4966 struct btrfs_root, dead_list);
4967 list_del_init(&reloc_root->dead_list);
4968
4969 BUG_ON(reloc_root->commit_root != NULL);
4970 while (1) {
4971 trans = btrfs_join_transaction(root, 1);
4972 BUG_ON(!trans);
4973
4974 mutex_lock(&root->fs_info->drop_mutex);
4975 ret = btrfs_drop_snapshot(trans, reloc_root);
4976 if (ret != -EAGAIN)
4977 break;
4978 mutex_unlock(&root->fs_info->drop_mutex);
4979
4980 nr = trans->blocks_used;
4981 ret = btrfs_end_transaction(trans, root);
4982 BUG_ON(ret);
4983 btrfs_btree_balance_dirty(root, nr);
4984 }
4985
4986 free_extent_buffer(reloc_root->node);
4987
4988 ret = btrfs_del_root(trans, root->fs_info->tree_root,
4989 &reloc_root->root_key);
4990 BUG_ON(ret);
4991 mutex_unlock(&root->fs_info->drop_mutex);
4992
4993 nr = trans->blocks_used;
4994 ret = btrfs_end_transaction(trans, root);
4995 BUG_ON(ret);
4996 btrfs_btree_balance_dirty(root, nr);
4997
4998 kfree(prev_root);
4999 prev_root = reloc_root;
5000 }
5001 if (prev_root) {
5002 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
5003 kfree(prev_root);
5004 }
5005 return 0;
5006}
5007
5008int btrfs_add_dead_reloc_root(struct btrfs_root *root)
5009{
5010 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
5011 return 0;
5012}
5013
5014int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
5015{
5016 struct btrfs_root *reloc_root;
5017 struct btrfs_trans_handle *trans;
5018 struct btrfs_key location;
5019 int found;
5020 int ret;
5021
5022 mutex_lock(&root->fs_info->tree_reloc_mutex);
5023 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
5024 BUG_ON(ret);
5025 found = !list_empty(&root->fs_info->dead_reloc_roots);
5026 mutex_unlock(&root->fs_info->tree_reloc_mutex);
5027
5028 if (found) {
5029 trans = btrfs_start_transaction(root, 1);
5030 BUG_ON(!trans);
5031 ret = btrfs_commit_transaction(trans, root);
5032 BUG_ON(ret);
5033 }
5034
5035 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5036 location.offset = (u64)-1;
5037 location.type = BTRFS_ROOT_ITEM_KEY;
5038
5039 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
5040 BUG_ON(!reloc_root);
5041 btrfs_orphan_cleanup(reloc_root);
5042 return 0;
5043}
5044
5045static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
5046 struct btrfs_root *root)
5047{
5048 struct btrfs_root *reloc_root;
5049 struct extent_buffer *eb;
5050 struct btrfs_root_item *root_item;
5051 struct btrfs_key root_key;
5052 int ret;
5053
5054 BUG_ON(!root->ref_cows);
5055 if (root->reloc_root)
5056 return 0;
5057
5058 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
5059 BUG_ON(!root_item);
5060
5061 ret = btrfs_copy_root(trans, root, root->commit_root,
5062 &eb, BTRFS_TREE_RELOC_OBJECTID);
5063 BUG_ON(ret);
5064
5065 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
5066 root_key.offset = root->root_key.objectid;
5067 root_key.type = BTRFS_ROOT_ITEM_KEY;
5068
5069	memcpy(root_item, &root->root_item, sizeof(*root_item));
5070 btrfs_set_root_refs(root_item, 0);
5071 btrfs_set_root_bytenr(root_item, eb->start);
5072 btrfs_set_root_level(root_item, btrfs_header_level(eb));
5073 btrfs_set_root_generation(root_item, trans->transid);
5074
5075 btrfs_tree_unlock(eb);
5076 free_extent_buffer(eb);
5077
5078 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
5079 &root_key, root_item);
5080 BUG_ON(ret);
5081 kfree(root_item);
5082
5083 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
5084 &root_key);
5085 BUG_ON(!reloc_root);
5086 reloc_root->last_trans = trans->transid;
5087 reloc_root->commit_root = NULL;
5088 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
5089
5090 root->reloc_root = reloc_root;
5091 return 0;
5092}
5093
5094/*
5095 * Core function of space balance.
5096 *
5097 * The idea is to use reloc trees to relocate tree blocks in reference
5098 * counted roots. There is one reloc tree for each subvol, and all
5099 * reloc trees share the same root key objectid. Reloc trees are
5100 * snapshots of the latest committed roots of subvols (root->commit_root).
5101 *
5102 * To relocate a tree block referenced by a subvol, there are two steps:
5103 * COW the block through the subvol's reloc tree, then update the block
5104 * pointer in the subvol to point to the new block. Since all reloc trees
5105 * share the same root key objectid, special handling for tree blocks
5106 * owned by them is easy. Once a tree block has been COWed in one reloc
5107 * tree, we can use the resulting new block directly when the same block
5108 * is required to COW again through other reloc trees. In this way,
5109 * relocated tree blocks are shared between reloc trees, so they are
5110 * also shared between subvols.
5111 */
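/*
 * Illustrative example (not from the original source): if block B is
 * referenced by snapshots S1 and S2, COWing B through S1's reloc tree
 * yields B'.  When S2's reference path later reaches B, the shared
 * reloc root objectid lets the merge code reuse B' directly instead of
 * COWing B again, so S1 and S2 end up sharing B'.
 */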
5112static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
5113 struct btrfs_root *root,
5114 struct btrfs_path *path,
5115 struct btrfs_key *first_key,
5116 struct btrfs_ref_path *ref_path,
5117 struct btrfs_block_group_cache *group,
5118 struct inode *reloc_inode)
5119{
5120 struct btrfs_root *reloc_root;
5121 struct extent_buffer *eb = NULL;
5122 struct btrfs_key *keys;
5123 u64 *nodes;
5124 int level;
5125 int shared_level;
5126 int lowest_level = 0;
5127 int ret;
5128
5129 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
5130 lowest_level = ref_path->owner_objectid;
5131
5132 if (!root->ref_cows) {
5133 path->lowest_level = lowest_level;
5134 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
5135 BUG_ON(ret < 0);
5136 path->lowest_level = 0;
5137 btrfs_release_path(root, path);
5138 return 0;
5139 }
5140
5141 mutex_lock(&root->fs_info->tree_reloc_mutex);
5142 ret = init_reloc_tree(trans, root);
5143 BUG_ON(ret);
5144 reloc_root = root->reloc_root;
5145
5146 shared_level = ref_path->shared_level;
5147 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
5148
5149 keys = ref_path->node_keys;
5150 nodes = ref_path->new_nodes;
5151 memset(&keys[shared_level + 1], 0,
5152 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
5153 memset(&nodes[shared_level + 1], 0,
5154 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
5155
5156 if (nodes[lowest_level] == 0) {
5157 path->lowest_level = lowest_level;
5158 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5159 0, 1);
5160 BUG_ON(ret);
5161 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
5162 eb = path->nodes[level];
5163 if (!eb || eb == reloc_root->node)
5164 break;
5165 nodes[level] = eb->start;
5166 if (level == 0)
5167 btrfs_item_key_to_cpu(eb, &keys[level], 0);
5168 else
5169 btrfs_node_key_to_cpu(eb, &keys[level], 0);
5170 }
5171 if (nodes[0] &&
5172 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5173 eb = path->nodes[0];
5174 ret = replace_extents_in_leaf(trans, reloc_root, eb,
5175 group, reloc_inode);
5176 BUG_ON(ret);
5177 }
5178 btrfs_release_path(reloc_root, path);
5179 } else {
5180 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
5181 lowest_level);
5182 BUG_ON(ret);
5183 }
5184
5185 /*
5186 * replace tree blocks in the fs tree with tree blocks in
5187 * the reloc tree.
5188 */
5189 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
5190 BUG_ON(ret < 0);
5191
5192 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5193 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5194 0, 0);
5195 BUG_ON(ret);
5196 extent_buffer_get(path->nodes[0]);
5197 eb = path->nodes[0];
5198 btrfs_release_path(reloc_root, path);
5199 ret = invalidate_extent_cache(reloc_root, eb, group, root);
5200 BUG_ON(ret);
5201 free_extent_buffer(eb);
5202 }
5203
5204 mutex_unlock(&root->fs_info->tree_reloc_mutex);
5205 path->lowest_level = 0;
5206 return 0;
5207}
5208
5209static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
5210 struct btrfs_root *root,
5211 struct btrfs_path *path,
5212 struct btrfs_key *first_key,
5213 struct btrfs_ref_path *ref_path)
5214{
5215 int ret;
5216
5217 ret = relocate_one_path(trans, root, path, first_key,
5218 ref_path, NULL, NULL);
5219 BUG_ON(ret);
5220
5221 if (root == root->fs_info->extent_root)
5222 btrfs_extent_post_op(trans, root);
5223
5224 return 0;
5225}
5226
5227static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
5228 struct btrfs_root *extent_root,
5229 struct btrfs_path *path,
5230 struct btrfs_key *extent_key)
5231{
5232 int ret;
5233
5234 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
5235 if (ret)
5236 goto out;
5237 ret = btrfs_del_item(trans, extent_root, path);
5238out:
5239 btrfs_release_path(extent_root, path);
5240 return ret;
5241}
5242
5243static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
5244 struct btrfs_ref_path *ref_path)
5245{
5246 struct btrfs_key root_key;
5247
5248 root_key.objectid = ref_path->root_objectid;
5249 root_key.type = BTRFS_ROOT_ITEM_KEY;
5250 if (is_cowonly_root(ref_path->root_objectid))
5251 root_key.offset = 0;
5252 else
5253 root_key.offset = (u64)-1;
5254
5255 return btrfs_read_fs_root_no_name(fs_info, &root_key);
5256}
5257
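/*
 * Relocate a single extent: iterate over every reference path leading
 * to it.  Data references are handled by copying the data into the
 * reloc inode (pass 0), by updating pointers through the reloc trees
 * while keeping metadata shared (pass 1), or by rewriting the file
 * extent pointers directly as a fallback; tree block references are
 * relocated through relocate_tree_block().
 */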
5258static noinline int relocate_one_extent(struct btrfs_root *extent_root,
5259 struct btrfs_path *path,
5260 struct btrfs_key *extent_key,
5261 struct btrfs_block_group_cache *group,
5262 struct inode *reloc_inode, int pass)
5263{
5264 struct btrfs_trans_handle *trans;
5265 struct btrfs_root *found_root;
5266 struct btrfs_ref_path *ref_path = NULL;
5267 struct disk_extent *new_extents = NULL;
5268 int nr_extents = 0;
5269 int loops;
5270 int ret;
5271 int level;
5272 struct btrfs_key first_key;
5273 u64 prev_block = 0;
5274
5276 trans = btrfs_start_transaction(extent_root, 1);
5277 BUG_ON(!trans);
5278
5279 if (extent_key->objectid == 0) {
5280 ret = del_extent_zero(trans, extent_root, path, extent_key);
5281 goto out;
5282 }
5283
5284 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
5285 if (!ref_path) {
5286 ret = -ENOMEM;
5287 goto out;
5288 }
5289
5290 for (loops = 0; ; loops++) {
5291 if (loops == 0) {
5292 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
5293 extent_key->objectid);
5294 } else {
5295 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
5296 }
5297 if (ret < 0)
5298 goto out;
5299 if (ret > 0)
5300 break;
5301
5302 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5303 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
5304 continue;
5305
5306 found_root = read_ref_root(extent_root->fs_info, ref_path);
5307 BUG_ON(!found_root);
5308 /*
5309		 * for a reference counted tree, only process reference paths
5310 * rooted at the latest committed root.
5311 */
5312 if (found_root->ref_cows &&
5313 ref_path->root_generation != found_root->root_key.offset)
5314 continue;
5315
5316 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5317 if (pass == 0) {
5318 /*
5319 * copy data extents to new locations
5320 */
5321 u64 group_start = group->key.objectid;
5322 ret = relocate_data_extent(reloc_inode,
5323 extent_key,
5324 group_start);
5325 if (ret < 0)
5326 goto out;
5327 break;
5328 }
5329 level = 0;
5330 } else {
5331 level = ref_path->owner_objectid;
5332 }
5333
5334 if (prev_block != ref_path->nodes[level]) {
5335 struct extent_buffer *eb;
5336 u64 block_start = ref_path->nodes[level];
5337 u64 block_size = btrfs_level_size(found_root, level);
5338
5339 eb = read_tree_block(found_root, block_start,
5340 block_size, 0);
5341 btrfs_tree_lock(eb);
5342 BUG_ON(level != btrfs_header_level(eb));
5343
5344 if (level == 0)
5345 btrfs_item_key_to_cpu(eb, &first_key, 0);
5346 else
5347 btrfs_node_key_to_cpu(eb, &first_key, 0);
5348
5349 btrfs_tree_unlock(eb);
5350 free_extent_buffer(eb);
5351 prev_block = block_start;
5352 }
5353
5354 btrfs_record_root_in_trans(found_root);
5355 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5356 /*
5357 * try to update data extent references while
5358 * keeping metadata shared between snapshots.
5359 */
5360 if (pass == 1) {
5361 ret = relocate_one_path(trans, found_root,
5362 path, &first_key, ref_path,
5363 group, reloc_inode);
5364 if (ret < 0)
5365 goto out;
5366 continue;
5367 }
5368 /*
5369			 * use the fallback method to process the remaining
5370 * references.
5371 */
5372 if (!new_extents) {
5373 u64 group_start = group->key.objectid;
5374 new_extents = kmalloc(sizeof(*new_extents),
5375 GFP_NOFS);
5376 nr_extents = 1;
5377 ret = get_new_locations(reloc_inode,
5378 extent_key,
5379 group_start, 1,
5380 &new_extents,
5381 &nr_extents);
5382 if (ret)
5383 goto out;
5384 }
5385 ret = replace_one_extent(trans, found_root,
5386 path, extent_key,
5387 &first_key, ref_path,
5388 new_extents, nr_extents);
5389 } else {
5390 ret = relocate_tree_block(trans, found_root, path,
5391 &first_key, ref_path);
5392 }
5393 if (ret < 0)
5394 goto out;
5395 }
5396 ret = 0;
5397out:
5398 btrfs_end_transaction(trans, extent_root);
5399 kfree(new_extents);
5400 kfree(ref_path);
5401 return ret;
5402}
5403
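/*
 * Pick the allocation flags a relocated chunk should use.  Summary of
 * the conversions below:
 *   one rw device:    RAID0 -> single, RAID1/RAID10 -> DUP
 *   multiple devices: DUP -> RAID1, single -> RAID0, RAID* unchanged
 */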
5404static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
5405{
5406 u64 num_devices;
5407 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
5408 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
5409
5410 num_devices = root->fs_info->fs_devices->rw_devices;
5411 if (num_devices == 1) {
5412 stripped |= BTRFS_BLOCK_GROUP_DUP;
5413 stripped = flags & ~stripped;
5414
5415 /* turn raid0 into single device chunks */
5416 if (flags & BTRFS_BLOCK_GROUP_RAID0)
5417 return stripped;
5418
5419 /* turn mirroring into duplication */
5420 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
5421 BTRFS_BLOCK_GROUP_RAID10))
5422 return stripped | BTRFS_BLOCK_GROUP_DUP;
5423 return flags;
5424 } else {
5425 /* they already had raid on here, just return */
5426 if (flags & stripped)
5427 return flags;
5428
5429 stripped |= BTRFS_BLOCK_GROUP_DUP;
5430 stripped = flags & ~stripped;
5431
5432 /* switch duplicated blocks with raid1 */
5433 if (flags & BTRFS_BLOCK_GROUP_DUP)
5434 return stripped | BTRFS_BLOCK_GROUP_RAID1;
5435
5436 /* turn single device chunks into raid0 */
5437 return stripped | BTRFS_BLOCK_GROUP_RAID0;
5438 }
5439 return flags;
5440}
5441
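/*
 * Make sure a new chunk exists to receive the data being moved out of
 * shrink_block_group.  If the target flags differ, size it by the
 * bytes still used in the group; otherwise by the full group size,
 * plus 2MB of slack either way.
 */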
5442static int __alloc_chunk_for_shrink(struct btrfs_root *root,
5443 struct btrfs_block_group_cache *shrink_block_group,
5444 int force)
5445{
5446 struct btrfs_trans_handle *trans;
5447 u64 new_alloc_flags;
5448 u64 calc;
5449
5450 spin_lock(&shrink_block_group->lock);
5451 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
5452 spin_unlock(&shrink_block_group->lock);
5453
5454 trans = btrfs_start_transaction(root, 1);
5455 spin_lock(&shrink_block_group->lock);
5456
5457 new_alloc_flags = update_block_group_flags(root,
5458 shrink_block_group->flags);
5459 if (new_alloc_flags != shrink_block_group->flags) {
5460 calc =
5461 btrfs_block_group_used(&shrink_block_group->item);
5462 } else {
5463 calc = shrink_block_group->key.offset;
5464 }
5465 spin_unlock(&shrink_block_group->lock);
5466
5467 do_chunk_alloc(trans, root->fs_info->extent_root,
5468 calc + 2 * 1024 * 1024, new_alloc_flags, force);
5469
5470 btrfs_end_transaction(trans, root);
5471 } else
5472 spin_unlock(&shrink_block_group->lock);
5473 return 0;
5474}
5475
5476static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
5477 struct btrfs_root *root,
5478 u64 objectid, u64 size)
5479{
5480 struct btrfs_path *path;
5481 struct btrfs_inode_item *item;
5482 struct extent_buffer *leaf;
5483 int ret;
5484
5485 path = btrfs_alloc_path();
5486 if (!path)
5487 return -ENOMEM;
5488
5489 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
5490 if (ret)
5491 goto out;
5492
5493 leaf = path->nodes[0];
5494 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
5495 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
5496 btrfs_set_inode_generation(leaf, item, 1);
5497 btrfs_set_inode_size(leaf, item, size);
5498 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
5499 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
5500 btrfs_mark_buffer_dirty(leaf);
5501 btrfs_release_path(root, path);
5502out:
5503 btrfs_free_path(path);
5504 return ret;
5505}
5506
5507static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
5508 struct btrfs_block_group_cache *group)
5509{
5510 struct inode *inode = NULL;
5511 struct btrfs_trans_handle *trans;
5512 struct btrfs_root *root;
5513 struct btrfs_key root_key;
5514 u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
5515 int err = 0;
5516
5517 root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5518 root_key.type = BTRFS_ROOT_ITEM_KEY;
5519 root_key.offset = (u64)-1;
5520 root = btrfs_read_fs_root_no_name(fs_info, &root_key);
5521 if (IS_ERR(root))
5522 return ERR_CAST(root);
5523
5524 trans = btrfs_start_transaction(root, 1);
5525 BUG_ON(!trans);
5526
5527 err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
5528 if (err)
5529 goto out;
5530
5531 err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
5532 BUG_ON(err);
5533
5534 err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
5535 group->key.offset, 0, group->key.offset,
5536 0, 0, 0);
5537 BUG_ON(err);
5538
5539	inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
	BUG_ON(!inode);
5540 if (inode->i_state & I_NEW) {
5541 BTRFS_I(inode)->root = root;
5542 BTRFS_I(inode)->location.objectid = objectid;
5543 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5544 BTRFS_I(inode)->location.offset = 0;
5545 btrfs_read_locked_inode(inode);
5546 unlock_new_inode(inode);
5547 BUG_ON(is_bad_inode(inode));
5548 } else {
5549 BUG_ON(1);
5550 }
5551 BTRFS_I(inode)->index_cnt = group->key.objectid;
5552
5553 err = btrfs_orphan_add(trans, inode);
5554out:
5555 btrfs_end_transaction(trans, root);
5556 if (err) {
5557 if (inode)
5558 iput(inode);
5559 inode = ERR_PTR(err);
5560 }
5561 return inode;
5562}
5563
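/*
 * The reloc inode stores the old start of the block group in
 * index_cnt, so file_pos + index_cnt recovers the original disk
 * bytenr.  Look up the checksums recorded there and rebase them onto
 * the ordered extent's new location.
 */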
5564int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
5565{
5567 struct btrfs_ordered_sum *sums;
5568 struct btrfs_sector_sum *sector_sum;
5569 struct btrfs_ordered_extent *ordered;
5570 struct btrfs_root *root = BTRFS_I(inode)->root;
5571 struct list_head list;
5572 size_t offset;
5573 int ret;
5574 u64 disk_bytenr;
5575
5576 INIT_LIST_HEAD(&list);
5577
5578 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
5579 BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
5580
5581 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
5582	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
5583				       disk_bytenr + len - 1, &list);
	BUG_ON(ret);
5584
5585 while (!list_empty(&list)) {
5586 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
5587 list_del_init(&sums->list);
5588
5589 sector_sum = sums->sums;
5590 sums->bytenr = ordered->start;
5591
5592 offset = 0;
5593 while (offset < sums->len) {
5594 sector_sum->bytenr += ordered->start - disk_bytenr;
5595 sector_sum++;
5596 offset += root->sectorsize;
5597 }
5598
5599 btrfs_add_ordered_sum(inode, ordered, sums);
5600 }
5601 btrfs_put_ordered_extent(ordered);
5602 return 0;
5603}
5604
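/*
 * Relocate everything in the block group at group_start.  The group is
 * first marked read only, then extents are moved in passes: pass 0
 * copies data into the reloc inode, pass 1 updates references while
 * preserving sharing, and later passes retry whatever was skipped.  If
 * a full pass skips every extent it found, the reloc inode is
 * recreated and the pass counter restarts.
 */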
5605int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
5606{
5607 struct btrfs_trans_handle *trans;
5608 struct btrfs_path *path;
5609 struct btrfs_fs_info *info = root->fs_info;
5610 struct extent_buffer *leaf;
5611 struct inode *reloc_inode;
5612 struct btrfs_block_group_cache *block_group;
5613 struct btrfs_key key;
5614 u64 skipped;
5615 u64 cur_byte;
5616 u64 total_found;
5617 u32 nritems;
5618 int ret;
5619 int progress;
5620 int pass = 0;
5621
5622 root = root->fs_info->extent_root;
5623
5624 block_group = btrfs_lookup_block_group(info, group_start);
5625 BUG_ON(!block_group);
5626
5627 printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
5628 (unsigned long long)block_group->key.objectid,
5629 (unsigned long long)block_group->flags);
5630
5631 path = btrfs_alloc_path();
5632 BUG_ON(!path);
5633
5634 reloc_inode = create_reloc_inode(info, block_group);
5635 BUG_ON(IS_ERR(reloc_inode));
5636
5637 __alloc_chunk_for_shrink(root, block_group, 1);
5638 set_block_group_readonly(block_group);
5639
5640 btrfs_start_delalloc_inodes(info->tree_root);
5641 btrfs_wait_ordered_extents(info->tree_root, 0);
5642again:
5643 skipped = 0;
5644 total_found = 0;
5645 progress = 0;
5646 key.objectid = block_group->key.objectid;
5647 key.offset = 0;
5648 key.type = 0;
5649 cur_byte = key.objectid;
5650
5651 trans = btrfs_start_transaction(info->tree_root, 1);
5652 btrfs_commit_transaction(trans, info->tree_root);
5653
5654 mutex_lock(&root->fs_info->cleaner_mutex);
5655 btrfs_clean_old_snapshots(info->tree_root);
5656 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
5657 mutex_unlock(&root->fs_info->cleaner_mutex);
5658
5659 while (1) {
5660 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5661 if (ret < 0)
5662 goto out;
5663next:
5664 leaf = path->nodes[0];
5665 nritems = btrfs_header_nritems(leaf);
5666 if (path->slots[0] >= nritems) {
5667 ret = btrfs_next_leaf(root, path);
5668 if (ret < 0)
5669 goto out;
5670 if (ret == 1) {
5671 ret = 0;
5672 break;
5673 }
5674 leaf = path->nodes[0];
5675 nritems = btrfs_header_nritems(leaf);
5676 }
5677
5678 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5679
5680 if (key.objectid >= block_group->key.objectid +
5681 block_group->key.offset)
5682 break;
5683
5684 if (progress && need_resched()) {
5685 btrfs_release_path(root, path);
5686 cond_resched();
5687 progress = 0;
5688 continue;
5689 }
5690 progress = 1;
5691
5692 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
5693 key.objectid + key.offset <= cur_byte) {
5694 path->slots[0]++;
5695 goto next;
5696 }
5697
5698 total_found++;
5699 cur_byte = key.objectid + key.offset;
5700 btrfs_release_path(root, path);
5701
5702 __alloc_chunk_for_shrink(root, block_group, 0);
5703 ret = relocate_one_extent(root, path, &key, block_group,
5704 reloc_inode, pass);
5705 BUG_ON(ret < 0);
5706 if (ret > 0)
5707 skipped++;
5708
5709 key.objectid = cur_byte;
5710 key.type = 0;
5711 key.offset = 0;
5712 }
5713
5714 btrfs_release_path(root, path);
5715
5716 if (pass == 0) {
5717 btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
5718 invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
5719 }
5720
5721 if (total_found > 0) {
5722 printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
5723 (unsigned long long)total_found, pass);
5724 pass++;
5725 if (total_found == skipped && pass > 2) {
5726 iput(reloc_inode);
5727 reloc_inode = create_reloc_inode(info, block_group);
5728 pass = 0;
5729 }
5730 goto again;
5731 }
5732
5733 /* delete reloc_inode */
5734 iput(reloc_inode);
5735
5736 /* unpin extents in this range */
5737 trans = btrfs_start_transaction(info->tree_root, 1);
5738 btrfs_commit_transaction(trans, info->tree_root);
5739
5740 spin_lock(&block_group->lock);
5741 WARN_ON(block_group->pinned > 0);
5742 WARN_ON(block_group->reserved > 0);
5743 WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
5744 spin_unlock(&block_group->lock);
5745 put_block_group(block_group);
5746 ret = 0;
5747out:
5748 btrfs_free_path(path);
5749 return ret;
5750}
5751
5752static int find_first_block_group(struct btrfs_root *root,
5753 struct btrfs_path *path, struct btrfs_key *key)
5754{
5755 int ret = 0;
5756 struct btrfs_key found_key;
5757 struct extent_buffer *leaf;
5758 int slot;
5759
5760 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
5761 if (ret < 0)
5762 goto out;
5763
5764 while (1) {
5765 slot = path->slots[0];
5766 leaf = path->nodes[0];
5767 if (slot >= btrfs_header_nritems(leaf)) {
5768 ret = btrfs_next_leaf(root, path);
5769 if (ret == 0)
5770 continue;
5771 if (ret < 0)
5772 goto out;
5773 break;
5774 }
5775 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5776
5777 if (found_key.objectid >= key->objectid &&
5778 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
5779 ret = 0;
5780 goto out;
5781 }
5782 path->slots[0]++;
5783 }
5784 ret = -ENOENT;
5785out:
5786 return ret;
5787}
5788
5789int btrfs_free_block_groups(struct btrfs_fs_info *info)
5790{
5791 struct btrfs_block_group_cache *block_group;
5792 struct rb_node *n;
5793
5794 spin_lock(&info->block_group_cache_lock);
5795 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
5796 block_group = rb_entry(n, struct btrfs_block_group_cache,
5797 cache_node);
5798 rb_erase(&block_group->cache_node,
5799 &info->block_group_cache_tree);
5800 spin_unlock(&info->block_group_cache_lock);
5801
5802 btrfs_remove_free_space_cache(block_group);
5803 down_write(&block_group->space_info->groups_sem);
5804 list_del(&block_group->list);
5805 up_write(&block_group->space_info->groups_sem);
5806
5807 WARN_ON(atomic_read(&block_group->count) != 1);
5808 kfree(block_group);
5809
5810 spin_lock(&info->block_group_cache_lock);
5811 }
5812 spin_unlock(&info->block_group_cache_lock);
5813 return 0;
5814}
5815
5816int btrfs_read_block_groups(struct btrfs_root *root)
5817{
5818 struct btrfs_path *path;
5819 int ret;
5820 struct btrfs_block_group_cache *cache;
5821 struct btrfs_fs_info *info = root->fs_info;
5822 struct btrfs_space_info *space_info;
5823 struct btrfs_key key;
5824 struct btrfs_key found_key;
5825 struct extent_buffer *leaf;
5826
5827 root = info->extent_root;
5828 key.objectid = 0;
5829 key.offset = 0;
5830 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
5831 path = btrfs_alloc_path();
5832 if (!path)
5833 return -ENOMEM;
5834
5835 while (1) {
5836 ret = find_first_block_group(root, path, &key);
5837 if (ret > 0) {
5838 ret = 0;
5839 goto error;
5840 }
5841 if (ret != 0)
5842 goto error;
5843
5844 leaf = path->nodes[0];
5845 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5846 cache = kzalloc(sizeof(*cache), GFP_NOFS);
5847 if (!cache) {
5848 ret = -ENOMEM;
5849 break;
5850 }
5851
5852 atomic_set(&cache->count, 1);
5853 spin_lock_init(&cache->lock);
5854 mutex_init(&cache->alloc_mutex);
5855 mutex_init(&cache->cache_mutex);
5856 INIT_LIST_HEAD(&cache->list);
5857 read_extent_buffer(leaf, &cache->item,
5858 btrfs_item_ptr_offset(leaf, path->slots[0]),
5859 sizeof(cache->item));
5860 memcpy(&cache->key, &found_key, sizeof(found_key));
5861
5862 key.objectid = found_key.objectid + found_key.offset;
5863 btrfs_release_path(root, path);
5864 cache->flags = btrfs_block_group_flags(&cache->item);
5865
5866 ret = update_space_info(info, cache->flags, found_key.offset,
5867 btrfs_block_group_used(&cache->item),
5868 &space_info);
5869 BUG_ON(ret);
5870 cache->space_info = space_info;
5871 down_write(&space_info->groups_sem);
5872 list_add_tail(&cache->list, &space_info->block_groups);
5873 up_write(&space_info->groups_sem);
5874
5875 ret = btrfs_add_block_group_cache(root->fs_info, cache);
5876 BUG_ON(ret);
5877
5878 set_avail_alloc_bits(root->fs_info, cache->flags);
5879 if (btrfs_chunk_readonly(root, cache->key.objectid))
5880 set_block_group_readonly(cache);
5881 }
5882 ret = 0;
5883error:
5884 btrfs_free_path(path);
5885 return ret;
5886}
5887
5888int btrfs_make_block_group(struct btrfs_trans_handle *trans,
5889 struct btrfs_root *root, u64 bytes_used,
5890 u64 type, u64 chunk_objectid, u64 chunk_offset,
5891 u64 size)
5892{
5893 int ret;
5894 struct btrfs_root *extent_root;
5895 struct btrfs_block_group_cache *cache;
5896
5897 extent_root = root->fs_info->extent_root;
5898
5899 root->fs_info->last_trans_new_blockgroup = trans->transid;
5900
5901 cache = kzalloc(sizeof(*cache), GFP_NOFS);
5902 if (!cache)
5903 return -ENOMEM;
5904
5905 cache->key.objectid = chunk_offset;
5906 cache->key.offset = size;
5907 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
5908 atomic_set(&cache->count, 1);
5909 spin_lock_init(&cache->lock);
5910 mutex_init(&cache->alloc_mutex);
5911 mutex_init(&cache->cache_mutex);
5912 INIT_LIST_HEAD(&cache->list);
5913
5914 btrfs_set_block_group_used(&cache->item, bytes_used);
5915 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
5916 cache->flags = type;
5917 btrfs_set_block_group_flags(&cache->item, type);
5918
5919 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
5920 &cache->space_info);
5921 BUG_ON(ret);
5922 down_write(&cache->space_info->groups_sem);
5923 list_add_tail(&cache->list, &cache->space_info->block_groups);
5924 up_write(&cache->space_info->groups_sem);
5925
5926 ret = btrfs_add_block_group_cache(root->fs_info, cache);
5927 BUG_ON(ret);
5928
5929 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
5930 sizeof(cache->item));
5931 BUG_ON(ret);
5932
5933 finish_current_insert(trans, extent_root, 0);
5934 ret = del_pending_extents(trans, extent_root, 0);
5935 BUG_ON(ret);
5936 set_avail_alloc_bits(extent_root->fs_info, type);
5937
5938 return 0;
5939}
5940
5941int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5942 struct btrfs_root *root, u64 group_start)
5943{
5944 struct btrfs_path *path;
5945 struct btrfs_block_group_cache *block_group;
5946 struct btrfs_key key;
5947 int ret;
5948
5949 root = root->fs_info->extent_root;
5950
5951 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
5952 BUG_ON(!block_group);
5953 BUG_ON(!block_group->ro);
5954
5955 memcpy(&key, &block_group->key, sizeof(key));
5956
5957 path = btrfs_alloc_path();
5958 BUG_ON(!path);
5959
5960 btrfs_remove_free_space_cache(block_group);
5961 rb_erase(&block_group->cache_node,
5962 &root->fs_info->block_group_cache_tree);
5963 down_write(&block_group->space_info->groups_sem);
5964 list_del(&block_group->list);
5965 up_write(&block_group->space_info->groups_sem);
5966
5967 spin_lock(&block_group->space_info->lock);
5968 block_group->space_info->total_bytes -= block_group->key.offset;
5969 block_group->space_info->bytes_readonly -= block_group->key.offset;
5970 spin_unlock(&block_group->space_info->lock);
5971 block_group->space_info->full = 0;
5972
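	/*
	 * two puts: one for the reference taken by
	 * btrfs_lookup_block_group() above and, presumably, one for the
	 * reference the block group cache tree held before rb_erase().
	 */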
5973 put_block_group(block_group);
5974 put_block_group(block_group);
5975
5976 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
5977 if (ret > 0)
5978 ret = -EIO;
5979 if (ret < 0)
5980 goto out;
5981
5982 ret = btrfs_del_item(trans, root, path);
5983out:
5984 btrfs_free_path(path);
5985 return ret;
5986}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
new file mode 100644
index 000000000000..e086d407f1fa
--- /dev/null
+++ b/fs/btrfs/extent_io.c
@@ -0,0 +1,3717 @@
1#include <linux/bitops.h>
2#include <linux/slab.h>
3#include <linux/bio.h>
4#include <linux/mm.h>
5#include <linux/gfp.h>
6#include <linux/pagemap.h>
7#include <linux/page-flags.h>
8#include <linux/module.h>
9#include <linux/spinlock.h>
10#include <linux/blkdev.h>
11#include <linux/swap.h>
12#include <linux/version.h>
13#include <linux/writeback.h>
14#include <linux/pagevec.h>
15#include "extent_io.h"
16#include "extent_map.h"
17#include "compat.h"
18#include "ctree.h"
19#include "btrfs_inode.h"
20
21/* temporary define until extent_map moves out of btrfs */
22struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
23 unsigned long extra_flags,
24 void (*ctor)(void *, struct kmem_cache *,
25 unsigned long));
26
27static struct kmem_cache *extent_state_cache;
28static struct kmem_cache *extent_buffer_cache;
29
30static LIST_HEAD(buffers);
31static LIST_HEAD(states);
32
33#define LEAK_DEBUG 0
34#if LEAK_DEBUG
35static DEFINE_SPINLOCK(leak_lock);
36#endif
37
38#define BUFFER_LRU_MAX 64
39
40struct tree_entry {
41 u64 start;
42 u64 end;
43 struct rb_node rb_node;
44};
45
46struct extent_page_data {
47 struct bio *bio;
48 struct extent_io_tree *tree;
49 get_extent_t *get_extent;
50
51	/* tells writepage not to lock the state bits for this range;
52	 * it still does the unlocking
53	 */
54 int extent_locked;
55};
56
57int __init extent_io_init(void)
58{
59 extent_state_cache = btrfs_cache_create("extent_state",
60 sizeof(struct extent_state), 0,
61 NULL);
62 if (!extent_state_cache)
63 return -ENOMEM;
64
65 extent_buffer_cache = btrfs_cache_create("extent_buffers",
66 sizeof(struct extent_buffer), 0,
67 NULL);
68 if (!extent_buffer_cache)
69 goto free_state_cache;
70 return 0;
71
72free_state_cache:
73 kmem_cache_destroy(extent_state_cache);
74 return -ENOMEM;
75}
76
77void extent_io_exit(void)
78{
79 struct extent_state *state;
80 struct extent_buffer *eb;
81
82 while (!list_empty(&states)) {
83 state = list_entry(states.next, struct extent_state, leak_list);
84 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
85 "state %lu in tree %p refs %d\n",
86 (unsigned long long)state->start,
87 (unsigned long long)state->end,
88 state->state, state->tree, atomic_read(&state->refs));
89 list_del(&state->leak_list);
90 kmem_cache_free(extent_state_cache, state);
92 }
93
94 while (!list_empty(&buffers)) {
95 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
96 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
97 "refs %d\n", (unsigned long long)eb->start,
98 eb->len, atomic_read(&eb->refs));
99 list_del(&eb->leak_list);
100 kmem_cache_free(extent_buffer_cache, eb);
101 }
102 if (extent_state_cache)
103 kmem_cache_destroy(extent_state_cache);
104 if (extent_buffer_cache)
105 kmem_cache_destroy(extent_buffer_cache);
106}
107
108void extent_io_tree_init(struct extent_io_tree *tree,
109 struct address_space *mapping, gfp_t mask)
110{
111 tree->state.rb_node = NULL;
112 tree->buffer.rb_node = NULL;
113 tree->ops = NULL;
114 tree->dirty_bytes = 0;
115 spin_lock_init(&tree->lock);
116 spin_lock_init(&tree->buffer_lock);
117 tree->mapping = mapping;
118}
119
120static struct extent_state *alloc_extent_state(gfp_t mask)
121{
122 struct extent_state *state;
123#if LEAK_DEBUG
124 unsigned long flags;
125#endif
126
127 state = kmem_cache_alloc(extent_state_cache, mask);
128 if (!state)
129 return state;
130 state->state = 0;
131 state->private = 0;
132 state->tree = NULL;
133#if LEAK_DEBUG
134 spin_lock_irqsave(&leak_lock, flags);
135 list_add(&state->leak_list, &states);
136 spin_unlock_irqrestore(&leak_lock, flags);
137#endif
138 atomic_set(&state->refs, 1);
139 init_waitqueue_head(&state->wq);
140 return state;
141}
142
143static void free_extent_state(struct extent_state *state)
144{
145 if (!state)
146 return;
147 if (atomic_dec_and_test(&state->refs)) {
148#if LEAK_DEBUG
149 unsigned long flags;
150#endif
151 WARN_ON(state->tree);
152#if LEAK_DEBUG
153 spin_lock_irqsave(&leak_lock, flags);
154 list_del(&state->leak_list);
155 spin_unlock_irqrestore(&leak_lock, flags);
156#endif
157 kmem_cache_free(extent_state_cache, state);
158 }
159}
160
161static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
162 struct rb_node *node)
163{
164 struct rb_node **p = &root->rb_node;
165 struct rb_node *parent = NULL;
166 struct tree_entry *entry;
167
168 while (*p) {
169 parent = *p;
170 entry = rb_entry(parent, struct tree_entry, rb_node);
171
172 if (offset < entry->start)
173 p = &(*p)->rb_left;
174 else if (offset > entry->end)
175 p = &(*p)->rb_right;
176 else
177 return parent;
178 }
179
180 entry = rb_entry(node, struct tree_entry, rb_node);
181 rb_link_node(node, parent, p);
182 rb_insert_color(node, root);
183 return NULL;
184}
185
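/*
 * Search the state tree for the entry containing 'offset'.  On a miss
 * this returns NULL; *prev_ret is then set to the first entry ending
 * at or after offset, and *next_ret to the last entry starting at or
 * before it.
 */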
186static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
187 struct rb_node **prev_ret,
188 struct rb_node **next_ret)
189{
190 struct rb_root *root = &tree->state;
191 struct rb_node *n = root->rb_node;
192 struct rb_node *prev = NULL;
193 struct rb_node *orig_prev = NULL;
194 struct tree_entry *entry;
195 struct tree_entry *prev_entry = NULL;
196
197 while (n) {
198 entry = rb_entry(n, struct tree_entry, rb_node);
199 prev = n;
200 prev_entry = entry;
201
202 if (offset < entry->start)
203 n = n->rb_left;
204 else if (offset > entry->end)
205 n = n->rb_right;
206 else
207 return n;
208 }
209
210 if (prev_ret) {
211 orig_prev = prev;
212 while (prev && offset > prev_entry->end) {
213 prev = rb_next(prev);
214 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
215 }
216 *prev_ret = prev;
217 prev = orig_prev;
218 }
219
220 if (next_ret) {
221 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
222 while (prev && offset < prev_entry->start) {
223 prev = rb_prev(prev);
224 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
225 }
226 *next_ret = prev;
227 }
228 return NULL;
229}
230
231static inline struct rb_node *tree_search(struct extent_io_tree *tree,
232 u64 offset)
233{
234 struct rb_node *prev = NULL;
235 struct rb_node *ret;
236
237 ret = __etree_search(tree, offset, &prev, NULL);
238 if (!ret)
239 return prev;
240 return ret;
241}
242
243static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
244 u64 offset, struct rb_node *node)
245{
246 struct rb_root *root = &tree->buffer;
247 struct rb_node **p = &root->rb_node;
248 struct rb_node *parent = NULL;
249 struct extent_buffer *eb;
250
251 while (*p) {
252 parent = *p;
253 eb = rb_entry(parent, struct extent_buffer, rb_node);
254
255 if (offset < eb->start)
256 p = &(*p)->rb_left;
257 else if (offset > eb->start)
258 p = &(*p)->rb_right;
259 else
260 return eb;
261 }
262
263 rb_link_node(node, parent, p);
264 rb_insert_color(node, root);
265 return NULL;
266}
267
268static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
269 u64 offset)
270{
271 struct rb_root *root = &tree->buffer;
272 struct rb_node *n = root->rb_node;
273 struct extent_buffer *eb;
274
275 while (n) {
276 eb = rb_entry(n, struct extent_buffer, rb_node);
277 if (offset < eb->start)
278 n = n->rb_left;
279 else if (offset > eb->start)
280 n = n->rb_right;
281 else
282 return eb;
283 }
284 return NULL;
285}
286
287/*
288 * utility function to look for merge candidates inside a given range.
289 * Any extents with matching state are merged together into a single
290 * extent in the tree. Extents with EXTENT_IOBITS in their state field
291 * are not merged because the end_io handlers need to be able to do
292 * operations on them without sleeping (or doing allocations/splits).
293 *
294 * This should be called with the tree lock held.
295 */
296static int merge_state(struct extent_io_tree *tree,
297 struct extent_state *state)
298{
299 struct extent_state *other;
300 struct rb_node *other_node;
301
302 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
303 return 0;
304
305 other_node = rb_prev(&state->rb_node);
306 if (other_node) {
307 other = rb_entry(other_node, struct extent_state, rb_node);
308 if (other->end == state->start - 1 &&
309 other->state == state->state) {
310 state->start = other->start;
311 other->tree = NULL;
312 rb_erase(&other->rb_node, &tree->state);
313 free_extent_state(other);
314 }
315 }
316 other_node = rb_next(&state->rb_node);
317 if (other_node) {
318 other = rb_entry(other_node, struct extent_state, rb_node);
319 if (other->start == state->end + 1 &&
320 other->state == state->state) {
321 other->start = state->start;
322 state->tree = NULL;
323 rb_erase(&state->rb_node, &tree->state);
324 free_extent_state(state);
325 }
326 }
327 return 0;
328}
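/*
 * Illustrative worked example (not part of the original file), with
 * made-up offsets:
 *
 *   before merge_state():  [0, 4095] DIRTY   [4096, 8191] DIRTY
 *   after merge_state():   [0, 8191] DIRTY
 *
 * Had either state carried EXTENT_IOBITS or EXTENT_BOUNDARY, or had
 * the state bits differed, the two states would have been left alone.
 */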
329
330static void set_state_cb(struct extent_io_tree *tree,
331 struct extent_state *state,
332 unsigned long bits)
333{
334 if (tree->ops && tree->ops->set_bit_hook) {
335 tree->ops->set_bit_hook(tree->mapping->host, state->start,
336 state->end, state->state, bits);
337 }
338}
339
340static void clear_state_cb(struct extent_io_tree *tree,
341 struct extent_state *state,
342 unsigned long bits)
343{
344 if (tree->ops && tree->ops->clear_bit_hook) {
345 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
346 state->end, state->state, bits);
347 }
348}
349
350/*
351 * insert an extent_state struct into the tree. 'bits' are set on the
352 * struct before it is inserted.
353 *
354 * This may return -EEXIST if the extent is already there, in which case the
355 * state struct is freed.
356 *
357 * The tree lock is not taken internally. This is a utility function and
358 * probably isn't what you want to call (see set/clear_extent_bit).
359 */
360static int insert_state(struct extent_io_tree *tree,
361 struct extent_state *state, u64 start, u64 end,
362 int bits)
363{
364 struct rb_node *node;
365
366 if (end < start) {
367 printk(KERN_ERR "btrfs end < start %llu %llu\n",
368 (unsigned long long)end,
369 (unsigned long long)start);
370 WARN_ON(1);
371 }
372 if (bits & EXTENT_DIRTY)
373 tree->dirty_bytes += end - start + 1;
374 set_state_cb(tree, state, bits);
375 state->state |= bits;
376 state->start = start;
377 state->end = end;
378 node = tree_insert(&tree->state, end, &state->rb_node);
379 if (node) {
380 struct extent_state *found;
381 found = rb_entry(node, struct extent_state, rb_node);
382 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
383 "%llu %llu\n", (unsigned long long)found->start,
384 (unsigned long long)found->end,
385 (unsigned long long)start, (unsigned long long)end);
386 free_extent_state(state);
387 return -EEXIST;
388 }
389 state->tree = tree;
390 merge_state(tree, state);
391 return 0;
392}
393
394/*
395 * split a given extent state struct in two, inserting the preallocated
396 * struct 'prealloc' as the newly created second half. 'split' indicates an
397 * offset inside 'orig' where it should be split.
398 *
399 * Before calling, the tree has 'orig' at [orig->start, orig->end].
400 * After calling, there
401 * are two extent state structs in the tree:
402 * prealloc: [orig->start, split - 1]
403 * orig: [ split, orig->end ]
404 *
405 * The tree locks are not taken by this function. They need to be held
406 * by the caller.
407 */
408static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
409 struct extent_state *prealloc, u64 split)
410{
411 struct rb_node *node;
412 prealloc->start = orig->start;
413 prealloc->end = split - 1;
414 prealloc->state = orig->state;
415 orig->start = split;
416
417 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
418 if (node) {
419 struct extent_state *found;
420 found = rb_entry(node, struct extent_state, rb_node);
421 free_extent_state(prealloc);
422 return -EEXIST;
423 }
424 prealloc->tree = tree;
425 return 0;
426}
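/*
 * Illustrative sketch (not part of the original file): splitting a
 * made-up state [0, 8191] at offset 4096 while holding the tree lock.
 *
 *   before:  orig = [0, 8191]
 *   after:   prealloc = [0, 4095], orig = [4096, 8191]
 */
#if 0
	struct extent_state *prealloc = alloc_extent_state(GFP_NOFS);

	if (prealloc) {
		/* on success the tree owns prealloc; on -EEXIST it is freed */
		split_state(tree, orig, prealloc, 4096);
		prealloc = NULL;
	}
#endif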
427
428/*
429 * utility function to clear some bits in an extent state struct.
430 * it will optionally wake up anyone waiting on this state (wake == 1), or
431 * forcibly remove the state from the tree (delete == 1).
432 *
433 * If no bits are set on the state struct after clearing things, the
434 * struct is freed and removed from the tree
435 */
436static int clear_state_bit(struct extent_io_tree *tree,
437 struct extent_state *state, int bits, int wake,
438 int delete)
439{
440 int ret = state->state & bits;
441
442 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
443 u64 range = state->end - state->start + 1;
444 WARN_ON(range > tree->dirty_bytes);
445 tree->dirty_bytes -= range;
446 }
447 clear_state_cb(tree, state, bits);
448 state->state &= ~bits;
449 if (wake)
450 wake_up(&state->wq);
451 if (delete || state->state == 0) {
452 if (state->tree) {
453 clear_state_cb(tree, state, state->state);
454 rb_erase(&state->rb_node, &tree->state);
455 state->tree = NULL;
456 free_extent_state(state);
457 } else {
458 WARN_ON(1);
459 }
460 } else {
461 merge_state(tree, state);
462 }
463 return ret;
464}
465
466/*
467 * clear some bits on a range in the tree. This may require splitting
468 * or inserting elements in the tree, so the gfp mask is used to
469 * indicate which allocations or sleeping are allowed.
470 *
471 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
472 * the given range from the tree regardless of state (i.e. for truncate).
473 *
474 * the range [start, end] is inclusive.
475 *
476 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
477 * bits were already set, or zero if none of the bits were already set.
478 */
479int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
480 int bits, int wake, int delete, gfp_t mask)
481{
482 struct extent_state *state;
483 struct extent_state *prealloc = NULL;
484 struct rb_node *node;
485 int err;
486 int set = 0;
487
488again:
489 if (!prealloc && (mask & __GFP_WAIT)) {
490 prealloc = alloc_extent_state(mask);
491 if (!prealloc)
492 return -ENOMEM;
493 }
494
495 spin_lock(&tree->lock);
496 /*
497 * this search will find the extents that end after
498 * our range starts
499 */
500 node = tree_search(tree, start);
501 if (!node)
502 goto out;
503 state = rb_entry(node, struct extent_state, rb_node);
504 if (state->start > end)
505 goto out;
506 WARN_ON(state->end < start);
507
508 /*
509 * | ---- desired range ---- |
510 * | state | or
511 * | ------------- state -------------- |
512 *
513 * We need to split the extent we found, and may flip
514 * bits on second half.
515 *
516 * If the extent we found extends past our range, we
517 * just split and search again. It'll get split again
518 * the next time though.
519 *
520 * If the extent we found is inside our range, we clear
521 * the desired bit on it.
522 */
523
524 if (state->start < start) {
525 if (!prealloc)
526 prealloc = alloc_extent_state(GFP_ATOMIC);
527 err = split_state(tree, state, prealloc, start);
528 BUG_ON(err == -EEXIST);
529 prealloc = NULL;
530 if (err)
531 goto out;
532 if (state->end <= end) {
533 start = state->end + 1;
534 set |= clear_state_bit(tree, state, bits,
535 wake, delete);
536 } else {
537 start = state->start;
538 }
539 goto search_again;
540 }
541 /*
542 * | ---- desired range ---- |
543 * | state |
544 * We need to split the extent, and clear the bit
545 * on the first half
546 */
547 if (state->start <= end && state->end > end) {
548 if (!prealloc)
549 prealloc = alloc_extent_state(GFP_ATOMIC);
550 err = split_state(tree, state, prealloc, end + 1);
551 BUG_ON(err == -EEXIST);
552
553 if (wake)
554 wake_up(&state->wq);
555 set |= clear_state_bit(tree, prealloc, bits,
556 wake, delete);
557 prealloc = NULL;
558 goto out;
559 }
560
561 start = state->end + 1;
562 set |= clear_state_bit(tree, state, bits, wake, delete);
563 goto search_again;
564
565out:
566 spin_unlock(&tree->lock);
567 if (prealloc)
568 free_extent_state(prealloc);
569
570 return set;
571
572search_again:
573 if (start > end)
574 goto out;
575 spin_unlock(&tree->lock);
576 if (mask & __GFP_WAIT)
577 cond_resched();
578 goto again;
579}
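/*
 * Illustrative sketch (not part of the original file): dropping the
 * dirty bit on one page worth of bytes, waking waiters but leaving the
 * states in place if other bits remain set. 'tree' and 'start' are
 * placeholders.
 */
#if 0
	int was_dirty = clear_extent_bit(tree, start,
					 start + PAGE_CACHE_SIZE - 1,
					 EXTENT_DIRTY, 1, 0, GFP_NOFS);
#endif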
580
581static int wait_on_state(struct extent_io_tree *tree,
582 struct extent_state *state)
583 __releases(tree->lock)
584 __acquires(tree->lock)
585{
586 DEFINE_WAIT(wait);
587 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
588 spin_unlock(&tree->lock);
589 schedule();
590 spin_lock(&tree->lock);
591 finish_wait(&state->wq, &wait);
592 return 0;
593}
594
595/*
596 * waits for one or more bits to clear on a range in the state tree.
597 * The range [start, end] is inclusive.
598 * The tree lock is taken by this function.
599 */
600int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
601{
602 struct extent_state *state;
603 struct rb_node *node;
604
605 spin_lock(&tree->lock);
606again:
607 while (1) {
608 /*
609 * this search will find all the extents that end after
610 * our range starts
611 */
612 node = tree_search(tree, start);
613 if (!node)
614 break;
615
616 state = rb_entry(node, struct extent_state, rb_node);
617
618 if (state->start > end)
619 goto out;
620
621 if (state->state & bits) {
622 start = state->start;
623 atomic_inc(&state->refs);
624 wait_on_state(tree, state);
625 free_extent_state(state);
626 goto again;
627 }
628 start = state->end + 1;
629
630 if (start > end)
631 break;
632
633 if (need_resched()) {
634 spin_unlock(&tree->lock);
635 cond_resched();
636 spin_lock(&tree->lock);
637 }
638 }
639out:
640 spin_unlock(&tree->lock);
641 return 0;
642}
643
644static void set_state_bits(struct extent_io_tree *tree,
645 struct extent_state *state,
646 int bits)
647{
648 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
649 u64 range = state->end - state->start + 1;
650 tree->dirty_bytes += range;
651 }
652 set_state_cb(tree, state, bits);
653 state->state |= bits;
654}
655
656/*
657 * set some bits on a range in the tree. This may require allocations
658 * or sleeping, so the gfp mask is used to indicate what is allowed.
659 *
660 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
661 * range already has the desired bits set. The start of the existing
662 * range is returned in failed_start in this case.
663 *
664 * [start, end] is inclusive
665 * This takes the tree lock.
666 */
667static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
668 int bits, int exclusive, u64 *failed_start,
669 gfp_t mask)
670{
671 struct extent_state *state;
672 struct extent_state *prealloc = NULL;
673 struct rb_node *node;
674 int err = 0;
675 int set;
676 u64 last_start;
677 u64 last_end;
678again:
679 if (!prealloc && (mask & __GFP_WAIT)) {
680 prealloc = alloc_extent_state(mask);
681 if (!prealloc)
682 return -ENOMEM;
683 }
684
685 spin_lock(&tree->lock);
686 /*
687 * this search will find all the extents that end after
688 * our range starts.
689 */
690 node = tree_search(tree, start);
691 if (!node) {
692 err = insert_state(tree, prealloc, start, end, bits);
693 prealloc = NULL;
694 BUG_ON(err == -EEXIST);
695 goto out;
696 }
697
698 state = rb_entry(node, struct extent_state, rb_node);
699 last_start = state->start;
700 last_end = state->end;
701
702 /*
703 * | ---- desired range ---- |
704 * | state |
705 *
706 * Just lock what we found and keep going
707 */
708 if (state->start == start && state->end <= end) {
709 set = state->state & bits;
710 if (set && exclusive) {
711 *failed_start = state->start;
712 err = -EEXIST;
713 goto out;
714 }
715 set_state_bits(tree, state, bits);
716 start = state->end + 1;
717 merge_state(tree, state);
718 goto search_again;
719 }
720
721 /*
722 * | ---- desired range ---- |
723 * | state |
724 * or
725 * | ------------- state -------------- |
726 *
727 * We need to split the extent we found, and may flip bits on
728 * second half.
729 *
730 * If the extent we found extends past our
731 * range, we just split and search again. It'll get split
732 * again the next time though.
733 *
734 * If the extent we found is inside our range, we set the
735 * desired bit on it.
736 */
737 if (state->start < start) {
738 set = state->state & bits;
739 if (exclusive && set) {
740 *failed_start = start;
741 err = -EEXIST;
742 goto out;
743 }
744 err = split_state(tree, state, prealloc, start);
745 BUG_ON(err == -EEXIST);
746 prealloc = NULL;
747 if (err)
748 goto out;
749 if (state->end <= end) {
750 set_state_bits(tree, state, bits);
751 start = state->end + 1;
752 merge_state(tree, state);
753 } else {
754 start = state->start;
755 }
756 goto search_again;
757 }
758 /*
759 * | ---- desired range ---- |
760 * | state | or | state |
761 *
762 * There's a hole, we need to insert something in it and
763 * ignore the extent we found.
764 */
765 if (state->start > start) {
766 u64 this_end;
767 if (end < last_start)
768 this_end = end;
769 else
770 this_end = last_start - 1;
771 err = insert_state(tree, prealloc, start, this_end,
772 bits);
773 prealloc = NULL;
774 BUG_ON(err == -EEXIST);
775 if (err)
776 goto out;
777 start = this_end + 1;
778 goto search_again;
779 }
780 /*
781 * | ---- desired range ---- |
782 * | state |
783 * We need to split the extent, and set the bit
784 * on the first half
785 */
786 if (state->start <= end && state->end > end) {
787 set = state->state & bits;
788 if (exclusive && set) {
789 *failed_start = start;
790 err = -EEXIST;
791 goto out;
792 }
793 err = split_state(tree, state, prealloc, end + 1);
794 BUG_ON(err == -EEXIST);
795
796 set_state_bits(tree, prealloc, bits);
797 merge_state(tree, prealloc);
798 prealloc = NULL;
799 goto out;
800 }
801
802 goto search_again;
803
804out:
805 spin_unlock(&tree->lock);
806 if (prealloc)
807 free_extent_state(prealloc);
808
809 return err;
810
811search_again:
812 if (start > end)
813 goto out;
814 spin_unlock(&tree->lock);
815 if (mask & __GFP_WAIT)
816 cond_resched();
817 goto again;
818}
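/*
 * Illustrative sketch (not part of the original file): the exclusive
 * mode is the primitive lock_extent() below is built on. On conflict
 * the start of the already-set range comes back through failed_start,
 * so the caller knows where to wait.
 */
#if 0
	u64 failed_start;
	int err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				 &failed_start, GFP_NOFS);

	if (err == -EEXIST)	/* someone holds [failed_start, ...] */
		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
#endif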
819
820/* wrappers around set/clear extent bit */
821int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
822 gfp_t mask)
823{
824 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
825 mask);
826}
827
828int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
829 gfp_t mask)
830{
831 return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
832}
833
834int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
835 int bits, gfp_t mask)
836{
837 return set_extent_bit(tree, start, end, bits, 0, NULL,
838 mask);
839}
840
841int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
842 int bits, gfp_t mask)
843{
844 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
845}
846
847int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
848 gfp_t mask)
849{
850 return set_extent_bit(tree, start, end,
851 EXTENT_DELALLOC | EXTENT_DIRTY,
852 0, NULL, mask);
853}
854
855int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
856 gfp_t mask)
857{
858 return clear_extent_bit(tree, start, end,
859 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
860}
861
862int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
863 gfp_t mask)
864{
865 return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
866}
867
868int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
869 gfp_t mask)
870{
871 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
872 mask);
873}
874
875static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
876 gfp_t mask)
877{
878 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
879}
880
881int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
882 gfp_t mask)
883{
884 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
885 mask);
886}
887
888static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
889 u64 end, gfp_t mask)
890{
891 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
892}
893
894static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
895 gfp_t mask)
896{
897 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
898 0, NULL, mask);
899}
900
901static int clear_extent_writeback(struct extent_io_tree *tree, u64 start,
902 u64 end, gfp_t mask)
903{
904 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
905}
906
907int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
908{
909 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
910}
911
912/*
913 * either insert or lock the state struct between start and end. Use mask
914 * to tell us if waiting is desired.
915 */
916int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
917{
918 int err;
919 u64 failed_start;
920 while (1) {
921 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
922 &failed_start, mask);
923 if (err == -EEXIST && (mask & __GFP_WAIT)) {
924 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
925 start = failed_start;
926 } else {
927 break;
928 }
929 WARN_ON(start > end);
930 }
931 return err;
932}
933
934int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
935 gfp_t mask)
936{
937 int err;
938 u64 failed_start;
939
940 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
941 &failed_start, mask);
942 if (err == -EEXIST) {
943 if (failed_start > start)
944 clear_extent_bit(tree, start, failed_start - 1,
945 EXTENT_LOCKED, 1, 0, mask);
946 return 0;
947 }
948 return 1;
949}
950
951int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
952 gfp_t mask)
953{
954 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
955}
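/*
 * Illustrative sketch (not part of the original file): the usual
 * pairing around work on a byte range; the offsets are made up.
 */
#if 0
	lock_extent(tree, 0, 16383, GFP_NOFS);
	/* ... read or modify bytes [0, 16383] ... */
	unlock_extent(tree, 0, 16383, GFP_NOFS);
#endif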
956
957/*
958 * helper function to set pages and extents in the tree dirty
959 */
960int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
961{
962 unsigned long index = start >> PAGE_CACHE_SHIFT;
963 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
964 struct page *page;
965
966 while (index <= end_index) {
967 page = find_get_page(tree->mapping, index);
968 BUG_ON(!page);
969 __set_page_dirty_nobuffers(page);
970 page_cache_release(page);
971 index++;
972 }
973 set_extent_dirty(tree, start, end, GFP_NOFS);
974 return 0;
975}
976
977/*
978 * helper function to set both pages and extents in the tree writeback
979 */
980static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
981{
982 unsigned long index = start >> PAGE_CACHE_SHIFT;
983 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
984 struct page *page;
985
986 while (index <= end_index) {
987 page = find_get_page(tree->mapping, index);
988 BUG_ON(!page);
989 set_page_writeback(page);
990 page_cache_release(page);
991 index++;
992 }
993 set_extent_writeback(tree, start, end, GFP_NOFS);
994 return 0;
995}
996
997/*
998 * find the first offset in the io tree with 'bits' set. zero is
999 * returned if we find something, and *start_ret and *end_ret are
1000 * set to reflect the state struct that was found.
1001 *
1002 * If nothing was found, 1 is returned, or < 0 on error
1003 */
1004int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1005 u64 *start_ret, u64 *end_ret, int bits)
1006{
1007 struct rb_node *node;
1008 struct extent_state *state;
1009 int ret = 1;
1010
1011 spin_lock(&tree->lock);
1012 /*
1013 * this search will find all the extents that end after
1014 * our range starts.
1015 */
1016 node = tree_search(tree, start);
1017 if (!node)
1018 goto out;
1019
1020 while (1) {
1021 state = rb_entry(node, struct extent_state, rb_node);
1022 if (state->end >= start && (state->state & bits)) {
1023 *start_ret = state->start;
1024 *end_ret = state->end;
1025 ret = 0;
1026 break;
1027 }
1028 node = rb_next(node);
1029 if (!node)
1030 break;
1031 }
1032out:
1033 spin_unlock(&tree->lock);
1034 return ret;
1035}
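/*
 * Illustrative sketch (not part of the original file): walking every
 * dirty range in the tree from offset zero.
 */
#if 0
	u64 found_start, found_end;
	u64 cur = 0;

	while (!find_first_extent_bit(tree, cur, &found_start,
				      &found_end, EXTENT_DIRTY)) {
		/* ... process [found_start, found_end] ... */
		cur = found_end + 1;
	}
#endif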
1036
1037/* find the first state struct with 'bits' set after 'start', and
1038 * return it. tree->lock must be held. NULL will be returned if
1039 * nothing was found after 'start'
1040 */
1041struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1042 u64 start, int bits)
1043{
1044 struct rb_node *node;
1045 struct extent_state *state;
1046
1047 /*
1048 * this search will find all the extents that end after
1049 * our range starts.
1050 */
1051 node = tree_search(tree, start);
1052 if (!node)
1053 goto out;
1054
1055 while (1) {
1056 state = rb_entry(node, struct extent_state, rb_node);
1057 if (state->end >= start && (state->state & bits))
1058 return state;
1059
1060 node = rb_next(node);
1061 if (!node)
1062 break;
1063 }
1064out:
1065 return NULL;
1066}
1067
1068/*
1069 * find a contiguous range of bytes in the file marked as delalloc, not
1070 * more than 'max_bytes'. start and end are used to return the range.
1071 *
1072 * nonzero is returned if we find something, 0 if nothing was in the tree
1073 */
1074static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1075 u64 *start, u64 *end, u64 max_bytes)
1076{
1077 struct rb_node *node;
1078 struct extent_state *state;
1079 u64 cur_start = *start;
1080 u64 found = 0;
1081 u64 total_bytes = 0;
1082
1083 spin_lock(&tree->lock);
1084
1085 /*
1086 * this search will find all the extents that end after
1087 * our range starts.
1088 */
1089 node = tree_search(tree, cur_start);
1090 if (!node) {
1091 if (!found)
1092 *end = (u64)-1;
1093 goto out;
1094 }
1095
1096 while (1) {
1097 state = rb_entry(node, struct extent_state, rb_node);
1098 if (found && (state->start != cur_start ||
1099 (state->state & EXTENT_BOUNDARY))) {
1100 goto out;
1101 }
1102 if (!(state->state & EXTENT_DELALLOC)) {
1103 if (!found)
1104 *end = state->end;
1105 goto out;
1106 }
1107 if (!found)
1108 *start = state->start;
1109 found++;
1110 *end = state->end;
1111 cur_start = state->end + 1;
1112 node = rb_next(node);
1113 if (!node)
1114 break;
1115 total_bytes += state->end - state->start + 1;
1116 if (total_bytes >= max_bytes)
1117 break;
1118 }
1119out:
1120 spin_unlock(&tree->lock);
1121 return found;
1122}
1123
1124static noinline int __unlock_for_delalloc(struct inode *inode,
1125 struct page *locked_page,
1126 u64 start, u64 end)
1127{
1128 int ret;
1129 struct page *pages[16];
1130 unsigned long index = start >> PAGE_CACHE_SHIFT;
1131 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1132 unsigned long nr_pages = end_index - index + 1;
1133 int i;
1134
1135 if (index == locked_page->index && end_index == index)
1136 return 0;
1137
1138 while (nr_pages > 0) {
1139 ret = find_get_pages_contig(inode->i_mapping, index,
1140 min_t(unsigned long, nr_pages,
1141 ARRAY_SIZE(pages)), pages);
1142 for (i = 0; i < ret; i++) {
1143 if (pages[i] != locked_page)
1144 unlock_page(pages[i]);
1145 page_cache_release(pages[i]);
1146 }
1147 nr_pages -= ret;
1148 index += ret;
1149 cond_resched();
1150 }
1151 return 0;
1152}
1153
1154static noinline int lock_delalloc_pages(struct inode *inode,
1155 struct page *locked_page,
1156 u64 delalloc_start,
1157 u64 delalloc_end)
1158{
1159 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1160 unsigned long start_index = index;
1161 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1162 unsigned long pages_locked = 0;
1163 struct page *pages[16];
1164 unsigned long nrpages;
1165 int ret;
1166 int i;
1167
1168 /* the caller is responsible for locking the start index */
1169 if (index == locked_page->index && index == end_index)
1170 return 0;
1171
1172 /* skip the page at the start index */
1173 nrpages = end_index - index + 1;
1174 while (nrpages > 0) {
1175 ret = find_get_pages_contig(inode->i_mapping, index,
1176 min_t(unsigned long,
1177 nrpages, ARRAY_SIZE(pages)), pages);
1178 if (ret == 0) {
1179 ret = -EAGAIN;
1180 goto done;
1181 }
1182 /* now we have an array of pages, lock them all */
1183 for (i = 0; i < ret; i++) {
1184 /*
1185 * the caller is taking responsibility for
1186 * locked_page
1187 */
1188 if (pages[i] != locked_page) {
1189 lock_page(pages[i]);
1190 if (!PageDirty(pages[i]) ||
1191 pages[i]->mapping != inode->i_mapping) {
1192 ret = -EAGAIN;
1193 unlock_page(pages[i]);
1194 page_cache_release(pages[i]);
1195 goto done;
1196 }
1197 }
1198 page_cache_release(pages[i]);
1199 pages_locked++;
1200 }
1201 nrpages -= ret;
1202 index += ret;
1203 cond_resched();
1204 }
1205 ret = 0;
1206done:
1207 if (ret && pages_locked) {
1208 __unlock_for_delalloc(inode, locked_page,
1209 delalloc_start,
1210 ((u64)(start_index + pages_locked - 1)) <<
1211 PAGE_CACHE_SHIFT);
1212 }
1213 return ret;
1214}
1215
1216/*
1217 * find a contiguous range of bytes in the file marked as delalloc, not
1218 * more than 'max_bytes'. start and end are used to return the range.
1219 *
1220 * nonzero is returned if we find something, 0 if nothing was in the tree
1221 */
1222static noinline u64 find_lock_delalloc_range(struct inode *inode,
1223 struct extent_io_tree *tree,
1224 struct page *locked_page,
1225 u64 *start, u64 *end,
1226 u64 max_bytes)
1227{
1228 u64 delalloc_start;
1229 u64 delalloc_end;
1230 u64 found;
1231 int ret;
1232 int loops = 0;
1233
1234again:
1235 /* step one, find a bunch of delalloc bytes starting at start */
1236 delalloc_start = *start;
1237 delalloc_end = 0;
1238 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1239 max_bytes);
1240 if (!found || delalloc_end <= *start) {
1241 *start = delalloc_start;
1242 *end = delalloc_end;
1243 return found;
1244 }
1245
1246 /*
1247 * start comes from the offset of locked_page. We have to lock
1248 * pages in order, so we can't process delalloc bytes before
1249 * locked_page
1250 */
1251 if (delalloc_start < *start)
1252 delalloc_start = *start;
1253
1254 /*
1255 * make sure to limit the number of pages we try to lock down
1256 * if we're looping.
1257 */
1258 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1259 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1260
1261 /* step two, lock all the pages after the page that has start */
1262 ret = lock_delalloc_pages(inode, locked_page,
1263 delalloc_start, delalloc_end);
1264 if (ret == -EAGAIN) {
1265 /* some of the pages are gone, let's avoid looping by
1266 * shortening the size of the delalloc range we're searching
1267 */
1268 if (!loops) {
1269 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1270 max_bytes = PAGE_CACHE_SIZE - offset;
1271 loops = 1;
1272 goto again;
1273 } else {
1274 found = 0;
1275 goto out_failed;
1276 }
1277 }
1278 BUG_ON(ret);
1279
1280 /* step three, lock the state bits for the whole range */
1281 lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1282
1283 /* then test to make sure it is all still delalloc */
1284 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1285 EXTENT_DELALLOC, 1);
1286 if (!ret) {
1287 unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1288 __unlock_for_delalloc(inode, locked_page,
1289 delalloc_start, delalloc_end);
1290 cond_resched();
1291 goto again;
1292 }
1293 *start = delalloc_start;
1294 *end = delalloc_end;
1295out_failed:
1296 return found;
1297}
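/*
 * Illustrative sketch (not part of the original file): how the
 * writepage path below consumes this helper -- keep asking for
 * delalloc ranges until the page is covered. 'page_start', 'page_end'
 * and 'locked_page' are placeholders.
 */
#if 0
	u64 dstart = page_start;
	u64 dend = 0;

	while (dend < page_end) {
		if (!find_lock_delalloc_range(inode, tree, locked_page,
					      &dstart, &dend,
					      128 * 1024 * 1024)) {
			dstart = dend + 1;	/* skip the non-delalloc gap */
			continue;
		}
		/* [dstart, dend] is delalloc, page- and extent-locked */
		dstart = dend + 1;
	}
#endif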
1298
1299int extent_clear_unlock_delalloc(struct inode *inode,
1300 struct extent_io_tree *tree,
1301 u64 start, u64 end, struct page *locked_page,
1302 int unlock_pages,
1303 int clear_unlock,
1304 int clear_delalloc, int clear_dirty,
1305 int set_writeback,
1306 int end_writeback)
1307{
1308 int ret;
1309 struct page *pages[16];
1310 unsigned long index = start >> PAGE_CACHE_SHIFT;
1311 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1312 unsigned long nr_pages = end_index - index + 1;
1313 int i;
1314 int clear_bits = 0;
1315
1316 if (clear_unlock)
1317 clear_bits |= EXTENT_LOCKED;
1318 if (clear_dirty)
1319 clear_bits |= EXTENT_DIRTY;
1320
1321 if (clear_delalloc)
1322 clear_bits |= EXTENT_DELALLOC;
1323
1324 clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
1325 if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
1326 return 0;
1327
1328 while (nr_pages > 0) {
1329 ret = find_get_pages_contig(inode->i_mapping, index,
1330 min_t(unsigned long,
1331 nr_pages, ARRAY_SIZE(pages)), pages);
1332 for (i = 0; i < ret; i++) {
1333 if (pages[i] == locked_page) {
1334 page_cache_release(pages[i]);
1335 continue;
1336 }
1337 if (clear_dirty)
1338 clear_page_dirty_for_io(pages[i]);
1339 if (set_writeback)
1340 set_page_writeback(pages[i]);
1341 if (end_writeback)
1342 end_page_writeback(pages[i]);
1343 if (unlock_pages)
1344 unlock_page(pages[i]);
1345 page_cache_release(pages[i]);
1346 }
1347 nr_pages -= ret;
1348 index += ret;
1349 cond_resched();
1350 }
1351 return 0;
1352}
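/*
 * Illustrative sketch (not part of the original file): a typical
 * "tear everything down" call -- clear the extent lock plus the
 * delalloc and dirty bits, then unlock the pages and end writeback on
 * them. locked_page is skipped internally and stays locked.
 */
#if 0
	extent_clear_unlock_delalloc(inode, tree, start, end, locked_page,
				     1,		/* unlock_pages */
				     1,		/* clear_unlock */
				     1,		/* clear_delalloc */
				     1,		/* clear_dirty */
				     0,		/* set_writeback */
				     1);	/* end_writeback */
#endif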
1353
1354/*
1355 * count the number of bytes in the tree that have the given bit(s)
1356 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1357 * cached. The total number found is returned.
1358 */
1359u64 count_range_bits(struct extent_io_tree *tree,
1360 u64 *start, u64 search_end, u64 max_bytes,
1361 unsigned long bits)
1362{
1363 struct rb_node *node;
1364 struct extent_state *state;
1365 u64 cur_start = *start;
1366 u64 total_bytes = 0;
1367 int found = 0;
1368
1369 if (search_end <= cur_start) {
1370 WARN_ON(1);
1371 return 0;
1372 }
1373
1374 spin_lock(&tree->lock);
1375 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1376 total_bytes = tree->dirty_bytes;
1377 goto out;
1378 }
1379 /*
1380 * this search will find all the extents that end after
1381 * our range starts.
1382 */
1383 node = tree_search(tree, cur_start);
1384 if (!node)
1385 goto out;
1386
1387 while (1) {
1388 state = rb_entry(node, struct extent_state, rb_node);
1389 if (state->start > search_end)
1390 break;
1391 if (state->end >= cur_start && (state->state & bits)) {
1392 total_bytes += min(search_end, state->end) + 1 -
1393 max(cur_start, state->start);
1394 if (total_bytes >= max_bytes)
1395 break;
1396 if (!found) {
1397 *start = state->start;
1398 found = 1;
1399 }
1400 }
1401 node = rb_next(node);
1402 if (!node)
1403 break;
1404 }
1405out:
1406 spin_unlock(&tree->lock);
1407 return total_bytes;
1408}
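/*
 * Illustrative sketch (not part of the original file): counting dirty
 * bytes. The whole-tree EXTENT_DIRTY query starting at zero is O(1)
 * thanks to the cached tree->dirty_bytes; anything else walks states.
 */
#if 0
	u64 first = 0;
	u64 dirty = count_range_bits(tree, &first, (u64)-1, (u64)-1,
				     EXTENT_DIRTY);
#endif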
1409
1410#if 0
1411/*
1412 * helper function to lock both pages and extents in the tree.
1413 * pages must be locked first.
1414 */
1415static int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1416{
1417 unsigned long index = start >> PAGE_CACHE_SHIFT;
1418 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1419 struct page *page;
1420 int err;
1421
1422 while (index <= end_index) {
1423 page = grab_cache_page(tree->mapping, index);
1424 if (!page) {
1425 err = -ENOMEM;
1426 goto failed;
1427 }
1428 if (IS_ERR(page)) {
1429 err = PTR_ERR(page);
1430 goto failed;
1431 }
1432 index++;
1433 }
1434 lock_extent(tree, start, end, GFP_NOFS);
1435 return 0;
1436
1437failed:
1438 /*
1439 * we failed above in getting the page at 'index', so we undo here
1440 * up to but not including the page at 'index'
1441 */
1442 end_index = index;
1443 index = start >> PAGE_CACHE_SHIFT;
1444 while (index < end_index) {
1445 page = find_get_page(tree->mapping, index);
1446 unlock_page(page);
1447 page_cache_release(page);
1448 index++;
1449 }
1450 return err;
1451}
1452
1453/*
1454 * helper function to unlock both pages and extents in the tree.
1455 */
1456static int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1457{
1458 unsigned long index = start >> PAGE_CACHE_SHIFT;
1459 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1460 struct page *page;
1461
1462 while (index <= end_index) {
1463 page = find_get_page(tree->mapping, index);
1464 unlock_page(page);
1465 page_cache_release(page);
1466 index++;
1467 }
1468 unlock_extent(tree, start, end, GFP_NOFS);
1469 return 0;
1470}
1471#endif
1472
1473/*
1474 * set the private field for a given byte offset in the tree. If there isn't
1475 * an extent_state starting at that offset, this does nothing and returns -ENOENT.
1476 */
1477int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1478{
1479 struct rb_node *node;
1480 struct extent_state *state;
1481 int ret = 0;
1482
1483 spin_lock(&tree->lock);
1484 /*
1485 * this search will find all the extents that end after
1486 * our range starts.
1487 */
1488 node = tree_search(tree, start);
1489 if (!node) {
1490 ret = -ENOENT;
1491 goto out;
1492 }
1493 state = rb_entry(node, struct extent_state, rb_node);
1494 if (state->start != start) {
1495 ret = -ENOENT;
1496 goto out;
1497 }
1498 state->private = private;
1499out:
1500 spin_unlock(&tree->lock);
1501 return ret;
1502}
1503
1504int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1505{
1506 struct rb_node *node;
1507 struct extent_state *state;
1508 int ret = 0;
1509
1510 spin_lock(&tree->lock);
1511 /*
1512 * this search will find all the extents that end after
1513 * our range starts.
1514 */
1515 node = tree_search(tree, start);
1516 if (!node) {
1517 ret = -ENOENT;
1518 goto out;
1519 }
1520 state = rb_entry(node, struct extent_state, rb_node);
1521 if (state->start != start) {
1522 ret = -ENOENT;
1523 goto out;
1524 }
1525 *private = state->private;
1526out:
1527 spin_unlock(&tree->lock);
1528 return ret;
1529}
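/*
 * Illustrative sketch (not part of the original file): the private
 * field is a per-state scratch value (btrfs uses it to stash checksums
 * for the read end_io path). Both calls fail with -ENOENT unless a
 * state starts exactly at 'start'; the value here is made up.
 */
#if 0
	u64 stashed;

	if (set_state_private(tree, start, 0x1234) == 0 &&
	    get_state_private(tree, start, &stashed) == 0)
		WARN_ON(stashed != 0x1234);
#endif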
1530
1531/*
1532 * searches a range in the state tree for a given mask.
1533 * If 'filled' == 1, this returns 1 only if the whole range is covered
1534 * by extents that have the bits set. Otherwise, 1 is returned if any
1535 * bit in the range is found set.
1536 */
1537int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1538 int bits, int filled)
1539{
1540 struct extent_state *state = NULL;
1541 struct rb_node *node;
1542 int bitset = 0;
1543
1544 spin_lock(&tree->lock);
1545 node = tree_search(tree, start);
1546 while (node && start <= end) {
1547 state = rb_entry(node, struct extent_state, rb_node);
1548
1549 if (filled && state->start > start) {
1550 bitset = 0;
1551 break;
1552 }
1553
1554 if (state->start > end)
1555 break;
1556
1557 if (state->state & bits) {
1558 bitset = 1;
1559 if (!filled)
1560 break;
1561 } else if (filled) {
1562 bitset = 0;
1563 break;
1564 }
1565 start = state->end + 1;
1566 if (start > end)
1567 break;
1568 node = rb_next(node);
1569 if (!node) {
1570 if (filled)
1571 bitset = 0;
1572 break;
1573 }
1574 }
1575 spin_unlock(&tree->lock);
1576 return bitset;
1577}
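/*
 * Illustrative sketch (not part of the original file): the difference
 * the 'filled' argument makes, over placeholder offsets.
 */
#if 0
	/* 1 only when [start, end] is entirely covered by uptodate states */
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);

	/* 1 when any byte in [start, end] is still locked */
	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		return;
#endif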
1578
1579/*
1580 * helper function to set a given page up to date if all the
1581 * extents in the tree for that page are up to date
1582 */
1583static int check_page_uptodate(struct extent_io_tree *tree,
1584 struct page *page)
1585{
1586 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1587 u64 end = start + PAGE_CACHE_SIZE - 1;
1588 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1589 SetPageUptodate(page);
1590 return 0;
1591}
1592
1593/*
1594 * helper function to unlock a page if all the extents in the tree
1595 * for that page are unlocked
1596 */
1597static int check_page_locked(struct extent_io_tree *tree,
1598 struct page *page)
1599{
1600 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1601 u64 end = start + PAGE_CACHE_SIZE - 1;
1602 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1603 unlock_page(page);
1604 return 0;
1605}
1606
1607/*
1608 * helper function to end page writeback if all the extents
1609 * in the tree for that page are done with writeback
1610 */
1611static int check_page_writeback(struct extent_io_tree *tree,
1612 struct page *page)
1613{
1614 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1615 u64 end = start + PAGE_CACHE_SIZE - 1;
1616 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1617 end_page_writeback(page);
1618 return 0;
1619}
1620
1621/* lots and lots of room for performance fixes in the end_bio funcs */
1622
1623/*
1624 * after a writepage IO is done, we need to:
1625 * clear the uptodate bits on error
1626 * clear the writeback bits in the extent tree for this IO
1627 * end_page_writeback if the page has no more pending IO
1628 *
1629 * Scheduling is not allowed, so the extent state tree is expected
1630 * to have one and only one object corresponding to this IO.
1631 */
1632static void end_bio_extent_writepage(struct bio *bio, int err)
1633{
1634 int uptodate = err == 0;
1635 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1636 struct extent_io_tree *tree;
1637 u64 start;
1638 u64 end;
1639 int whole_page;
1640 int ret;
1641
1642 do {
1643 struct page *page = bvec->bv_page;
1644 tree = &BTRFS_I(page->mapping->host)->io_tree;
1645
1646 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1647 bvec->bv_offset;
1648 end = start + bvec->bv_len - 1;
1649
1650 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1651 whole_page = 1;
1652 else
1653 whole_page = 0;
1654
1655 if (--bvec >= bio->bi_io_vec)
1656 prefetchw(&bvec->bv_page->flags);
1657 if (tree->ops && tree->ops->writepage_end_io_hook) {
1658 ret = tree->ops->writepage_end_io_hook(page, start,
1659 end, NULL, uptodate);
1660 if (ret)
1661 uptodate = 0;
1662 }
1663
1664 if (!uptodate && tree->ops &&
1665 tree->ops->writepage_io_failed_hook) {
1666 ret = tree->ops->writepage_io_failed_hook(bio, page,
1667 start, end, NULL);
1668 if (ret == 0) {
1669 uptodate = (err == 0);
1670 continue;
1671 }
1672 }
1673
1674 if (!uptodate) {
1675 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1676 ClearPageUptodate(page);
1677 SetPageError(page);
1678 }
1679
1680 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1681
1682 if (whole_page)
1683 end_page_writeback(page);
1684 else
1685 check_page_writeback(tree, page);
1686 } while (bvec >= bio->bi_io_vec);
1687
1688 bio_put(bio);
1689}
1690
1691/*
1692 * after a readpage IO is done, we need to:
1693 * clear the uptodate bits on error
1694 * set the uptodate bits if things worked
1695 * set the page up to date if all extents in the tree are uptodate
1696 * clear the lock bit in the extent tree
1697 * unlock the page if there are no other extents locked for it
1698 *
1699 * Scheduling is not allowed, so the extent state tree is expected
1700 * to have one and only one object corresponding to this IO.
1701 */
1702static void end_bio_extent_readpage(struct bio *bio, int err)
1703{
1704 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1705 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1706 struct extent_io_tree *tree;
1707 u64 start;
1708 u64 end;
1709 int whole_page;
1710 int ret;
1711
1712 if (err)
1713 uptodate = 0;
1714
1715 do {
1716 struct page *page = bvec->bv_page;
1717 tree = &BTRFS_I(page->mapping->host)->io_tree;
1718
1719 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1720 bvec->bv_offset;
1721 end = start + bvec->bv_len - 1;
1722
1723 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1724 whole_page = 1;
1725 else
1726 whole_page = 0;
1727
1728 if (--bvec >= bio->bi_io_vec)
1729 prefetchw(&bvec->bv_page->flags);
1730
1731 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1732 ret = tree->ops->readpage_end_io_hook(page, start, end,
1733 NULL);
1734 if (ret)
1735 uptodate = 0;
1736 }
1737 if (!uptodate && tree->ops &&
1738 tree->ops->readpage_io_failed_hook) {
1739 ret = tree->ops->readpage_io_failed_hook(bio, page,
1740 start, end, NULL);
1741 if (ret == 0) {
1742 uptodate =
1743 test_bit(BIO_UPTODATE, &bio->bi_flags);
1744 if (err)
1745 uptodate = 0;
1746 continue;
1747 }
1748 }
1749
1750 if (uptodate) {
1751 set_extent_uptodate(tree, start, end,
1752 GFP_ATOMIC);
1753 }
1754 unlock_extent(tree, start, end, GFP_ATOMIC);
1755
1756 if (whole_page) {
1757 if (uptodate) {
1758 SetPageUptodate(page);
1759 } else {
1760 ClearPageUptodate(page);
1761 SetPageError(page);
1762 }
1763 unlock_page(page);
1764 } else {
1765 if (uptodate) {
1766 check_page_uptodate(tree, page);
1767 } else {
1768 ClearPageUptodate(page);
1769 SetPageError(page);
1770 }
1771 check_page_locked(tree, page);
1772 }
1773 } while (bvec >= bio->bi_io_vec);
1774
1775 bio_put(bio);
1776}
1777
1778/*
1779 * IO done from prepare_write is pretty simple: we just unlock
1780 * the structs in the extent tree when done, and set the uptodate bits
1781 * as appropriate.
1782 */
1783static void end_bio_extent_preparewrite(struct bio *bio, int err)
1784{
1785 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1786 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1787 struct extent_io_tree *tree;
1788 u64 start;
1789 u64 end;
1790
1791 do {
1792 struct page *page = bvec->bv_page;
1793 tree = &BTRFS_I(page->mapping->host)->io_tree;
1794
1795 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1796 bvec->bv_offset;
1797 end = start + bvec->bv_len - 1;
1798
1799 if (--bvec >= bio->bi_io_vec)
1800 prefetchw(&bvec->bv_page->flags);
1801
1802 if (uptodate) {
1803 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1804 } else {
1805 ClearPageUptodate(page);
1806 SetPageError(page);
1807 }
1808
1809 unlock_extent(tree, start, end, GFP_ATOMIC);
1810
1811 } while (bvec >= bio->bi_io_vec);
1812
1813 bio_put(bio);
1814}
1815
1816static struct bio *
1817extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1818 gfp_t gfp_flags)
1819{
1820 struct bio *bio;
1821
1822 bio = bio_alloc(gfp_flags, nr_vecs);
1823
1824 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1825 while (!bio && (nr_vecs /= 2))
1826 bio = bio_alloc(gfp_flags, nr_vecs);
1827 }
1828
1829 if (bio) {
1830 bio->bi_size = 0;
1831 bio->bi_bdev = bdev;
1832 bio->bi_sector = first_sector;
1833 }
1834 return bio;
1835}
1836
1837static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1838 unsigned long bio_flags)
1839{
1840 int ret = 0;
1841 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1842 struct page *page = bvec->bv_page;
1843 struct extent_io_tree *tree = bio->bi_private;
1844 u64 start;
1845 u64 end;
1846
1847 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1848 end = start + bvec->bv_len - 1;
1849
1850 bio->bi_private = NULL;
1851
1852 bio_get(bio);
1853
1854 if (tree->ops && tree->ops->submit_bio_hook)
1855 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1856 mirror_num, bio_flags);
1857 else
1858 submit_bio(rw, bio);
1859 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1860 ret = -EOPNOTSUPP;
1861 bio_put(bio);
1862 return ret;
1863}
1864
1865static int submit_extent_page(int rw, struct extent_io_tree *tree,
1866 struct page *page, sector_t sector,
1867 size_t size, unsigned long offset,
1868 struct block_device *bdev,
1869 struct bio **bio_ret,
1870 unsigned long max_pages,
1871 bio_end_io_t end_io_func,
1872 int mirror_num,
1873 unsigned long prev_bio_flags,
1874 unsigned long bio_flags)
1875{
1876 int ret = 0;
1877 struct bio *bio;
1878 int nr;
1879 int contig = 0;
1880 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1881 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1882 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
1883
1884 if (bio_ret && *bio_ret) {
1885 bio = *bio_ret;
1886 if (old_compressed)
1887 contig = bio->bi_sector == sector;
1888 else
1889 contig = bio->bi_sector + (bio->bi_size >> 9) ==
1890 sector;
1891
1892 if (prev_bio_flags != bio_flags || !contig ||
1893 (tree->ops && tree->ops->merge_bio_hook &&
1894 tree->ops->merge_bio_hook(page, offset, page_size, bio,
1895 bio_flags)) ||
1896 bio_add_page(bio, page, page_size, offset) < page_size) {
1897 ret = submit_one_bio(rw, bio, mirror_num,
1898 prev_bio_flags);
1899 bio = NULL;
1900 } else {
1901 return 0;
1902 }
1903 }
1904 if (this_compressed)
1905 nr = BIO_MAX_PAGES;
1906 else
1907 nr = bio_get_nr_vecs(bdev);
1908
1909 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1910
1911 bio_add_page(bio, page, page_size, offset);
1912 bio->bi_end_io = end_io_func;
1913 bio->bi_private = tree;
1914
1915 if (bio_ret)
1916 *bio_ret = bio;
1917 else
1918 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1919
1920 return ret;
1921}
1922
1923void set_page_extent_mapped(struct page *page)
1924{
1925 if (!PagePrivate(page)) {
1926 SetPagePrivate(page);
1927 page_cache_get(page);
1928 set_page_private(page, EXTENT_PAGE_PRIVATE);
1929 }
1930}
1931
1932static void set_page_extent_head(struct page *page, unsigned long len)
1933{
1934 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1935}
1936
1937/*
1938 * basic readpage implementation. Locked extent state structs are inserted
1939 * into the tree and are removed again when the IO is done (by the end_io
1940 * handlers).
1941 */
1942static int __extent_read_full_page(struct extent_io_tree *tree,
1943 struct page *page,
1944 get_extent_t *get_extent,
1945 struct bio **bio, int mirror_num,
1946 unsigned long *bio_flags)
1947{
1948 struct inode *inode = page->mapping->host;
1949 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1950 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1951 u64 end;
1952 u64 cur = start;
1953 u64 extent_offset;
1954 u64 last_byte = i_size_read(inode);
1955 u64 block_start;
1956 u64 cur_end;
1957 sector_t sector;
1958 struct extent_map *em;
1959 struct block_device *bdev;
1960 int ret;
1961 int nr = 0;
1962 size_t page_offset = 0;
1963 size_t iosize;
1964 size_t disk_io_size;
1965 size_t blocksize = inode->i_sb->s_blocksize;
1966 unsigned long this_bio_flag = 0;
1967
1968 set_page_extent_mapped(page);
1969
1970 end = page_end;
1971 lock_extent(tree, start, end, GFP_NOFS);
1972
1973 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1974 char *userpage;
1975 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1976
1977 if (zero_offset) {
1978 iosize = PAGE_CACHE_SIZE - zero_offset;
1979 userpage = kmap_atomic(page, KM_USER0);
1980 memset(userpage + zero_offset, 0, iosize);
1981 flush_dcache_page(page);
1982 kunmap_atomic(userpage, KM_USER0);
1983 }
1984 }
1985 while (cur <= end) {
1986 if (cur >= last_byte) {
1987 char *userpage;
1988 iosize = PAGE_CACHE_SIZE - page_offset;
1989 userpage = kmap_atomic(page, KM_USER0);
1990 memset(userpage + page_offset, 0, iosize);
1991 flush_dcache_page(page);
1992 kunmap_atomic(userpage, KM_USER0);
1993 set_extent_uptodate(tree, cur, cur + iosize - 1,
1994 GFP_NOFS);
1995 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1996 break;
1997 }
1998 em = get_extent(inode, page, page_offset, cur,
1999 end - cur + 1, 0);
2000 if (IS_ERR(em) || !em) {
2001 SetPageError(page);
2002 unlock_extent(tree, cur, end, GFP_NOFS);
2003 break;
2004 }
2005 extent_offset = cur - em->start;
2006 BUG_ON(extent_map_end(em) <= cur);
2007 BUG_ON(end < cur);
2008
2009 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2010 this_bio_flag = EXTENT_BIO_COMPRESSED;
2011
2012 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2013 cur_end = min(extent_map_end(em) - 1, end);
2014 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2015 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2016 disk_io_size = em->block_len;
2017 sector = em->block_start >> 9;
2018 } else {
2019 sector = (em->block_start + extent_offset) >> 9;
2020 disk_io_size = iosize;
2021 }
2022 bdev = em->bdev;
2023 block_start = em->block_start;
2024 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2025 block_start = EXTENT_MAP_HOLE;
2026 free_extent_map(em);
2027 em = NULL;
2028
2029 /* we've found a hole, just zero and go on */
2030 if (block_start == EXTENT_MAP_HOLE) {
2031 char *userpage;
2032 userpage = kmap_atomic(page, KM_USER0);
2033 memset(userpage + page_offset, 0, iosize);
2034 flush_dcache_page(page);
2035 kunmap_atomic(userpage, KM_USER0);
2036
2037 set_extent_uptodate(tree, cur, cur + iosize - 1,
2038 GFP_NOFS);
2039 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2040 cur = cur + iosize;
2041 page_offset += iosize;
2042 continue;
2043 }
2044 /* the get_extent function already copied into the page */
2045 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
2046 check_page_uptodate(tree, page);
2047 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2048 cur = cur + iosize;
2049 page_offset += iosize;
2050 continue;
2051 }
2052 /* we have an inline extent but it didn't get marked
2053 * uptodate. Error out
2054 */
2055 if (block_start == EXTENT_MAP_INLINE) {
2056 SetPageError(page);
2057 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2058 cur = cur + iosize;
2059 page_offset += iosize;
2060 continue;
2061 }
2062
2063 ret = 0;
2064 if (tree->ops && tree->ops->readpage_io_hook) {
2065 ret = tree->ops->readpage_io_hook(page, cur,
2066 cur + iosize - 1);
2067 }
2068 if (!ret) {
2069 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2070 pnr -= page->index;
2071 ret = submit_extent_page(READ, tree, page,
2072 sector, disk_io_size, page_offset,
2073 bdev, bio, pnr,
2074 end_bio_extent_readpage, mirror_num,
2075 *bio_flags,
2076 this_bio_flag);
2077 nr++;
2078 *bio_flags = this_bio_flag;
2079 }
2080 if (ret)
2081 SetPageError(page);
2082 cur = cur + iosize;
2083 page_offset += iosize;
2084 }
2085 if (!nr) {
2086 if (!PageError(page))
2087 SetPageUptodate(page);
2088 unlock_page(page);
2089 }
2090 return 0;
2091}
2092
2093int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2094 get_extent_t *get_extent)
2095{
2096 struct bio *bio = NULL;
2097 unsigned long bio_flags = 0;
2098 int ret;
2099
2100 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2101 &bio_flags);
2102 if (bio)
2103 submit_one_bio(READ, bio, 0, bio_flags);
2104 return ret;
2105}
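/*
 * Illustrative sketch (not part of the original file): a multi-page
 * reader keeps the bio pointer alive across calls so contiguous pages
 * batch into one bio, then submits the leftover. 'pages' and
 * 'nr_pages' are placeholders.
 */
#if 0
	struct bio *bio = NULL;
	unsigned long bio_flags = 0;
	int i;

	for (i = 0; i < nr_pages; i++)
		__extent_read_full_page(tree, pages[i], get_extent,
					&bio, 0, &bio_flags);
	if (bio)
		submit_one_bio(READ, bio, 0, bio_flags);
#endif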
2106
2107/*
2108 * the writepage semantics are similar to regular writepage. extent
2109 * records are inserted to lock ranges in the tree, and as dirty areas
2110 * are found, they are marked writeback. Then the lock bits are removed
2111 * and the end_io handler clears the writeback ranges
2112 */
2113static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2114 void *data)
2115{
2116 struct inode *inode = page->mapping->host;
2117 struct extent_page_data *epd = data;
2118 struct extent_io_tree *tree = epd->tree;
2119 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2120 u64 delalloc_start;
2121 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2122 u64 end;
2123 u64 cur = start;
2124 u64 extent_offset;
2125 u64 last_byte = i_size_read(inode);
2126 u64 block_start;
2127 u64 iosize;
2128 u64 unlock_start;
2129 sector_t sector;
2130 struct extent_map *em;
2131 struct block_device *bdev;
2132 int ret;
2133 int nr = 0;
2134 size_t pg_offset = 0;
2135 size_t blocksize;
2136 loff_t i_size = i_size_read(inode);
2137 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2138 u64 nr_delalloc;
2139 u64 delalloc_end;
2140 int page_started;
2141 int compressed;
2142 unsigned long nr_written = 0;
2143
2144 WARN_ON(!PageLocked(page));
2145 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2146 if (page->index > end_index ||
2147 (page->index == end_index && !pg_offset)) {
2148 page->mapping->a_ops->invalidatepage(page, 0);
2149 unlock_page(page);
2150 return 0;
2151 }
2152
2153 if (page->index == end_index) {
2154 char *userpage;
2155
2156 userpage = kmap_atomic(page, KM_USER0);
2157 memset(userpage + pg_offset, 0,
2158 PAGE_CACHE_SIZE - pg_offset);
2159 kunmap_atomic(userpage, KM_USER0);
2160 flush_dcache_page(page);
2161 }
2162 pg_offset = 0;
2163
2164 set_page_extent_mapped(page);
2165
2166 delalloc_start = start;
2167 delalloc_end = 0;
2168 page_started = 0;
2169 if (!epd->extent_locked) {
2170 while (delalloc_end < page_end) {
2171 nr_delalloc = find_lock_delalloc_range(inode, tree,
2172 page,
2173 &delalloc_start,
2174 &delalloc_end,
2175 128 * 1024 * 1024);
2176 if (nr_delalloc == 0) {
2177 delalloc_start = delalloc_end + 1;
2178 continue;
2179 }
2180 tree->ops->fill_delalloc(inode, page, delalloc_start,
2181 delalloc_end, &page_started,
2182 &nr_written);
2183 delalloc_start = delalloc_end + 1;
2184 }
2185
2186 /* did the fill delalloc function already unlock and start
2187 * the IO?
2188 */
2189 if (page_started) {
2190 ret = 0;
2191 goto update_nr_written;
2192 }
2193 }
2194 lock_extent(tree, start, page_end, GFP_NOFS);
2195
2196 unlock_start = start;
2197
2198 if (tree->ops && tree->ops->writepage_start_hook) {
2199 ret = tree->ops->writepage_start_hook(page, start,
2200 page_end);
2201 if (ret == -EAGAIN) {
2202 unlock_extent(tree, start, page_end, GFP_NOFS);
2203 redirty_page_for_writepage(wbc, page);
2204 unlock_page(page);
2205 ret = 0;
2206 goto update_nr_written;
2207 }
2208 }
2209
2210 nr_written++;
2211
2212 end = page_end;
2213 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
2214 printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
2215
2216 if (last_byte <= start) {
2217 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
2218 unlock_extent(tree, start, page_end, GFP_NOFS);
2219 if (tree->ops && tree->ops->writepage_end_io_hook)
2220 tree->ops->writepage_end_io_hook(page, start,
2221 page_end, NULL, 1);
2222 unlock_start = page_end + 1;
2223 goto done;
2224 }
2225
2226 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2227 blocksize = inode->i_sb->s_blocksize;
2228
2229 while (cur <= end) {
2230 if (cur >= last_byte) {
2231 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
2232 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2233 if (tree->ops && tree->ops->writepage_end_io_hook)
2234 tree->ops->writepage_end_io_hook(page, cur,
2235 page_end, NULL, 1);
2236 unlock_start = page_end + 1;
2237 break;
2238 }
2239 em = epd->get_extent(inode, page, pg_offset, cur,
2240 end - cur + 1, 1);
2241 if (IS_ERR(em) || !em) {
2242 SetPageError(page);
2243 break;
2244 }
2245
2246 extent_offset = cur - em->start;
2247 BUG_ON(extent_map_end(em) <= cur);
2248 BUG_ON(end < cur);
2249 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2250 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2251 sector = (em->block_start + extent_offset) >> 9;
2252 bdev = em->bdev;
2253 block_start = em->block_start;
2254 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2255 free_extent_map(em);
2256 em = NULL;
2257
2258 /*
2259 * compressed and inline extents are written through other
2260 * paths in the FS
2261 */
2262 if (compressed || block_start == EXTENT_MAP_HOLE ||
2263 block_start == EXTENT_MAP_INLINE) {
2264 clear_extent_dirty(tree, cur,
2265 cur + iosize - 1, GFP_NOFS);
2266
2267 unlock_extent(tree, unlock_start, cur + iosize - 1,
2268 GFP_NOFS);
2269
2270 /*
2271 * end_io notification does not happen here for
2272 * compressed extents
2273 */
2274 if (!compressed && tree->ops &&
2275 tree->ops->writepage_end_io_hook)
2276 tree->ops->writepage_end_io_hook(page, cur,
2277 cur + iosize - 1,
2278 NULL, 1);
2279 else if (compressed) {
2280 /* we don't want to end_page_writeback on
2281 * a compressed extent. this happens
2282 * elsewhere
2283 */
2284 nr++;
2285 }
2286
2287 cur += iosize;
2288 pg_offset += iosize;
2289 unlock_start = cur;
2290 continue;
2291 }
2292 /* leave this out until we have a page_mkwrite call */
2293 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2294 EXTENT_DIRTY, 0)) {
2295 cur = cur + iosize;
2296 pg_offset += iosize;
2297 continue;
2298 }
2299
2300 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2301 if (tree->ops && tree->ops->writepage_io_hook) {
2302 ret = tree->ops->writepage_io_hook(page, cur,
2303 cur + iosize - 1);
2304 } else {
2305 ret = 0;
2306 }
2307 if (ret) {
2308 SetPageError(page);
2309 } else {
2310 unsigned long max_nr = end_index + 1;
2311
2312 set_range_writeback(tree, cur, cur + iosize - 1);
2313 if (!PageWriteback(page)) {
2314 printk(KERN_ERR "btrfs warning page %lu not "
2315 "writeback, cur %llu end %llu\n",
2316 page->index, (unsigned long long)cur,
2317 (unsigned long long)end);
2318 }
2319
2320 ret = submit_extent_page(WRITE, tree, page, sector,
2321 iosize, pg_offset, bdev,
2322 &epd->bio, max_nr,
2323 end_bio_extent_writepage,
2324 0, 0, 0);
2325 if (ret)
2326 SetPageError(page);
2327 }
2328 cur = cur + iosize;
2329 pg_offset += iosize;
2330 nr++;
2331 }
2332done:
2333 if (nr == 0) {
2334 /* make sure the mapping tag for page dirty gets cleared */
2335 set_page_writeback(page);
2336 end_page_writeback(page);
2337 }
2338 if (unlock_start <= page_end)
2339 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2340 unlock_page(page);
2341
2342update_nr_written:
2343 wbc->nr_to_write -= nr_written;
2344 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2345 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2346 page->mapping->writeback_index = page->index + nr_written;
2347 return 0;
2348}
2349
2350/**
2351 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2352 * @mapping: address space structure to write
2353 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2354 * @writepage: function called for each page
2355 * @data: data passed to writepage function
 * @flush_fn: called to submit any pending bio before waiting on a page under writeback
2356 *
2357 * If a page is already under I/O, write_cache_pages() skips it, even
2358 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2359 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2360 * and msync() need to guarantee that all the data which was dirty at the time
2361 * the call was made gets new I/O started against it. If wbc->sync_mode is
2362 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2363 * existing IO to complete.
2364 */
2365static int extent_write_cache_pages(struct extent_io_tree *tree,
2366 struct address_space *mapping,
2367 struct writeback_control *wbc,
2368 writepage_t writepage, void *data,
2369 void (*flush_fn)(void *))
2370{
2371 struct backing_dev_info *bdi = mapping->backing_dev_info;
2372 int ret = 0;
2373 int done = 0;
2374 struct pagevec pvec;
2375 int nr_pages;
2376 pgoff_t index;
2377 pgoff_t end; /* Inclusive */
2378 int scanned = 0;
2379 int range_whole = 0;
2380
2381 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2382 wbc->encountered_congestion = 1;
2383 return 0;
2384 }
2385
2386 pagevec_init(&pvec, 0);
2387 if (wbc->range_cyclic) {
2388 index = mapping->writeback_index; /* Start from prev offset */
2389 end = -1;
2390 } else {
2391 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2392 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2393 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2394 range_whole = 1;
2395 scanned = 1;
2396 }
2397retry:
2398 while (!done && (index <= end) &&
2399 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2400 PAGECACHE_TAG_DIRTY, min(end - index,
2401 (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2402 unsigned i;
2403
2404 scanned = 1;
2405 for (i = 0; i < nr_pages; i++) {
2406 struct page *page = pvec.pages[i];
2407
2408 /*
2409 * At this point we hold neither mapping->tree_lock nor
2410 * lock on the page itself: the page may be truncated or
2411 * invalidated (changing page->mapping to NULL), or even
2412 * swizzled back from swapper_space to tmpfs file
2413 * mapping
2414 */
2415 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2416 tree->ops->write_cache_pages_lock_hook(page);
2417 else
2418 lock_page(page);
2419
2420 if (unlikely(page->mapping != mapping)) {
2421 unlock_page(page);
2422 continue;
2423 }
2424
2425 if (!wbc->range_cyclic && page->index > end) {
2426 done = 1;
2427 unlock_page(page);
2428 continue;
2429 }
2430
2431 if (wbc->sync_mode != WB_SYNC_NONE) {
2432 if (PageWriteback(page))
2433 flush_fn(data);
2434 wait_on_page_writeback(page);
2435 }
2436
2437 if (PageWriteback(page) ||
2438 !clear_page_dirty_for_io(page)) {
2439 unlock_page(page);
2440 continue;
2441 }
2442
2443 ret = (*writepage)(page, wbc, data);
2444
2445 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2446 unlock_page(page);
2447 ret = 0;
2448 }
2449 if (ret || wbc->nr_to_write <= 0)
2450 done = 1;
2451 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2452 wbc->encountered_congestion = 1;
2453 done = 1;
2454 }
2455 }
2456 pagevec_release(&pvec);
2457 cond_resched();
2458 }
2459 if (!scanned && !done) {
2460 /*
2461 * We hit the last page and there is more work to be done: wrap
2462 * back to the start of the file
2463 */
2464 scanned = 1;
2465 index = 0;
2466 goto retry;
2467 }
2468 return ret;
2469}
2470
2471static noinline void flush_write_bio(void *data)
2472{
2473 struct extent_page_data *epd = data;
2474 if (epd->bio) {
2475 submit_one_bio(WRITE, epd->bio, 0, 0);
2476 epd->bio = NULL;
2477 }
2478}
2479
2480int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2481 get_extent_t *get_extent,
2482 struct writeback_control *wbc)
2483{
2484 int ret;
2485 struct address_space *mapping = page->mapping;
2486 struct extent_page_data epd = {
2487 .bio = NULL,
2488 .tree = tree,
2489 .get_extent = get_extent,
2490 .extent_locked = 0,
2491 };
2492 struct writeback_control wbc_writepages = {
2493 .bdi = wbc->bdi,
2494 .sync_mode = WB_SYNC_NONE,
2495 .older_than_this = NULL,
2496 .nr_to_write = 64,
2497 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2498 .range_end = (loff_t)-1,
2499 };
2500
2501
2502 ret = __extent_writepage(page, wbc, &epd);
2503
2504 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2505 __extent_writepage, &epd, flush_write_bio);
2506 if (epd.bio)
2507 submit_one_bio(WRITE, epd.bio, 0, 0);
2508 return ret;
2509}
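/*
 * Illustrative sketch, not part of this change: a filesystem wires
 * this helper into its ->writepage address_space operation; the
 * btrfs_get_extent name below stands in for whatever get_extent_t
 * callback the fs supplies.
 *
 *	static int btrfs_writepage(struct page *page,
 *				   struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree;
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_write_full_page(tree, page,
 *					      btrfs_get_extent, wbc);
 *	}
 */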
2510
2511int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2512 u64 start, u64 end, get_extent_t *get_extent,
2513 int mode)
2514{
2515 int ret = 0;
2516 struct address_space *mapping = inode->i_mapping;
2517 struct page *page;
2518 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2519 PAGE_CACHE_SHIFT;
2520
2521 struct extent_page_data epd = {
2522 .bio = NULL,
2523 .tree = tree,
2524 .get_extent = get_extent,
2525 .extent_locked = 1,
2526 };
2527 struct writeback_control wbc_writepages = {
2528 .bdi = inode->i_mapping->backing_dev_info,
2529 .sync_mode = mode,
2530 .older_than_this = NULL,
2531 .nr_to_write = nr_pages * 2,
2532 .range_start = start,
2533 .range_end = end + 1,
2534 };
2535
2536 while (start <= end) {
2537 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2538 if (clear_page_dirty_for_io(page))
2539 ret = __extent_writepage(page, &wbc_writepages, &epd);
2540 else {
2541 if (tree->ops && tree->ops->writepage_end_io_hook)
2542 tree->ops->writepage_end_io_hook(page, start,
2543 start + PAGE_CACHE_SIZE - 1,
2544 NULL, 1);
2545 unlock_page(page);
2546 }
2547 page_cache_release(page);
2548 start += PAGE_CACHE_SIZE;
2549 }
2550
2551 if (epd.bio)
2552 submit_one_bio(WRITE, epd.bio, 0, 0);
2553 return ret;
2554}
2555
2556int extent_writepages(struct extent_io_tree *tree,
2557 struct address_space *mapping,
2558 get_extent_t *get_extent,
2559 struct writeback_control *wbc)
2560{
2561 int ret = 0;
2562 struct extent_page_data epd = {
2563 .bio = NULL,
2564 .tree = tree,
2565 .get_extent = get_extent,
2566 .extent_locked = 0,
2567 };
2568
2569 ret = extent_write_cache_pages(tree, mapping, wbc,
2570 __extent_writepage, &epd,
2571 flush_write_bio);
2572 if (epd.bio)
2573 submit_one_bio(WRITE, epd.bio, 0, 0);
2574 return ret;
2575}
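/*
 * Illustrative sketch, not part of this change: the matching
 * ->writepages hook is an equally thin wrapper.
 *
 *	static int btrfs_writepages(struct address_space *mapping,
 *				    struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree;
 *		tree = &BTRFS_I(mapping->host)->io_tree;
 *		return extent_writepages(tree, mapping,
 *					 btrfs_get_extent, wbc);
 *	}
 */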
2576
2577int extent_readpages(struct extent_io_tree *tree,
2578 struct address_space *mapping,
2579 struct list_head *pages, unsigned nr_pages,
2580 get_extent_t get_extent)
2581{
2582 struct bio *bio = NULL;
2583 unsigned page_idx;
2584 struct pagevec pvec;
2585 unsigned long bio_flags = 0;
2586
2587 pagevec_init(&pvec, 0);
2588 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2589 struct page *page = list_entry(pages->prev, struct page, lru);
2590
2591 prefetchw(&page->flags);
2592 list_del(&page->lru);
2593 /*
2594 * what we want to do here is call add_to_page_cache_lru,
2595 * but that isn't exported, so we reproduce it here
2596 */
2597 if (!add_to_page_cache(page, mapping,
2598 page->index, GFP_KERNEL)) {
2599
2600 /* open coding of lru_cache_add, also not exported */
2601 page_cache_get(page);
2602 if (!pagevec_add(&pvec, page))
2603 __pagevec_lru_add_file(&pvec);
2604 __extent_read_full_page(tree, page, get_extent,
2605 &bio, 0, &bio_flags);
2606 }
2607 page_cache_release(page);
2608 }
2609 if (pagevec_count(&pvec))
2610 __pagevec_lru_add_file(&pvec);
2611 BUG_ON(!list_empty(pages));
2612 if (bio)
2613 submit_one_bio(READ, bio, 0, bio_flags);
2614 return 0;
2615}
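/*
 * Illustrative sketch, not part of this change: readahead reaches
 * this helper through the ->readpages operation.
 *
 *	static int btrfs_readpages(struct file *file,
 *				   struct address_space *mapping,
 *				   struct list_head *pages,
 *				   unsigned nr_pages)
 *	{
 *		struct extent_io_tree *tree;
 *		tree = &BTRFS_I(mapping->host)->io_tree;
 *		return extent_readpages(tree, mapping, pages,
 *					nr_pages, btrfs_get_extent);
 *	}
 */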
2616
2617/*
2618 * basic invalidatepage code, this waits on any locked or writeback
2619 * ranges corresponding to the page, and then deletes any extent state
2620 * records from the tree
2621 */
2622int extent_invalidatepage(struct extent_io_tree *tree,
2623 struct page *page, unsigned long offset)
2624{
2625 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2626 u64 end = start + PAGE_CACHE_SIZE - 1;
2627 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2628
2629 start += (offset + blocksize - 1) & ~(blocksize - 1);
2630 if (start > end)
2631 return 0;
2632
2633 lock_extent(tree, start, end, GFP_NOFS);
2634 wait_on_extent_writeback(tree, start, end);
2635 clear_extent_bit(tree, start, end,
2636 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2637 1, 1, GFP_NOFS);
2638 return 0;
2639}
2640
2641/*
2642 * simple commit_write call, set_range_dirty is used to mark both
2643 * the pages and the extent records as dirty
2644 */
2645int extent_commit_write(struct extent_io_tree *tree,
2646 struct inode *inode, struct page *page,
2647 unsigned from, unsigned to)
2648{
2649 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2650
2651 set_page_extent_mapped(page);
2652 set_page_dirty(page);
2653
2654 if (pos > inode->i_size) {
2655 i_size_write(inode, pos);
2656 mark_inode_dirty(inode);
2657 }
2658 return 0;
2659}
2660
2661int extent_prepare_write(struct extent_io_tree *tree,
2662 struct inode *inode, struct page *page,
2663 unsigned from, unsigned to, get_extent_t *get_extent)
2664{
2665 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2666 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2667 u64 block_start;
2668 u64 orig_block_start;
2669 u64 block_end;
2670 u64 cur_end;
2671 struct extent_map *em;
2672 unsigned blocksize = 1 << inode->i_blkbits;
2673 size_t page_offset = 0;
2674 size_t block_off_start;
2675 size_t block_off_end;
2676 int err = 0;
2677 int iocount = 0;
2678 int ret = 0;
2679 int isnew;
2680
2681 set_page_extent_mapped(page);
2682
2683 block_start = (page_start + from) & ~((u64)blocksize - 1);
2684 block_end = (page_start + to - 1) | (blocksize - 1);
2685 orig_block_start = block_start;
2686
2687 lock_extent(tree, page_start, page_end, GFP_NOFS);
2688 while (block_start <= block_end) {
2689 em = get_extent(inode, page, page_offset, block_start,
2690 block_end - block_start + 1, 1);
2691 if (IS_ERR(em) || !em)
2692 goto err;
2693
2694 cur_end = min(block_end, extent_map_end(em) - 1);
2695 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2696 block_off_end = block_off_start + blocksize;
2697 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2698
2699 if (!PageUptodate(page) && isnew &&
2700 (block_off_end > to || block_off_start < from)) {
2701 void *kaddr;
2702
2703 kaddr = kmap_atomic(page, KM_USER0);
2704 if (block_off_end > to)
2705 memset(kaddr + to, 0, block_off_end - to);
2706 if (block_off_start < from)
2707 memset(kaddr + block_off_start, 0,
2708 from - block_off_start);
2709 flush_dcache_page(page);
2710 kunmap_atomic(kaddr, KM_USER0);
2711 }
2712 if ((em->block_start != EXTENT_MAP_HOLE &&
2713 em->block_start != EXTENT_MAP_INLINE) &&
2714 !isnew && !PageUptodate(page) &&
2715 (block_off_end > to || block_off_start < from) &&
2716 !test_range_bit(tree, block_start, cur_end,
2717 EXTENT_UPTODATE, 1)) {
2718 u64 sector;
2719 u64 extent_offset = block_start - em->start;
2720 size_t iosize;
2721 sector = (em->block_start + extent_offset) >> 9;
2722 iosize = (cur_end - block_start + blocksize) &
2723 ~((u64)blocksize - 1);
2724 /*
2725 * we've already got the extent locked, but we
2726 * need to split the state such that our end_bio
2727 * handler can clear the lock.
2728 */
2729 set_extent_bit(tree, block_start,
2730 block_start + iosize - 1,
2731 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2732 ret = submit_extent_page(READ, tree, page,
2733 sector, iosize, page_offset, em->bdev,
2734 NULL, 1,
2735 end_bio_extent_preparewrite, 0,
2736 0, 0);
2737 iocount++;
2738 block_start = block_start + iosize;
2739 } else {
2740 set_extent_uptodate(tree, block_start, cur_end,
2741 GFP_NOFS);
2742 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2743 block_start = cur_end + 1;
2744 }
2745 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2746 free_extent_map(em);
2747 }
2748 if (iocount) {
2749 wait_extent_bit(tree, orig_block_start,
2750 block_end, EXTENT_LOCKED);
2751 }
2752 check_page_uptodate(tree, page);
2753err:
2754 /* FIXME, zero out newly allocated blocks on error */
2755 return err;
2756}
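/*
 * Illustrative sketch, not part of this change: prepare and commit
 * pair up around the actual data copy in a buffered write path
 * (error handling trimmed):
 *
 *	ret = extent_prepare_write(tree, inode, page, from, to,
 *				   btrfs_get_extent);
 *	if (!ret) {
 *		... copy user data into the page here ...
 *		ret = extent_commit_write(tree, inode, page, from, to);
 *	}
 */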
2757
2758/*
2759 * a helper for releasepage, this tests for areas of the page that
2760 * are locked or under IO and drops the related state bits if it is safe
2761 * to drop the page.
2762 */
2763int try_release_extent_state(struct extent_map_tree *map,
2764 struct extent_io_tree *tree, struct page *page,
2765 gfp_t mask)
2766{
2767 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2768 u64 end = start + PAGE_CACHE_SIZE - 1;
2769 int ret = 1;
2770
2771 if (test_range_bit(tree, start, end,
2772 EXTENT_IOBITS | EXTENT_ORDERED, 0))
2773 ret = 0;
2774 else {
2775 if ((mask & GFP_NOFS) == GFP_NOFS)
2776 mask = GFP_NOFS;
2777 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2778 1, 1, mask);
2779 }
2780 return ret;
2781}
2782
2783/*
2784 * a helper for releasepage. As long as there are no locked extents
2785 * in the range corresponding to the page, both state records and extent
2786 * map records are removed
2787 */
2788int try_release_extent_mapping(struct extent_map_tree *map,
2789 struct extent_io_tree *tree, struct page *page,
2790 gfp_t mask)
2791{
2792 struct extent_map *em;
2793 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2794 u64 end = start + PAGE_CACHE_SIZE - 1;
2795
2796 if ((mask & __GFP_WAIT) &&
2797 page->mapping->host->i_size > 16 * 1024 * 1024) {
2798 u64 len;
2799 while (start <= end) {
2800 len = end - start + 1;
2801 spin_lock(&map->lock);
2802 em = lookup_extent_mapping(map, start, len);
2803 if (!em || IS_ERR(em)) {
2804 spin_unlock(&map->lock);
2805 break;
2806 }
2807 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2808 em->start != start) {
2809 spin_unlock(&map->lock);
2810 free_extent_map(em);
2811 break;
2812 }
2813 if (!test_range_bit(tree, em->start,
2814 extent_map_end(em) - 1,
2815 EXTENT_LOCKED | EXTENT_WRITEBACK |
2816 EXTENT_ORDERED,
2817 0)) {
2818 remove_extent_mapping(map, em);
2819 /* once for the rb tree */
2820 free_extent_map(em);
2821 }
2822 start = extent_map_end(em);
2823 spin_unlock(&map->lock);
2824
2825 /* once for us */
2826 free_extent_map(em);
2827 }
2828 }
2829 return try_release_extent_state(map, tree, page, mask);
2830}
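/*
 * Illustrative sketch, not part of this change: ->releasepage ends
 * up here so the VM can shed cached extent state along with the
 * page (real callers first drop any extent buffer on the page):
 *
 *	static int btrfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct inode *inode = page->mapping->host;
 *		return try_release_extent_mapping(
 *			&BTRFS_I(inode)->extent_tree,
 *			&BTRFS_I(inode)->io_tree, page, gfp);
 *	}
 */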
2831
2832sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2833 get_extent_t *get_extent)
2834{
2835 struct inode *inode = mapping->host;
2836 u64 start = (u64)iblock << inode->i_blkbits;
2837 sector_t sector = 0;
2838 size_t blksize = (1 << inode->i_blkbits);
2839 struct extent_map *em;
2840
2841 lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2842 GFP_NOFS);
2843 em = get_extent(inode, NULL, 0, start, blksize, 0);
2844 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2845 GFP_NOFS);
2846 if (!em || IS_ERR(em))
2847 return 0;
2848
2849 if (em->block_start > EXTENT_MAP_LAST_BYTE)
2850 goto out;
2851
2852 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2853out:
2854 free_extent_map(em);
2855 return sector;
2856}
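/*
 * Illustrative sketch, not part of this change: FIBMAP support is a
 * one-liner on top of this helper.
 *
 *	static sector_t btrfs_bmap(struct address_space *mapping,
 *				   sector_t block)
 *	{
 *		return extent_bmap(mapping, block, btrfs_get_extent);
 *	}
 */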
2857
2858static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2859 unsigned long i)
2860{
2861 struct page *p;
2862 struct address_space *mapping;
2863
2864 if (i == 0)
2865 return eb->first_page;
2866 i += eb->start >> PAGE_CACHE_SHIFT;
2867 mapping = eb->first_page->mapping;
2868 if (!mapping)
2869 return NULL;
2870
2871 /*
2872 * extent_buffer_page is only called after pinning the page
2873 * by increasing the reference count. So we know the page must
2874 * be in the radix tree.
2875 */
2876 rcu_read_lock();
2877 p = radix_tree_lookup(&mapping->page_tree, i);
2878 rcu_read_unlock();
2879
2880 return p;
2881}
2882
2883static inline unsigned long num_extent_pages(u64 start, u64 len)
2884{
2885 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2886 (start >> PAGE_CACHE_SHIFT);
2887}
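/*
 * Worked example with 4K pages: start = 12288, len = 8192 covers
 * bytes 12288..20479, i.e. page indexes 3 and 4, so the result is
 * ((20480 + 4095) >> 12) - (12288 >> 12) = 5 - 3 = 2.
 */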
2888
2889static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2890 u64 start,
2891 unsigned long len,
2892 gfp_t mask)
2893{
2894 struct extent_buffer *eb = NULL;
2895#ifdef LEAK_DEBUG
2896 unsigned long flags;
2897#endif
2898
2899 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
2900 eb->start = start;
2901 eb->len = len;
2902 mutex_init(&eb->mutex);
2903#ifdef LEAK_DEBUG
2904 spin_lock_irqsave(&leak_lock, flags);
2905 list_add(&eb->leak_list, &buffers);
2906 spin_unlock_irqrestore(&leak_lock, flags);
2907#endif
2908 atomic_set(&eb->refs, 1);
2909
2910 return eb;
2911}
2912
2913static void __free_extent_buffer(struct extent_buffer *eb)
2914{
2915#ifdef LEAK_DEBUG
2916 unsigned long flags;
2917 spin_lock_irqsave(&leak_lock, flags);
2918 list_del(&eb->leak_list);
2919 spin_unlock_irqrestore(&leak_lock, flags);
2920#endif
2921 kmem_cache_free(extent_buffer_cache, eb);
2922}
2923
2924struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2925 u64 start, unsigned long len,
2926 struct page *page0,
2927 gfp_t mask)
2928{
2929 unsigned long num_pages = num_extent_pages(start, len);
2930 unsigned long i;
2931 unsigned long index = start >> PAGE_CACHE_SHIFT;
2932 struct extent_buffer *eb;
2933 struct extent_buffer *exists = NULL;
2934 struct page *p;
2935 struct address_space *mapping = tree->mapping;
2936 int uptodate = 1;
2937
2938 spin_lock(&tree->buffer_lock);
2939 eb = buffer_search(tree, start);
2940 if (eb) {
2941 atomic_inc(&eb->refs);
2942 spin_unlock(&tree->buffer_lock);
2943 mark_page_accessed(eb->first_page);
2944 return eb;
2945 }
2946 spin_unlock(&tree->buffer_lock);
2947
2948 eb = __alloc_extent_buffer(tree, start, len, mask);
2949 if (!eb)
2950 return NULL;
2951
2952 if (page0) {
2953 eb->first_page = page0;
2954 i = 1;
2955 index++;
2956 page_cache_get(page0);
2957 mark_page_accessed(page0);
2958 set_page_extent_mapped(page0);
2959 set_page_extent_head(page0, len);
2960 uptodate = PageUptodate(page0);
2961 } else {
2962 i = 0;
2963 }
2964 for (; i < num_pages; i++, index++) {
2965 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2966 if (!p) {
2967 WARN_ON(1);
2968 goto free_eb;
2969 }
2970 set_page_extent_mapped(p);
2971 mark_page_accessed(p);
2972 if (i == 0) {
2973 eb->first_page = p;
2974 set_page_extent_head(p, len);
2975 } else {
2976 set_page_private(p, EXTENT_PAGE_PRIVATE);
2977 }
2978 if (!PageUptodate(p))
2979 uptodate = 0;
2980 unlock_page(p);
2981 }
2982 if (uptodate)
2983 eb->flags |= EXTENT_UPTODATE;
2984 eb->flags |= EXTENT_BUFFER_FILLED;
2985
2986 spin_lock(&tree->buffer_lock);
2987 exists = buffer_tree_insert(tree, start, &eb->rb_node);
2988 if (exists) {
2989 /* add one reference for the caller */
2990 atomic_inc(&exists->refs);
2991 spin_unlock(&tree->buffer_lock);
2992 goto free_eb;
2993 }
2994 spin_unlock(&tree->buffer_lock);
2995
2996 /* add one reference for the tree */
2997 atomic_inc(&eb->refs);
2998 return eb;
2999
3000free_eb:
3001 if (!atomic_dec_and_test(&eb->refs))
3002 return exists;
3003 for (index = 1; index < i; index++)
3004 page_cache_release(extent_buffer_page(eb, index));
3005 page_cache_release(extent_buffer_page(eb, 0));
3006 __free_extent_buffer(eb);
3007 return exists;
3008}
3009
3010struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3011 u64 start, unsigned long len,
3012 gfp_t mask)
3013{
3014 struct extent_buffer *eb;
3015
3016 spin_lock(&tree->buffer_lock);
3017 eb = buffer_search(tree, start);
3018 if (eb)
3019 atomic_inc(&eb->refs);
3020 spin_unlock(&tree->buffer_lock);
3021
3022 if (eb)
3023 mark_page_accessed(eb->first_page);
3024
3025 return eb;
3026}
3027
3028void free_extent_buffer(struct extent_buffer *eb)
3029{
3030 if (!eb)
3031 return;
3032
3033 if (!atomic_dec_and_test(&eb->refs))
3034 return;
3035
	/* the last reference is dropped in try_release_extent_buffer() */
3036 WARN_ON(1);
3037}
3038
3039int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3040 struct extent_buffer *eb)
3041{
3042 int set;
3043 unsigned long i;
3044 unsigned long num_pages;
3045 struct page *page;
3046
3047 u64 start = eb->start;
3048 u64 end = start + eb->len - 1;
3049
3050 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
3051 num_pages = num_extent_pages(eb->start, eb->len);
3052
3053 for (i = 0; i < num_pages; i++) {
3054 page = extent_buffer_page(eb, i);
3055 if (!set && !PageDirty(page))
3056 continue;
3057
3058 lock_page(page);
3059 if (i == 0)
3060 set_page_extent_head(page, eb->len);
3061 else
3062 set_page_private(page, EXTENT_PAGE_PRIVATE);
3063
3064 /*
3065 * if we're on the last page or the first page and the
3066 * block isn't aligned on a page boundary, do extra checks
3067 * to make sure we don't clean a page that is partially dirty
3068 */
3069 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3070 ((i == num_pages - 1) &&
3071 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3072 start = (u64)page->index << PAGE_CACHE_SHIFT;
3073 end = start + PAGE_CACHE_SIZE - 1;
3074 if (test_range_bit(tree, start, end,
3075 EXTENT_DIRTY, 0)) {
3076 unlock_page(page);
3077 continue;
3078 }
3079 }
3080 clear_page_dirty_for_io(page);
3081 spin_lock_irq(&page->mapping->tree_lock);
3082 if (!PageDirty(page)) {
3083 radix_tree_tag_clear(&page->mapping->page_tree,
3084 page_index(page),
3085 PAGECACHE_TAG_DIRTY);
3086 }
3087 spin_unlock_irq(&page->mapping->tree_lock);
3088 unlock_page(page);
3089 }
3090 return 0;
3091}
3092
3093int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3094 struct extent_buffer *eb)
3095{
3096 return wait_on_extent_writeback(tree, eb->start,
3097 eb->start + eb->len - 1);
3098}
3099
3100int set_extent_buffer_dirty(struct extent_io_tree *tree,
3101 struct extent_buffer *eb)
3102{
3103 unsigned long i;
3104 unsigned long num_pages;
3105
3106 num_pages = num_extent_pages(eb->start, eb->len);
3107 for (i = 0; i < num_pages; i++) {
3108 struct page *page = extent_buffer_page(eb, i);
3109 /* writepage may need to do something special for the
3110 * first page, we have to make sure page->private is
3111 * properly set. releasepage may drop page->private
3112 * on us if the page isn't already dirty.
3113 */
3114 lock_page(page);
3115 if (i == 0) {
3116 set_page_extent_head(page, eb->len);
3117 } else if (PagePrivate(page) &&
3118 page->private != EXTENT_PAGE_PRIVATE) {
3119 set_page_extent_mapped(page);
3120 }
3121 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3122 set_extent_dirty(tree, page_offset(page),
3123 page_offset(page) + PAGE_CACHE_SIZE - 1,
3124 GFP_NOFS);
3125 unlock_page(page);
3126 }
3127 return 0;
3128}
3129
3130int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3131 struct extent_buffer *eb)
3132{
3133 unsigned long i;
3134 struct page *page;
3135 unsigned long num_pages;
3136
3137 num_pages = num_extent_pages(eb->start, eb->len);
3138 eb->flags &= ~EXTENT_UPTODATE;
3139
3140 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3141 GFP_NOFS);
3142 for (i = 0; i < num_pages; i++) {
3143 page = extent_buffer_page(eb, i);
3144 if (page)
3145 ClearPageUptodate(page);
3146 }
3147 return 0;
3148}
3149
3150int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3151 struct extent_buffer *eb)
3152{
3153 unsigned long i;
3154 struct page *page;
3155 unsigned long num_pages;
3156
3157 num_pages = num_extent_pages(eb->start, eb->len);
3158
3159 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3160 GFP_NOFS);
3161 for (i = 0; i < num_pages; i++) {
3162 page = extent_buffer_page(eb, i);
3163 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3164 ((i == num_pages - 1) &&
3165 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3166 check_page_uptodate(tree, page);
3167 continue;
3168 }
3169 SetPageUptodate(page);
3170 }
3171 return 0;
3172}
3173
3174int extent_range_uptodate(struct extent_io_tree *tree,
3175 u64 start, u64 end)
3176{
3177 struct page *page;
3178 int ret;
3179 int pg_uptodate = 1;
3180 int uptodate;
3181 unsigned long index;
3182
3183 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
3184 if (ret)
3185 return 1;
3186 while (start <= end) {
3187 index = start >> PAGE_CACHE_SHIFT;
3188 page = find_get_page(tree->mapping, index);
	if (!page)
		return 0;
3189 uptodate = PageUptodate(page);
3190 page_cache_release(page);
3191 if (!uptodate) {
3192 pg_uptodate = 0;
3193 break;
3194 }
3195 start += PAGE_CACHE_SIZE;
3196 }
3197 return pg_uptodate;
3198}
3199
3200int extent_buffer_uptodate(struct extent_io_tree *tree,
3201 struct extent_buffer *eb)
3202{
3203 int ret = 0;
3204 unsigned long num_pages;
3205 unsigned long i;
3206 struct page *page;
3207 int pg_uptodate = 1;
3208
3209 if (eb->flags & EXTENT_UPTODATE)
3210 return 1;
3211
3212 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3213 EXTENT_UPTODATE, 1);
3214 if (ret)
3215 return ret;
3216
3217 num_pages = num_extent_pages(eb->start, eb->len);
3218 for (i = 0; i < num_pages; i++) {
3219 page = extent_buffer_page(eb, i);
3220 if (!PageUptodate(page)) {
3221 pg_uptodate = 0;
3222 break;
3223 }
3224 }
3225 return pg_uptodate;
3226}
3227
3228int read_extent_buffer_pages(struct extent_io_tree *tree,
3229 struct extent_buffer *eb,
3230 u64 start, int wait,
3231 get_extent_t *get_extent, int mirror_num)
3232{
3233 unsigned long i;
3234 unsigned long start_i;
3235 struct page *page;
3236 int err;
3237 int ret = 0;
3238 int locked_pages = 0;
3239 int all_uptodate = 1;
3240 int inc_all_pages = 0;
3241 unsigned long num_pages;
3242 struct bio *bio = NULL;
3243 unsigned long bio_flags = 0;
3244
3245 if (eb->flags & EXTENT_UPTODATE)
3246 return 0;
3247
3248 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3249 EXTENT_UPTODATE, 1)) {
3250 return 0;
3251 }
3252
3253 if (start) {
3254 WARN_ON(start < eb->start);
3255 start_i = (start >> PAGE_CACHE_SHIFT) -
3256 (eb->start >> PAGE_CACHE_SHIFT);
3257 } else {
3258 start_i = 0;
3259 }
3260
3261 num_pages = num_extent_pages(eb->start, eb->len);
3262 for (i = start_i; i < num_pages; i++) {
3263 page = extent_buffer_page(eb, i);
3264 if (!wait) {
3265 if (!trylock_page(page))
3266 goto unlock_exit;
3267 } else {
3268 lock_page(page);
3269 }
3270 locked_pages++;
3271 if (!PageUptodate(page))
3272 all_uptodate = 0;
3273 }
3274 if (all_uptodate) {
3275 if (start_i == 0)
3276 eb->flags |= EXTENT_UPTODATE;
3277 goto unlock_exit;
3278 }
3279
3280 for (i = start_i; i < num_pages; i++) {
3281 page = extent_buffer_page(eb, i);
3282 if (inc_all_pages)
3283 page_cache_get(page);
3284 if (!PageUptodate(page)) {
3285 if (start_i == 0)
3286 inc_all_pages = 1;
3287 ClearPageError(page);
3288 err = __extent_read_full_page(tree, page,
3289 get_extent, &bio,
3290 mirror_num, &bio_flags);
3291 if (err)
3292 ret = err;
3293 } else {
3294 unlock_page(page);
3295 }
3296 }
3297
3298 if (bio)
3299 submit_one_bio(READ, bio, mirror_num, bio_flags);
3300
3301 if (ret || !wait)
3302 return ret;
3303
3304 for (i = start_i; i < num_pages; i++) {
3305 page = extent_buffer_page(eb, i);
3306 wait_on_page_locked(page);
3307 if (!PageUptodate(page))
3308 ret = -EIO;
3309 }
3310
3311 if (!ret)
3312 eb->flags |= EXTENT_UPTODATE;
3313 return ret;
3314
3315unlock_exit:
3316 i = start_i;
3317 while (locked_pages > 0) {
3318 page = extent_buffer_page(eb, i);
3319 i++;
3320 unlock_page(page);
3321 locked_pages--;
3322 }
3323 return ret;
3324}
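/*
 * Illustrative sketch, not part of this change: the usual blocking
 * metadata read pairs alloc_extent_buffer() with this routine;
 * bytenr, blocksize and btree_get_extent are assumed caller-side
 * names.
 *
 *	eb = alloc_extent_buffer(tree, bytenr, blocksize,
 *				 NULL, GFP_NOFS);
 *	if (!eb)
 *		return NULL;
 *	if (read_extent_buffer_pages(tree, eb, 0, 1,
 *				     btree_get_extent, 0)) {
 *		free_extent_buffer(eb);
 *		return NULL;
 *	}
 */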
3325
3326void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3327 unsigned long start,
3328 unsigned long len)
3329{
3330 size_t cur;
3331 size_t offset;
3332 struct page *page;
3333 char *kaddr;
3334 char *dst = (char *)dstv;
3335 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3336 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3337
3338 WARN_ON(start > eb->len);
3339 WARN_ON(start + len > eb->start + eb->len);
3340
3341 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3342
3343 while (len > 0) {
3344 page = extent_buffer_page(eb, i);
3345
3346 cur = min(len, (PAGE_CACHE_SIZE - offset));
3347 kaddr = kmap_atomic(page, KM_USER1);
3348 memcpy(dst, kaddr + offset, cur);
3349 kunmap_atomic(kaddr, KM_USER1);
3350
3351 dst += cur;
3352 len -= cur;
3353 offset = 0;
3354 i++;
3355 }
3356}
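/*
 * Illustrative sketch, not part of this change: callers address the
 * buffer by eb-relative offset and never see the page boundaries,
 * e.g. copying the header out of a tree block:
 *
 *	struct btrfs_header header;
 *	read_extent_buffer(eb, &header, 0, sizeof(header));
 */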
3357
3358int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3359 unsigned long min_len, char **token, char **map,
3360 unsigned long *map_start,
3361 unsigned long *map_len, int km)
3362{
3363 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3364 char *kaddr;
3365 struct page *p;
3366 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3367 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3368 unsigned long end_i = (start_offset + start + min_len - 1) >>
3369 PAGE_CACHE_SHIFT;
3370
3371 if (i != end_i)
3372 return -EINVAL;
3373
3374 if (i == 0) {
3375 offset = start_offset;
3376 *map_start = 0;
3377 } else {
3378 offset = 0;
3379 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3380 }
3381
3382 if (start + min_len > eb->len) {
3383 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3384 "wanted %lu %lu\n", (unsigned long long)eb->start,
3385 eb->len, start, min_len);
3386 WARN_ON(1);
3387 }
3388
3389 p = extent_buffer_page(eb, i);
3390 kaddr = kmap_atomic(p, km);
3391 *token = kaddr;
3392 *map = kaddr + offset;
3393 *map_len = PAGE_CACHE_SIZE - offset;
3394 return 0;
3395}
3396
3397int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3398 unsigned long min_len,
3399 char **token, char **map,
3400 unsigned long *map_start,
3401 unsigned long *map_len, int km)
3402{
3403 int err;
3404 int save = 0;
3405 if (eb->map_token) {
3406 unmap_extent_buffer(eb, eb->map_token, km);
3407 eb->map_token = NULL;
3408 save = 1;
3409 WARN_ON(!mutex_is_locked(&eb->mutex));
3410 }
3411 err = map_private_extent_buffer(eb, start, min_len, token, map,
3412 map_start, map_len, km);
3413 if (!err && save) {
3414 eb->map_token = *token;
3415 eb->kaddr = *map;
3416 eb->map_start = *map_start;
3417 eb->map_len = *map_len;
3418 }
3419 return err;
3420}
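/*
 * Illustrative sketch, not part of this change: a mapped access must
 * stay inside one page, and the value lives at (offset - map_start)
 * from the returned map pointer:
 *
 *	char *token, *kaddr;
 *	unsigned long map_start, map_len;
 *	u64 val;
 *
 *	if (!map_extent_buffer(eb, offset, sizeof(u64), &token,
 *			       &kaddr, &map_start, &map_len,
 *			       KM_USER1)) {
 *		val = le64_to_cpu(*(__le64 *)(kaddr + offset -
 *					      map_start));
 *		unmap_extent_buffer(eb, token, KM_USER1);
 *	}
 */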
3421
3422void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3423{
3424 kunmap_atomic(token, km);
3425}
3426
3427int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3428 unsigned long start,
3429 unsigned long len)
3430{
3431 size_t cur;
3432 size_t offset;
3433 struct page *page;
3434 char *kaddr;
3435 char *ptr = (char *)ptrv;
3436 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3437 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3438 int ret = 0;
3439
3440 WARN_ON(start > eb->len);
3441 WARN_ON(start + len > eb->start + eb->len);
3442
3443 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3444
3445 while (len > 0) {
3446 page = extent_buffer_page(eb, i);
3447
3448 cur = min(len, (PAGE_CACHE_SIZE - offset));
3449
3450 kaddr = kmap_atomic(page, KM_USER0);
3451 ret = memcmp(ptr, kaddr + offset, cur);
3452 kunmap_atomic(kaddr, KM_USER0);
3453 if (ret)
3454 break;
3455
3456 ptr += cur;
3457 len -= cur;
3458 offset = 0;
3459 i++;
3460 }
3461 return ret;
3462}
3463
3464void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3465 unsigned long start, unsigned long len)
3466{
3467 size_t cur;
3468 size_t offset;
3469 struct page *page;
3470 char *kaddr;
3471 char *src = (char *)srcv;
3472 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3473 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3474
3475 WARN_ON(start > eb->len);
3476 WARN_ON(start + len > eb->start + eb->len);
3477
3478 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3479
3480 while (len > 0) {
3481 page = extent_buffer_page(eb, i);
3482 WARN_ON(!PageUptodate(page));
3483
3484 cur = min(len, PAGE_CACHE_SIZE - offset);
3485 kaddr = kmap_atomic(page, KM_USER1);
3486 memcpy(kaddr + offset, src, cur);
3487 kunmap_atomic(kaddr, KM_USER1);
3488
3489 src += cur;
3490 len -= cur;
3491 offset = 0;
3492 i++;
3493 }
3494}
3495
3496void memset_extent_buffer(struct extent_buffer *eb, char c,
3497 unsigned long start, unsigned long len)
3498{
3499 size_t cur;
3500 size_t offset;
3501 struct page *page;
3502 char *kaddr;
3503 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3504 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3505
3506 WARN_ON(start > eb->len);
3507 WARN_ON(start + len > eb->start + eb->len);
3508
3509 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3510
3511 while (len > 0) {
3512 page = extent_buffer_page(eb, i);
3513 WARN_ON(!PageUptodate(page));
3514
3515 cur = min(len, PAGE_CACHE_SIZE - offset);
3516 kaddr = kmap_atomic(page, KM_USER0);
3517 memset(kaddr + offset, c, cur);
3518 kunmap_atomic(kaddr, KM_USER0);
3519
3520 len -= cur;
3521 offset = 0;
3522 i++;
3523 }
3524}
3525
3526void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3527 unsigned long dst_offset, unsigned long src_offset,
3528 unsigned long len)
3529{
3530 u64 dst_len = dst->len;
3531 size_t cur;
3532 size_t offset;
3533 struct page *page;
3534 char *kaddr;
3535 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3536 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3537
3538 WARN_ON(src->len != dst_len);
3539
3540 offset = (start_offset + dst_offset) &
3541 ((unsigned long)PAGE_CACHE_SIZE - 1);
3542
3543 while (len > 0) {
3544 page = extent_buffer_page(dst, i);
3545 WARN_ON(!PageUptodate(page));
3546
3547 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3548
3549 kaddr = kmap_atomic(page, KM_USER0);
3550 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3551 kunmap_atomic(kaddr, KM_USER0);
3552
3553 src_offset += cur;
3554 len -= cur;
3555 offset = 0;
3556 i++;
3557 }
3558}
3559
3560static void move_pages(struct page *dst_page, struct page *src_page,
3561 unsigned long dst_off, unsigned long src_off,
3562 unsigned long len)
3563{
3564 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3565 if (dst_page == src_page) {
3566 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3567 } else {
3568 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3569 char *p = dst_kaddr + dst_off + len;
3570 char *s = src_kaddr + src_off + len;
3571
3572 while (len--)
3573 *--p = *--s;
3574
3575 kunmap_atomic(src_kaddr, KM_USER1);
3576 }
3577 kunmap_atomic(dst_kaddr, KM_USER0);
3578}
3579
3580static void copy_pages(struct page *dst_page, struct page *src_page,
3581 unsigned long dst_off, unsigned long src_off,
3582 unsigned long len)
3583{
3584 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3585 char *src_kaddr;
3586
3587 if (dst_page != src_page)
3588 src_kaddr = kmap_atomic(src_page, KM_USER1);
3589 else
3590 src_kaddr = dst_kaddr;
3591
3592 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3593 kunmap_atomic(dst_kaddr, KM_USER0);
3594 if (dst_page != src_page)
3595 kunmap_atomic(src_kaddr, KM_USER1);
3596}
3597
3598void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3599 unsigned long src_offset, unsigned long len)
3600{
3601 size_t cur;
3602 size_t dst_off_in_page;
3603 size_t src_off_in_page;
3604 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3605 unsigned long dst_i;
3606 unsigned long src_i;
3607
3608 if (src_offset + len > dst->len) {
3609 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3610 "len %lu dst len %lu\n", src_offset, len, dst->len);
3611 BUG_ON(1);
3612 }
3613 if (dst_offset + len > dst->len) {
3614 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3615 "len %lu dst len %lu\n", dst_offset, len, dst->len);
3616 BUG_ON(1);
3617 }
3618
3619 while (len > 0) {
3620 dst_off_in_page = (start_offset + dst_offset) &
3621 ((unsigned long)PAGE_CACHE_SIZE - 1);
3622 src_off_in_page = (start_offset + src_offset) &
3623 ((unsigned long)PAGE_CACHE_SIZE - 1);
3624
3625 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3626 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3627
3628 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3629 src_off_in_page));
3630 cur = min_t(unsigned long, cur,
3631 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3632
3633 copy_pages(extent_buffer_page(dst, dst_i),
3634 extent_buffer_page(dst, src_i),
3635 dst_off_in_page, src_off_in_page, cur);
3636
3637 src_offset += cur;
3638 dst_offset += cur;
3639 len -= cur;
3640 }
3641}
3642
3643void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3644 unsigned long src_offset, unsigned long len)
3645{
3646 size_t cur;
3647 size_t dst_off_in_page;
3648 size_t src_off_in_page;
3649 unsigned long dst_end = dst_offset + len - 1;
3650 unsigned long src_end = src_offset + len - 1;
3651 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3652 unsigned long dst_i;
3653 unsigned long src_i;
3654
3655 if (src_offset + len > dst->len) {
3656 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3657 "len %lu len %lu\n", src_offset, len, dst->len);
3658 BUG_ON(1);
3659 }
3660 if (dst_offset + len > dst->len) {
3661 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3662 "len %lu len %lu\n", dst_offset, len, dst->len);
3663 BUG_ON(1);
3664 }
3665 if (dst_offset < src_offset) {
3666 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3667 return;
3668 }
3669 while (len > 0) {
3670 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3671 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3672
3673 dst_off_in_page = (start_offset + dst_end) &
3674 ((unsigned long)PAGE_CACHE_SIZE - 1);
3675 src_off_in_page = (start_offset + src_end) &
3676 ((unsigned long)PAGE_CACHE_SIZE - 1);
3677
3678 cur = min_t(unsigned long, len, src_off_in_page + 1);
3679 cur = min(cur, dst_off_in_page + 1);
3680 move_pages(extent_buffer_page(dst, dst_i),
3681 extent_buffer_page(dst, src_i),
3682 dst_off_in_page - cur + 1,
3683 src_off_in_page - cur + 1, cur);
3684
3685 dst_end -= cur;
3686 src_end -= cur;
3687 len -= cur;
3688 }
3689}
3690
3691int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3692{
3693 u64 start = page_offset(page);
3694 struct extent_buffer *eb;
3695 int ret = 1;
3696 unsigned long i;
3697 unsigned long num_pages;
3698
3699 spin_lock(&tree->buffer_lock);
3700 eb = buffer_search(tree, start);
3701 if (!eb)
3702 goto out;
3703
3704 if (atomic_read(&eb->refs) > 1) {
3705 ret = 0;
3706 goto out;
3707 }
3708 /* at this point we can safely release the extent buffer */
3709 num_pages = num_extent_pages(eb->start, eb->len);
3710 for (i = 0; i < num_pages; i++)
3711 page_cache_release(extent_buffer_page(eb, i));
3712 rb_erase(&eb->rb_node, &tree->buffer);
3713 __free_extent_buffer(eb);
3714out:
3715 spin_unlock(&tree->buffer_lock);
3716 return ret;
3717}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
new file mode 100644
index 000000000000..c5b483a79137
--- /dev/null
+++ b/fs/btrfs/extent_io.h
@@ -0,0 +1,269 @@
1#ifndef __EXTENTIO__
2#define __EXTENTIO__
3
4#include <linux/rbtree.h>
5
6/* bits for the extent state */
7#define EXTENT_DIRTY 1
8#define EXTENT_WRITEBACK (1 << 1)
9#define EXTENT_UPTODATE (1 << 2)
10#define EXTENT_LOCKED (1 << 3)
11#define EXTENT_NEW (1 << 4)
12#define EXTENT_DELALLOC (1 << 5)
13#define EXTENT_DEFRAG (1 << 6)
14#define EXTENT_DEFRAG_DONE (1 << 7)
15#define EXTENT_BUFFER_FILLED (1 << 8)
16#define EXTENT_ORDERED (1 << 9)
17#define EXTENT_ORDERED_METADATA (1 << 10)
18#define EXTENT_BOUNDARY (1 << 11)
19#define EXTENT_NODATASUM (1 << 12)
20#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
21
22/* flags for bio submission */
23#define EXTENT_BIO_COMPRESSED 1
24
25/*
26 * page->private values. Every page that is controlled by the extent
27 * map has page->private set to one.
28 */
29#define EXTENT_PAGE_PRIVATE 1
30#define EXTENT_PAGE_PRIVATE_FIRST_PAGE 3
31
32struct extent_state;
33
34typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
35 struct bio *bio, int mirror_num,
36 unsigned long bio_flags);
37struct extent_io_ops {
38 int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
39 u64 start, u64 end, int *page_started,
40 unsigned long *nr_written);
41 int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
42 int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
43 extent_submit_bio_hook_t *submit_bio_hook;
44 int (*merge_bio_hook)(struct page *page, unsigned long offset,
45 size_t size, struct bio *bio,
46 unsigned long bio_flags);
47 int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
48 int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
49 u64 start, u64 end,
50 struct extent_state *state);
51 int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
52 u64 start, u64 end,
53 struct extent_state *state);
54 int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
55 struct extent_state *state);
56 int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
57 struct extent_state *state, int uptodate);
58 int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
59 unsigned long old, unsigned long bits);
60 int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end,
61 unsigned long old, unsigned long bits);
62 int (*write_cache_pages_lock_hook)(struct page *page);
63};
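/*
 * Illustrative sketch, not part of this change: users fill in only
 * the hooks they care about, e.g. a metadata tree that just wants
 * bio submission and read-completion callbacks (hook names here are
 * assumptions):
 *
 *	static struct extent_io_ops btree_extent_io_ops = {
 *		.readpage_end_io_hook = btree_readpage_end_io_hook,
 *		.submit_bio_hook = btree_submit_bio_hook,
 *	};
 */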
64
65struct extent_io_tree {
66 struct rb_root state;
67 struct rb_root buffer;
68 struct address_space *mapping;
69 u64 dirty_bytes;
70 spinlock_t lock;
71 spinlock_t buffer_lock;
72 struct extent_io_ops *ops;
73};
74
75struct extent_state {
76 u64 start;
77 u64 end; /* inclusive */
78 struct rb_node rb_node;
79 struct extent_io_tree *tree;
80 wait_queue_head_t wq;
81 atomic_t refs;
82 unsigned long state;
83
84 /* for use by the FS */
85 u64 private;
86
87 struct list_head leak_list;
88};
89
90struct extent_buffer {
91 u64 start;
92 unsigned long len;
93 char *map_token;
94 char *kaddr;
95 unsigned long map_start;
96 unsigned long map_len;
97 struct page *first_page;
98 atomic_t refs;
99 int flags;
100 struct list_head leak_list;
101 struct rb_node rb_node;
102 struct mutex mutex;
103};
104
105struct extent_map_tree;
106
107static inline struct extent_state *extent_state_next(struct extent_state *state)
108{
109 struct rb_node *node;
110 node = rb_next(&state->rb_node);
111 if (!node)
112 return NULL;
113 return rb_entry(node, struct extent_state, rb_node);
114}
115
116typedef struct extent_map *(get_extent_t)(struct inode *inode,
117 struct page *page,
118 size_t page_offset,
119 u64 start, u64 len,
120 int create);
121
122void extent_io_tree_init(struct extent_io_tree *tree,
123 struct address_space *mapping, gfp_t mask);
124int try_release_extent_mapping(struct extent_map_tree *map,
125 struct extent_io_tree *tree, struct page *page,
126 gfp_t mask);
127int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page);
128int try_release_extent_state(struct extent_map_tree *map,
129 struct extent_io_tree *tree, struct page *page,
130 gfp_t mask);
131int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
132int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
133int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
134 gfp_t mask);
135int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
136 get_extent_t *get_extent);
137int __init extent_io_init(void);
138void extent_io_exit(void);
139
140u64 count_range_bits(struct extent_io_tree *tree,
141 u64 *start, u64 search_end,
142 u64 max_bytes, unsigned long bits);
143
144int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
145 int bits, int filled);
146int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
147 int bits, gfp_t mask);
148int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
149 int bits, int wake, int delete, gfp_t mask);
150int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
151 int bits, gfp_t mask);
152int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
153 gfp_t mask);
154int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
155 gfp_t mask);
156int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
157 gfp_t mask);
158int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
159 gfp_t mask);
160int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
161 gfp_t mask);
162int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start,
163 u64 end, gfp_t mask);
164int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
165 gfp_t mask);
166int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
167 gfp_t mask);
168int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
169 u64 *start_ret, u64 *end_ret, int bits);
170struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
171 u64 start, int bits);
172int extent_invalidatepage(struct extent_io_tree *tree,
173 struct page *page, unsigned long offset);
174int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
175 get_extent_t *get_extent,
176 struct writeback_control *wbc);
177int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
178 u64 start, u64 end, get_extent_t *get_extent,
179 int mode);
180int extent_writepages(struct extent_io_tree *tree,
181 struct address_space *mapping,
182 get_extent_t *get_extent,
183 struct writeback_control *wbc);
184int extent_readpages(struct extent_io_tree *tree,
185 struct address_space *mapping,
186 struct list_head *pages, unsigned nr_pages,
187 get_extent_t get_extent);
188int extent_prepare_write(struct extent_io_tree *tree,
189 struct inode *inode, struct page *page,
190 unsigned from, unsigned to, get_extent_t *get_extent);
191int extent_commit_write(struct extent_io_tree *tree,
192 struct inode *inode, struct page *page,
193 unsigned from, unsigned to);
194sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
195 get_extent_t *get_extent);
196int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
197int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
198int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
199void set_page_extent_mapped(struct page *page);
200
201struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
202 u64 start, unsigned long len,
203 struct page *page0,
204 gfp_t mask);
205struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
206 u64 start, unsigned long len,
207 gfp_t mask);
208void free_extent_buffer(struct extent_buffer *eb);
209int read_extent_buffer_pages(struct extent_io_tree *tree,
210 struct extent_buffer *eb, u64 start, int wait,
211 get_extent_t *get_extent, int mirror_num);
212
213static inline void extent_buffer_get(struct extent_buffer *eb)
214{
215 atomic_inc(&eb->refs);
216}
217
218int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
219 unsigned long start,
220 unsigned long len);
221void read_extent_buffer(struct extent_buffer *eb, void *dst,
222 unsigned long start,
223 unsigned long len);
224void write_extent_buffer(struct extent_buffer *eb, const void *src,
225 unsigned long start, unsigned long len);
226void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
227 unsigned long dst_offset, unsigned long src_offset,
228 unsigned long len);
229void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
230 unsigned long src_offset, unsigned long len);
231void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
232 unsigned long src_offset, unsigned long len);
233void memset_extent_buffer(struct extent_buffer *eb, char c,
234 unsigned long start, unsigned long len);
235int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
236 struct extent_buffer *eb);
237int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end);
238int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
239int clear_extent_buffer_dirty(struct extent_io_tree *tree,
240 struct extent_buffer *eb);
241int set_extent_buffer_dirty(struct extent_io_tree *tree,
242 struct extent_buffer *eb);
243int set_extent_buffer_uptodate(struct extent_io_tree *tree,
244 struct extent_buffer *eb);
245int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
246 struct extent_buffer *eb);
247int extent_buffer_uptodate(struct extent_io_tree *tree,
248 struct extent_buffer *eb);
249int map_extent_buffer(struct extent_buffer *eb, unsigned long offset,
250 unsigned long min_len, char **token, char **map,
251 unsigned long *map_start,
252 unsigned long *map_len, int km);
253int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
254 unsigned long min_len, char **token, char **map,
255 unsigned long *map_start,
256 unsigned long *map_len, int km);
257void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
258int release_extent_buffer_tail_pages(struct extent_buffer *eb);
259int extent_range_uptodate(struct extent_io_tree *tree,
260 u64 start, u64 end);
261int extent_clear_unlock_delalloc(struct inode *inode,
262 struct extent_io_tree *tree,
263 u64 start, u64 end, struct page *locked_page,
264 int unlock_page,
265 int clear_unlock,
266 int clear_delalloc, int clear_dirty,
267 int set_writeback,
268 int end_writeback);
269#endif
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
new file mode 100644
index 000000000000..4a83e33ada32
--- /dev/null
+++ b/fs/btrfs/extent_map.c
@@ -0,0 +1,351 @@
1#include <linux/err.h>
2#include <linux/gfp.h>
3#include <linux/slab.h>
4#include <linux/module.h>
5#include <linux/spinlock.h>
6#include <linux/version.h>
7#include <linux/hardirq.h>
8#include "extent_map.h"
9
10/* temporary define until extent_map moves out of btrfs */
11struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
12 unsigned long extra_flags,
13 void (*ctor)(void *, struct kmem_cache *,
14 unsigned long));
15
16static struct kmem_cache *extent_map_cache;
17
18int __init extent_map_init(void)
19{
20 extent_map_cache = btrfs_cache_create("extent_map",
21 sizeof(struct extent_map), 0,
22 NULL);
23 if (!extent_map_cache)
24 return -ENOMEM;
25 return 0;
26}
27
28void extent_map_exit(void)
29{
30 if (extent_map_cache)
31 kmem_cache_destroy(extent_map_cache);
32}
33
34/**
35 * extent_map_tree_init - initialize extent map tree
36 * @tree: tree to initialize
37 * @mask: flags for memory allocations during tree operations
38 *
39 * Initialize the extent map tree @tree. Should be called for each new inode
40 * or other user of the extent_map interface.
41 */
42void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
43{
44 tree->map.rb_node = NULL;
45 spin_lock_init(&tree->lock);
46}
47EXPORT_SYMBOL(extent_map_tree_init);
48
49/**
50 * alloc_extent_map - allocate new extent map structure
51 * @mask: memory allocation flags
52 *
53 * Allocate a new extent_map structure. The new structure is
54 * returned with a reference count of one and needs to be
55 * freed using free_extent_map()
56 */
57struct extent_map *alloc_extent_map(gfp_t mask)
58{
59 struct extent_map *em;
60 em = kmem_cache_alloc(extent_map_cache, mask);
61 if (!em || IS_ERR(em))
62 return em;
63 em->in_tree = 0;
64 em->flags = 0;
65 atomic_set(&em->refs, 1);
66 return em;
67}
68EXPORT_SYMBOL(alloc_extent_map);
69
70/**
71 * free_extent_map - drop reference count of an extent_map
72 * @em: extent map being released
73 *
74 * Drops the reference count on @em by one and frees the structure
75 * if the reference count hits zero.
76 */
77void free_extent_map(struct extent_map *em)
78{
79 if (!em)
80 return;
81 WARN_ON(atomic_read(&em->refs) == 0);
82 if (atomic_dec_and_test(&em->refs)) {
83 WARN_ON(em->in_tree);
84 kmem_cache_free(extent_map_cache, em);
85 }
86}
87EXPORT_SYMBOL(free_extent_map);
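/*
 * Illustrative sketch, not part of this change: the simplest
 * lifecycle; alloc_extent_map() hands back one reference and
 * free_extent_map() drops it:
 *
 *	em = alloc_extent_map(GFP_NOFS);
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = start;
 *	em->len = len;
 *	em->block_start = block_start;
 *	...
 *	free_extent_map(em);
 */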
88
89static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
90 struct rb_node *node)
91{
92 struct rb_node **p = &root->rb_node;
93 struct rb_node *parent = NULL;
94 struct extent_map *entry;
95
96 while (*p) {
97 parent = *p;
98 entry = rb_entry(parent, struct extent_map, rb_node);
99
100 WARN_ON(!entry->in_tree);
101
102 if (offset < entry->start)
103 p = &(*p)->rb_left;
104 else if (offset >= extent_map_end(entry))
105 p = &(*p)->rb_right;
106 else
107 return parent;
108 }
109
110 entry = rb_entry(node, struct extent_map, rb_node);
111 entry->in_tree = 1;
112 rb_link_node(node, parent, p);
113 rb_insert_color(node, root);
114 return NULL;
115}
116
117/*
118 * search through the tree for an extent_map with a given offset. If
119 * it can't be found, try to find some neighboring extents
120 */
121static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
122 struct rb_node **prev_ret,
123 struct rb_node **next_ret)
124{
125 struct rb_node *n = root->rb_node;
126 struct rb_node *prev = NULL;
127 struct rb_node *orig_prev = NULL;
128 struct extent_map *entry;
129 struct extent_map *prev_entry = NULL;
130
131 while (n) {
132 entry = rb_entry(n, struct extent_map, rb_node);
133 prev = n;
134 prev_entry = entry;
135
136 WARN_ON(!entry->in_tree);
137
138 if (offset < entry->start)
139 n = n->rb_left;
140 else if (offset >= extent_map_end(entry))
141 n = n->rb_right;
142 else
143 return n;
144 }
145
146 if (prev_ret) {
147 orig_prev = prev;
148 while (prev && offset >= extent_map_end(prev_entry)) {
149 prev = rb_next(prev);
150 prev_entry = rb_entry(prev, struct extent_map, rb_node);
151 }
152 *prev_ret = prev;
153 prev = orig_prev;
154 }
155
156 if (next_ret) {
157 prev_entry = rb_entry(prev, struct extent_map, rb_node);
158 while (prev && offset < prev_entry->start) {
159 prev = rb_prev(prev);
160 prev_entry = rb_entry(prev, struct extent_map, rb_node);
161 }
162 *next_ret = prev;
163 }
164 return NULL;
165}
166
167/*
168 * look for an offset in the tree, and if it can't be found, return
169 * the first offset we can find smaller than 'offset'.
170 */
171static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
172{
173 struct rb_node *prev;
174 struct rb_node *ret;
175 ret = __tree_search(root, offset, &prev, NULL);
176 if (!ret)
177 return prev;
178 return ret;
179}
180
181/* check to see if two extent_map structs are adjacent and safe to merge */
182static int mergable_maps(struct extent_map *prev, struct extent_map *next)
183{
184 if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
185 return 0;
186
187 /*
188 * don't merge compressed extents, we need to know their
189 * actual size
190 */
191 if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
192 return 0;
193
194 if (extent_map_end(prev) == next->start &&
195 prev->flags == next->flags &&
196 prev->bdev == next->bdev &&
197 ((next->block_start == EXTENT_MAP_HOLE &&
198 prev->block_start == EXTENT_MAP_HOLE) ||
199 (next->block_start == EXTENT_MAP_INLINE &&
200 prev->block_start == EXTENT_MAP_INLINE) ||
201 (next->block_start == EXTENT_MAP_DELALLOC &&
202 prev->block_start == EXTENT_MAP_DELALLOC) ||
203 (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
204 next->block_start == extent_map_block_end(prev)))) {
205 return 1;
206 }
207 return 0;
208}
209
210/**
211 * add_extent_mapping - add new extent map to the extent tree
212 * @tree: tree to insert new map in
213 * @em: map to insert
214 *
215 * Insert @em into @tree or perform a simple forward/backward merge with
216 * existing mappings. The extent_map struct passed in will be inserted
217 * into the tree directly, with an additional reference taken, or a
218 * reference dropped if the merge attempt was successful.
219 */
220int add_extent_mapping(struct extent_map_tree *tree,
221 struct extent_map *em)
222{
223 int ret = 0;
224 struct extent_map *merge = NULL;
225 struct rb_node *rb;
226 struct extent_map *exist;
227
228 exist = lookup_extent_mapping(tree, em->start, em->len);
229 if (exist) {
230 free_extent_map(exist);
231 ret = -EEXIST;
232 goto out;
233 }
234 assert_spin_locked(&tree->lock);
235 rb = tree_insert(&tree->map, em->start, &em->rb_node);
236 if (rb) {
237 ret = -EEXIST;
238 free_extent_map(merge);
239 goto out;
240 }
241 atomic_inc(&em->refs);
242 if (em->start != 0) {
243 rb = rb_prev(&em->rb_node);
244 if (rb)
245 merge = rb_entry(rb, struct extent_map, rb_node);
246 if (rb && mergable_maps(merge, em)) {
247 em->start = merge->start;
248 em->len += merge->len;
249 em->block_len += merge->block_len;
250 em->block_start = merge->block_start;
251 merge->in_tree = 0;
252 rb_erase(&merge->rb_node, &tree->map);
253 free_extent_map(merge);
254 }
255 }
256 rb = rb_next(&em->rb_node);
257 if (rb)
258 merge = rb_entry(rb, struct extent_map, rb_node);
259 if (rb && mergable_maps(em, merge)) {
260 em->len += merge->len;
261 em->block_len += merge->block_len;
262 rb_erase(&merge->rb_node, &tree->map);
263 merge->in_tree = 0;
264 free_extent_map(merge);
265 }
266out:
267 return ret;
268}
269EXPORT_SYMBOL(add_extent_mapping);
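A hypothetical caller sketch (illustrative names, not from this patch) showing the expected calling convention: the tree spinlock must be held, as enforced by assert_spin_locked(), and the caller's allocation reference is dropped separately from the reference the tree takes via atomic_inc().

static int example_insert(struct extent_map_tree *tree,
			  u64 start, u64 len, u64 block_start)
{
	struct extent_map *em = alloc_extent_map(GFP_NOFS);
	int ret;

	if (!em)
		return -ENOMEM;
	em->start = start;
	em->len = len;
	em->block_start = block_start;
	em->block_len = len;

	spin_lock(&tree->lock);
	ret = add_extent_mapping(tree, em);
	spin_unlock(&tree->lock);

	/* drop the allocation reference; on success the tree holds its own */
	free_extent_map(em);
	return ret;
}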
270
271/* simple helper to do math around the end of an extent, handling wrap */
272static u64 range_end(u64 start, u64 len)
273{
274 if (start + len < start)
275 return (u64)-1;
276 return start + len;
277}
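A userspace restatement of the helper (sketch only), showing the unsigned wrap-around case it guards against: once start + len overflows, the end is clamped to the largest u64.

#include <assert.h>
#include <stdint.h>

static uint64_t range_end(uint64_t start, uint64_t len)
{
	if (start + len < start)	/* unsigned wrap */
		return (uint64_t)-1;
	return start + len;
}

int main(void)
{
	assert(range_end(4096, 4096) == 8192);
	assert(range_end(UINT64_MAX - 10, 100) == UINT64_MAX);
	return 0;
}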
278
279/**
280 * lookup_extent_mapping - lookup extent_map
281 * @tree: tree to lookup in
282 * @start: byte offset to start the search
283 * @len: length of the lookup range
284 *
285 * Find and return the first extent_map struct in @tree that intersects the
286 * [start, start + len) range. There may be additional objects in the tree that
287 * intersect, so check the object returned carefully to make sure that no
288 * additional lookups are needed.
289 */
290struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
291 u64 start, u64 len)
292{
293 struct extent_map *em;
294 struct rb_node *rb_node;
295 struct rb_node *prev = NULL;
296 struct rb_node *next = NULL;
297 u64 end = range_end(start, len);
298
299 assert_spin_locked(&tree->lock);
300 rb_node = __tree_search(&tree->map, start, &prev, &next);
301 if (!rb_node && prev) {
302 em = rb_entry(prev, struct extent_map, rb_node);
303 if (end > em->start && start < extent_map_end(em))
304 goto found;
305 }
306 if (!rb_node && next) {
307 em = rb_entry(next, struct extent_map, rb_node);
308 if (end > em->start && start < extent_map_end(em))
309 goto found;
310 }
311 if (!rb_node) {
312 em = NULL;
313 goto out;
314 }
315 if (IS_ERR(rb_node)) {
316 em = ERR_PTR(PTR_ERR(rb_node));
317 goto out;
318 }
319 em = rb_entry(rb_node, struct extent_map, rb_node);
320 if (end > em->start && start < extent_map_end(em))
321 goto found;
322
323 em = NULL;
324 goto out;
325
326found:
327 atomic_inc(&em->refs);
328out:
329 return em;
330}
331EXPORT_SYMBOL(lookup_extent_mapping);
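Hypothetical usage sketch (illustrative name, assuming the declarations from extent_map.h): the lookup only guarantees intersection, not full coverage of the range, so a careful caller re-checks the bounds and then drops the reference the lookup took.

static int covers_whole_range(struct extent_map_tree *tree,
			      u64 start, u64 len)
{
	struct extent_map *em;
	int whole;

	spin_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);
	spin_unlock(&tree->lock);
	if (!em)
		return 0;

	/* the result merely intersects [start, start + len) */
	whole = em->start <= start &&
		extent_map_end(em) >= start + len;

	free_extent_map(em);	/* drop the reference the lookup took */
	return whole;
}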
332
333/**
334 * remove_extent_mapping - removes an extent_map from the extent tree
335 * @tree: extent tree to remove from
336 * @em: extent map being removed
337 *
338 * Removes @em from @tree. No reference counts are dropped, and no checks
339 * are done to see if the range is in use.
340 */
341int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
342{
343 int ret = 0;
344
345 WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
346 assert_spin_locked(&tree->lock);
347 rb_erase(&em->rb_node, &tree->map);
348 em->in_tree = 0;
349 return ret;
350}
351EXPORT_SYMBOL(remove_extent_mapping);
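Since remove_extent_mapping() drops no references, a caller that both looked a map up and removed it ends up dropping two references, as btrfs_drop_extent_cache() in file.c does ("once for us", "once for the tree"). A sketch of that lifecycle:

static void example_remove(struct extent_map_tree *tree, u64 start, u64 len)
{
	struct extent_map *em;

	spin_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);	/* takes a ref */
	if (em)
		remove_extent_mapping(tree, em);
	spin_unlock(&tree->lock);

	if (em) {
		free_extent_map(em);	/* once for us (the lookup ref) */
		free_extent_map(em);	/* once for the tree */
	}
}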
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
new file mode 100644
index 000000000000..fb6eeef06bb0
--- /dev/null
+++ b/fs/btrfs/extent_map.h
@@ -0,0 +1,62 @@
1#ifndef __EXTENTMAP__
2#define __EXTENTMAP__
3
4#include <linux/rbtree.h>
5
6#define EXTENT_MAP_LAST_BYTE (u64)-4
7#define EXTENT_MAP_HOLE (u64)-3
8#define EXTENT_MAP_INLINE (u64)-2
9#define EXTENT_MAP_DELALLOC (u64)-1
10
11/* bits for the flags field */
12#define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */
13#define EXTENT_FLAG_COMPRESSED 1
14#define EXTENT_FLAG_VACANCY 2 /* no file extent item found */
15#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
16
17struct extent_map {
18 struct rb_node rb_node;
19
20 /* all of these are in bytes */
21 u64 start;
22 u64 len;
23 u64 orig_start;
24 u64 block_start;
25 u64 block_len;
26 unsigned long flags;
27 struct block_device *bdev;
28 atomic_t refs;
29 int in_tree;
30};
31
32struct extent_map_tree {
33 struct rb_root map;
34 spinlock_t lock;
35};
36
37static inline u64 extent_map_end(struct extent_map *em)
38{
39 if (em->start + em->len < em->start)
40 return (u64)-1;
41 return em->start + em->len;
42}
43
44static inline u64 extent_map_block_end(struct extent_map *em)
45{
46 if (em->block_start + em->block_len < em->block_start)
47 return (u64)-1;
48 return em->block_start + em->block_len;
49}
50
51void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask);
52struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
53 u64 start, u64 len);
54int add_extent_mapping(struct extent_map_tree *tree,
55 struct extent_map *em);
56int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
57
58struct extent_map *alloc_extent_map(gfp_t mask);
59void free_extent_map(struct extent_map *em);
60int __init extent_map_init(void);
61void extent_map_exit(void);
62#endif
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
new file mode 100644
index 000000000000..964652435fd1
--- /dev/null
+++ b/fs/btrfs/file-item.c
@@ -0,0 +1,831 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/bio.h>
20#include <linux/pagemap.h>
21#include <linux/highmem.h>
22#include "ctree.h"
23#include "disk-io.h"
24#include "transaction.h"
25#include "print-tree.h"
26
27#define MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
28 sizeof(struct btrfs_item) * 2) / \
29 size) - 1))
30
31#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
32 sizeof(struct btrfs_ordered_sum)) / \
33 sizeof(struct btrfs_sector_sum) * \
34 (r)->sectorsize - (r)->sectorsize)
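Worked numbers for MAX_CSUM_ITEMS (userspace sketch; the leaf data size and item header size below are assumptions for illustration, the real values depend on the node size and struct btrfs_item). Reserving room for two item headers plus one spare csum slot is what later lets btrfs_del_csums() split a full item in place.

#include <stdio.h>

int main(void)
{
	unsigned long leaf_data = 3995;	/* assumed usable leaf bytes */
	unsigned long item_hdr = 25;	/* assumed sizeof(struct btrfs_item) */
	unsigned long csum_size = 4;	/* crc32c */

	unsigned long max_csums = (leaf_data - 2 * item_hdr) / csum_size - 1;

	/* one csum item can then describe max_csums 4KiB blocks */
	printf("%lu csums per item -> %lu KiB of data\n",
	       max_csums, max_csums * 4);
	return 0;
}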
35
36int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 u64 objectid, u64 pos,
39 u64 disk_offset, u64 disk_num_bytes,
40 u64 num_bytes, u64 offset, u64 ram_bytes,
41 u8 compression, u8 encryption, u16 other_encoding)
42{
43 int ret = 0;
44 struct btrfs_file_extent_item *item;
45 struct btrfs_key file_key;
46 struct btrfs_path *path;
47 struct extent_buffer *leaf;
48
49 path = btrfs_alloc_path();
50 BUG_ON(!path);
51 file_key.objectid = objectid;
52 file_key.offset = pos;
53 btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
54
55 ret = btrfs_insert_empty_item(trans, root, path, &file_key,
56 sizeof(*item));
57 if (ret < 0)
58 goto out;
59 BUG_ON(ret);
60 leaf = path->nodes[0];
61 item = btrfs_item_ptr(leaf, path->slots[0],
62 struct btrfs_file_extent_item);
63 btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
64 btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
65 btrfs_set_file_extent_offset(leaf, item, offset);
66 btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
67 btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
68 btrfs_set_file_extent_generation(leaf, item, trans->transid);
69 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
70 btrfs_set_file_extent_compression(leaf, item, compression);
71 btrfs_set_file_extent_encryption(leaf, item, encryption);
72 btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);
73
74 btrfs_mark_buffer_dirty(leaf);
75out:
76 btrfs_free_path(path);
77 return ret;
78}
79
80struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
81 struct btrfs_root *root,
82 struct btrfs_path *path,
83 u64 bytenr, int cow)
84{
85 int ret;
86 struct btrfs_key file_key;
87 struct btrfs_key found_key;
88 struct btrfs_csum_item *item;
89 struct extent_buffer *leaf;
90 u64 csum_offset = 0;
91 u16 csum_size =
92 btrfs_super_csum_size(&root->fs_info->super_copy);
93 int csums_in_item;
94
95 file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
96 file_key.offset = bytenr;
97 btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);
98 ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
99 if (ret < 0)
100 goto fail;
101 leaf = path->nodes[0];
102 if (ret > 0) {
103 ret = 1;
104 if (path->slots[0] == 0)
105 goto fail;
106 path->slots[0]--;
107 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
108 if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY)
109 goto fail;
110
111 csum_offset = (bytenr - found_key.offset) >>
112 root->fs_info->sb->s_blocksize_bits;
113 csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
114 csums_in_item /= csum_size;
115
116 if (csum_offset >= csums_in_item) {
117 ret = -EFBIG;
118 goto fail;
119 }
120 }
121 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
122 item = (struct btrfs_csum_item *)((unsigned char *)item +
123 csum_offset * csum_size);
124 return item;
125fail:
126 if (ret > 0)
127 ret = -ENOENT;
128 return ERR_PTR(ret);
129}
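The offset arithmetic above, restated in userspace with assumed 4KiB blocks and 4-byte crc32c sums: the item's key.offset is the disk byte of the first block it covers, and each following block adds one csum_size slot.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned blocksize_bits = 12;	/* 4KiB blocks */
	const unsigned csum_size = 4;		/* crc32c */
	uint64_t key_offset = 1048576;		/* first block the item covers */
	uint64_t bytenr = key_offset + 5 * 4096;

	uint64_t csum_offset = (bytenr - key_offset) >> blocksize_bits;
	assert(csum_offset == 5);
	/* the csum lives at item start + csum_offset * csum_size = byte 20 */
	assert(csum_offset * csum_size == 20);
	return 0;
}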
130
131
132int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
133 struct btrfs_root *root,
134 struct btrfs_path *path, u64 objectid,
135 u64 offset, int mod)
136{
137 int ret;
138 struct btrfs_key file_key;
139 int ins_len = mod < 0 ? -1 : 0;
140 int cow = mod != 0;
141
142 file_key.objectid = objectid;
143 file_key.offset = offset;
144 btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
145 ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
146 return ret;
147}
148
149
150int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
151 struct bio *bio, u32 *dst)
152{
153 u32 sum;
154 struct bio_vec *bvec = bio->bi_io_vec;
155 int bio_index = 0;
156 u64 offset;
157 u64 item_start_offset = 0;
158 u64 item_last_offset = 0;
159 u64 disk_bytenr;
160 u32 diff;
161 u16 csum_size =
162 btrfs_super_csum_size(&root->fs_info->super_copy);
163 int ret;
164 struct btrfs_path *path;
165 struct btrfs_csum_item *item = NULL;
166 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
167
168 path = btrfs_alloc_path();
169 if (bio->bi_size > PAGE_CACHE_SIZE * 8)
170 path->reada = 2;
171
172 WARN_ON(bio->bi_vcnt <= 0);
173
174 disk_bytenr = (u64)bio->bi_sector << 9;
175 while (bio_index < bio->bi_vcnt) {
176 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
177 ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum);
178 if (ret == 0)
179 goto found;
180
181 if (!item || disk_bytenr < item_start_offset ||
182 disk_bytenr >= item_last_offset) {
183 struct btrfs_key found_key;
184 u32 item_size;
185
186 if (item)
187 btrfs_release_path(root, path);
188 item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
189 path, disk_bytenr, 0);
190 if (IS_ERR(item)) {
191 ret = PTR_ERR(item);
192 if (ret == -ENOENT || ret == -EFBIG)
193 ret = 0;
194 sum = 0;
195 if (BTRFS_I(inode)->root->root_key.objectid ==
196 BTRFS_DATA_RELOC_TREE_OBJECTID) {
197 set_extent_bits(io_tree, offset,
198 offset + bvec->bv_len - 1,
199 EXTENT_NODATASUM, GFP_NOFS);
200 } else {
201 printk(KERN_INFO "btrfs no csum found "
202 "for inode %lu start %llu\n",
203 inode->i_ino,
204 (unsigned long long)offset);
205 }
206 item = NULL;
207 btrfs_release_path(root, path);
208 goto found;
209 }
210 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
211 path->slots[0]);
212
213 item_start_offset = found_key.offset;
214 item_size = btrfs_item_size_nr(path->nodes[0],
215 path->slots[0]);
216 item_last_offset = item_start_offset +
217 (item_size / csum_size) *
218 root->sectorsize;
219 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
220 struct btrfs_csum_item);
221 }
222 /*
223 * this byte range must be able to fit inside
224 * a single leaf so it will also fit inside a u32
225 */
226 diff = disk_bytenr - item_start_offset;
227 diff = diff / root->sectorsize;
228 diff = diff * csum_size;
229
230 read_extent_buffer(path->nodes[0], &sum,
231 ((unsigned long)item) + diff,
232 csum_size);
233found:
234 if (dst)
235 *dst++ = sum;
236 else
237 set_state_private(io_tree, offset, sum);
238 disk_bytenr += bvec->bv_len;
239 bio_index++;
240 bvec++;
241 }
242 btrfs_free_path(path);
243 return 0;
244}
245
246int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
247 struct list_head *list)
248{
249 struct btrfs_key key;
250 struct btrfs_path *path;
251 struct extent_buffer *leaf;
252 struct btrfs_ordered_sum *sums;
253 struct btrfs_sector_sum *sector_sum;
254 struct btrfs_csum_item *item;
255 unsigned long offset;
256 int ret;
257 size_t size;
258 u64 csum_end;
259 u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
260
261 path = btrfs_alloc_path();
262 BUG_ON(!path);
263
264 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
265 key.offset = start;
266 key.type = BTRFS_EXTENT_CSUM_KEY;
267
268 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
269 if (ret < 0)
270 goto fail;
271 if (ret > 0 && path->slots[0] > 0) {
272 leaf = path->nodes[0];
273 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
274 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
275 key.type == BTRFS_EXTENT_CSUM_KEY) {
276 offset = (start - key.offset) >>
277 root->fs_info->sb->s_blocksize_bits;
278 if (offset * csum_size <
279 btrfs_item_size_nr(leaf, path->slots[0] - 1))
280 path->slots[0]--;
281 }
282 }
283
284 while (start <= end) {
285 leaf = path->nodes[0];
286 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
287 ret = btrfs_next_leaf(root, path);
288 if (ret < 0)
289 goto fail;
290 if (ret > 0)
291 break;
292 leaf = path->nodes[0];
293 }
294
295 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
296 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
297 key.type != BTRFS_EXTENT_CSUM_KEY)
298 break;
299
300 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
301 if (key.offset > end)
302 break;
303
304 if (key.offset > start)
305 start = key.offset;
306
307 size = btrfs_item_size_nr(leaf, path->slots[0]);
308 csum_end = key.offset + (size / csum_size) * root->sectorsize;
309 if (csum_end <= start) {
310 path->slots[0]++;
311 continue;
312 }
313
314 csum_end = min(csum_end, end + 1);
315 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
316 struct btrfs_csum_item);
317 while (start < csum_end) {
318 size = min_t(size_t, csum_end - start,
319 MAX_ORDERED_SUM_BYTES(root));
320 sums = kzalloc(btrfs_ordered_sum_size(root, size),
321 GFP_NOFS);
322 BUG_ON(!sums);
323
324 sector_sum = sums->sums;
325 sums->bytenr = start;
326 sums->len = size;
327
328 offset = (start - key.offset) >>
329 root->fs_info->sb->s_blocksize_bits;
330 offset *= csum_size;
331
332 while (size > 0) {
333 read_extent_buffer(path->nodes[0],
334 &sector_sum->sum,
335 ((unsigned long)item) +
336 offset, csum_size);
337 sector_sum->bytenr = start;
338
339 size -= root->sectorsize;
340 start += root->sectorsize;
341 offset += csum_size;
342 sector_sum++;
343 }
344 list_add_tail(&sums->list, list);
345 }
346 path->slots[0]++;
347 }
348 ret = 0;
349fail:
350 btrfs_free_path(path);
351 return ret;
352}
353
354int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
355 struct bio *bio, u64 file_start, int contig)
356{
357 struct btrfs_ordered_sum *sums;
358 struct btrfs_sector_sum *sector_sum;
359 struct btrfs_ordered_extent *ordered;
360 char *data;
361 struct bio_vec *bvec = bio->bi_io_vec;
362 int bio_index = 0;
363 unsigned long total_bytes = 0;
364 unsigned long this_sum_bytes = 0;
365 u64 offset;
366 u64 disk_bytenr;
367
368 WARN_ON(bio->bi_vcnt <= 0);
369 sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS);
370 if (!sums)
371 return -ENOMEM;
372
373 sector_sum = sums->sums;
374 disk_bytenr = (u64)bio->bi_sector << 9;
375 sums->len = bio->bi_size;
376 INIT_LIST_HEAD(&sums->list);
377
378 if (contig)
379 offset = file_start;
380 else
381 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
382
383 ordered = btrfs_lookup_ordered_extent(inode, offset);
384 BUG_ON(!ordered);
385 sums->bytenr = ordered->start;
386
387 while (bio_index < bio->bi_vcnt) {
388 if (!contig)
389 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
390
391 if (!contig && (offset >= ordered->file_offset + ordered->len ||
392 offset < ordered->file_offset)) {
393 unsigned long bytes_left;
394 sums->len = this_sum_bytes;
395 this_sum_bytes = 0;
396 btrfs_add_ordered_sum(inode, ordered, sums);
397 btrfs_put_ordered_extent(ordered);
398
399 bytes_left = bio->bi_size - total_bytes;
400
401 sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
402 GFP_NOFS);
403 BUG_ON(!sums);
404 sector_sum = sums->sums;
405 sums->len = bytes_left;
406 ordered = btrfs_lookup_ordered_extent(inode, offset);
407 BUG_ON(!ordered);
408 sums->bytenr = ordered->start;
409 }
410
411 data = kmap_atomic(bvec->bv_page, KM_USER0);
412 sector_sum->sum = ~(u32)0;
413 sector_sum->sum = btrfs_csum_data(root,
414 data + bvec->bv_offset,
415 sector_sum->sum,
416 bvec->bv_len);
417 kunmap_atomic(data, KM_USER0);
418 btrfs_csum_final(sector_sum->sum,
419 (char *)&sector_sum->sum);
420 sector_sum->bytenr = disk_bytenr;
421
422 sector_sum++;
423 bio_index++;
424 total_bytes += bvec->bv_len;
425 this_sum_bytes += bvec->bv_len;
426 disk_bytenr += bvec->bv_len;
427 offset += bvec->bv_len;
428 bvec++;
429 }
430 this_sum_bytes = 0;
431 btrfs_add_ordered_sum(inode, ordered, sums);
432 btrfs_put_ordered_extent(ordered);
433 return 0;
434}
435
436/*
437 * helper function for csum removal, this expects the
438 * key to describe the csum pointed to by the path, and it expects
439 * the csum to overlap the range [bytenr, bytenr + len)
440 *
441 * The csum should not be entirely contained in the range and the
442 * range should not be entirely contained in the csum.
443 *
444 * This calls btrfs_truncate_item with the correct args based on the
445 * overlap, and fixes up the key as required.
446 */
447static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
448 struct btrfs_root *root,
449 struct btrfs_path *path,
450 struct btrfs_key *key,
451 u64 bytenr, u64 len)
452{
453 struct extent_buffer *leaf;
454 u16 csum_size =
455 btrfs_super_csum_size(&root->fs_info->super_copy);
456 u64 csum_end;
457 u64 end_byte = bytenr + len;
458 u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
459 int ret;
460
461 leaf = path->nodes[0];
462 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
463 csum_end <<= root->fs_info->sb->s_blocksize_bits;
464 csum_end += key->offset;
465
466 if (key->offset < bytenr && csum_end <= end_byte) {
467 /*
468 * [ bytenr - len ]
469 * [ ]
470 * [csum ]
471 * A simple truncate off the end of the item
472 */
473 u32 new_size = (bytenr - key->offset) >> blocksize_bits;
474 new_size *= csum_size;
475 ret = btrfs_truncate_item(trans, root, path, new_size, 1);
476 BUG_ON(ret);
477 } else if (key->offset >= bytenr && csum_end > end_byte &&
478 end_byte > key->offset) {
479 /*
480 * [ bytenr - len ]
481 * [ ]
482 * [csum ]
483 * we need to truncate from the beginning of the csum
484 */
485 u32 new_size = (csum_end - end_byte) >> blocksize_bits;
486 new_size *= csum_size;
487
488 ret = btrfs_truncate_item(trans, root, path, new_size, 0);
489 BUG_ON(ret);
490
491 key->offset = end_byte;
492 ret = btrfs_set_item_key_safe(trans, root, path, key);
493 BUG_ON(ret);
494 } else {
495 BUG();
496 }
497 return 0;
498}
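Worked numbers for the two truncation cases above (userspace sketch; assumed 4KiB blocks and crc32c): for a csum item starting at disk byte 0 and covering 16 blocks, a tail truncate keeps the csums for blocks 0-7, while a head truncate keeps blocks 8-15 and moves the key to the new start.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned bits = 12, csum_size = 4;  /* 4KiB blocks, crc32c */
	uint64_t key_offset = 0;
	uint64_t csum_end = (uint64_t)16 << bits; /* item covers [0, 64KiB) */

	/* tail truncate: remove [32768, 65536); keep csums for blocks 0-7 */
	uint64_t bytenr = 32768;
	assert(((bytenr - key_offset) >> bits) * csum_size == 8 * csum_size);

	/* head truncate: remove [0, 32768); keep csums for blocks 8-15,
	 * then the key moves to end_byte = 32768 */
	uint64_t end_byte = 32768;
	assert(((csum_end - end_byte) >> bits) * csum_size == 8 * csum_size);
	return 0;
}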
499
500/*
501 * deletes the csum items from the csum tree for a given
502 * range of bytes.
503 */
504int btrfs_del_csums(struct btrfs_trans_handle *trans,
505 struct btrfs_root *root, u64 bytenr, u64 len)
506{
507 struct btrfs_path *path;
508 struct btrfs_key key;
509 u64 end_byte = bytenr + len;
510 u64 csum_end;
511 struct extent_buffer *leaf;
512 int ret;
513 u16 csum_size =
514 btrfs_super_csum_size(&root->fs_info->super_copy);
515 int blocksize_bits = root->fs_info->sb->s_blocksize_bits;
516
517 root = root->fs_info->csum_root;
518
519 path = btrfs_alloc_path();
520
521 while (1) {
522 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
523 key.offset = end_byte - 1;
524 key.type = BTRFS_EXTENT_CSUM_KEY;
525
526 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
527 if (ret > 0) {
528 if (path->slots[0] == 0)
529 goto out;
530 path->slots[0]--;
531 }
532 leaf = path->nodes[0];
533 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
534
535 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
536 key.type != BTRFS_EXTENT_CSUM_KEY) {
537 break;
538 }
539
540 if (key.offset >= end_byte)
541 break;
542
543 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
544 csum_end <<= blocksize_bits;
545 csum_end += key.offset;
546
547 /* this csum ends before we start, we're done */
548 if (csum_end <= bytenr)
549 break;
550
551 /* delete the entire item, it is inside our range */
552 if (key.offset >= bytenr && csum_end <= end_byte) {
553 ret = btrfs_del_item(trans, root, path);
554 BUG_ON(ret);
555 if (key.offset == bytenr)
556 break;
557 } else if (key.offset < bytenr && csum_end > end_byte) {
558 unsigned long offset;
559 unsigned long shift_len;
560 unsigned long item_offset;
561 /*
562 * [ bytenr - len ]
563 * [csum ]
564 *
565 * Our bytes are in the middle of the csum,
566 * we need to split this item and insert a new one.
567 *
568 * But we can't drop the path because the
569 * csum could change, get removed, extended etc.
570 *
571 * The trick here is the max size of a csum item leaves
572 * enough room in the tree block for a single
573 * item header. So, we split the item in place,
574 * adding a new header pointing to the existing
575 * bytes. Then we loop around again and we have
576 * a nicely formed csum item that we can neatly
577 * truncate.
578 */
579 offset = (bytenr - key.offset) >> blocksize_bits;
580 offset *= csum_size;
581
582 shift_len = (len >> blocksize_bits) * csum_size;
583
584 item_offset = btrfs_item_ptr_offset(leaf,
585 path->slots[0]);
586
587 memset_extent_buffer(leaf, 0, item_offset + offset,
588 shift_len);
589 key.offset = bytenr;
590
591 /*
592 * btrfs_split_item returns -EAGAIN when the
593 * item changed size or key
594 */
595 ret = btrfs_split_item(trans, root, path, &key, offset);
596 BUG_ON(ret && ret != -EAGAIN);
597
598 key.offset = end_byte - 1;
599 } else {
600 ret = truncate_one_csum(trans, root, path,
601 &key, bytenr, len);
602 BUG_ON(ret);
603 if (key.offset < bytenr)
604 break;
605 }
606 btrfs_release_path(root, path);
607 }
608out:
609 btrfs_free_path(path);
610 return 0;
611}
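Worked numbers for the split-in-place branch above (assumed 4KiB blocks and crc32c): punching blocks 8-11 out of an item that starts at disk byte 0 zeroes 16 csum bytes at item offset 32, then btrfs_split_item() cuts the item there so the next loop iteration can truncate each half with truncate_one_csum().

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned bits = 12, csum_size = 4;  /* 4KiB blocks, crc32c */
	uint64_t key_offset = 0;
	uint64_t bytenr = (uint64_t)8 << bits;    /* punch blocks 8-11 ... */
	uint64_t len = (uint64_t)4 << bits;       /* ... out of the middle */

	unsigned long offset = ((bytenr - key_offset) >> bits) * csum_size;
	unsigned long shift_len = (len >> bits) * csum_size;

	assert(offset == 32);      /* split point inside the item */
	assert(shift_len == 16);   /* csum bytes zeroed for the hole */
	return 0;
}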
612
613int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
614 struct btrfs_root *root,
615 struct btrfs_ordered_sum *sums)
616{
617 u64 bytenr;
618 int ret;
619 struct btrfs_key file_key;
620 struct btrfs_key found_key;
621 u64 next_offset;
622 u64 total_bytes = 0;
623 int found_next;
624 struct btrfs_path *path;
625 struct btrfs_csum_item *item;
626 struct btrfs_csum_item *item_end;
627 struct extent_buffer *leaf = NULL;
628 u64 csum_offset;
629 struct btrfs_sector_sum *sector_sum;
630 u32 nritems;
631 u32 ins_size;
632 char *eb_map;
633 char *eb_token;
634 unsigned long map_len;
635 unsigned long map_start;
636 u16 csum_size =
637 btrfs_super_csum_size(&root->fs_info->super_copy);
638
639 path = btrfs_alloc_path();
640 BUG_ON(!path);
641 sector_sum = sums->sums;
642again:
643 next_offset = (u64)-1;
644 found_next = 0;
645 file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
646 file_key.offset = sector_sum->bytenr;
647 bytenr = sector_sum->bytenr;
648 btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);
649
650 item = btrfs_lookup_csum(trans, root, path, sector_sum->bytenr, 1);
651 if (!IS_ERR(item)) {
652 leaf = path->nodes[0];
653 ret = 0;
654 goto found;
655 }
656 ret = PTR_ERR(item);
657 if (ret == -EFBIG) {
658 u32 item_size;
659 /* we found one, but it isn't big enough yet */
660 leaf = path->nodes[0];
661 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
662 if ((item_size / csum_size) >=
663 MAX_CSUM_ITEMS(root, csum_size)) {
664 /* already at max size, make a new one */
665 goto insert;
666 }
667 } else {
668 int slot = path->slots[0] + 1;
669 /* we didn't find a csum item, insert one */
670 nritems = btrfs_header_nritems(path->nodes[0]);
671 if (path->slots[0] >= nritems - 1) {
672 ret = btrfs_next_leaf(root, path);
673 if (ret == 1)
674 found_next = 1;
675 if (ret != 0)
676 goto insert;
677 slot = 0;
678 }
679 btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
680 if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
681 found_key.type != BTRFS_EXTENT_CSUM_KEY) {
682 found_next = 1;
683 goto insert;
684 }
685 next_offset = found_key.offset;
686 found_next = 1;
687 goto insert;
688 }
689
690 /*
691 * at this point, we know the tree has an item, but it isn't big
692 * enough yet to put our csum in. Grow it
693 */
694 btrfs_release_path(root, path);
695 ret = btrfs_search_slot(trans, root, &file_key, path,
696 csum_size, 1);
697 if (ret < 0)
698 goto fail_unlock;
699
700 if (ret > 0) {
701 if (path->slots[0] == 0)
702 goto insert;
703 path->slots[0]--;
704 }
705
706 leaf = path->nodes[0];
707 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
708 csum_offset = (bytenr - found_key.offset) >>
709 root->fs_info->sb->s_blocksize_bits;
710
711 if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY ||
712 found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
713 csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
714 goto insert;
715 }
716
717 if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) /
718 csum_size) {
719 u32 diff = (csum_offset + 1) * csum_size;
720
721 /*
722 * is the item big enough already? we dropped our lock
723 * before and need to recheck
724 */
725 if (diff < btrfs_item_size_nr(leaf, path->slots[0]))
726 goto csum;
727
728 diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
729 if (diff != csum_size)
730 goto insert;
731
732 ret = btrfs_extend_item(trans, root, path, diff);
733 BUG_ON(ret);
734 goto csum;
735 }
736
737insert:
738 btrfs_release_path(root, path);
739 csum_offset = 0;
740 if (found_next) {
741 u64 tmp = total_bytes + root->sectorsize;
742 u64 next_sector = sector_sum->bytenr;
743 struct btrfs_sector_sum *next = sector_sum + 1;
744
745 while (tmp < sums->len) {
746 if (next_sector + root->sectorsize != next->bytenr)
747 break;
748 tmp += root->sectorsize;
749 next_sector = next->bytenr;
750 next++;
751 }
752 tmp = min(tmp, next_offset - file_key.offset);
753 tmp >>= root->fs_info->sb->s_blocksize_bits;
754 tmp = max((u64)1, tmp);
755 tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
756 ins_size = csum_size * tmp;
757 } else {
758 ins_size = csum_size;
759 }
760 ret = btrfs_insert_empty_item(trans, root, path, &file_key,
761 ins_size);
762 if (ret < 0)
763 goto fail_unlock;
764 if (ret != 0) {
765 WARN_ON(1);
766 goto fail_unlock;
767 }
768csum:
769 leaf = path->nodes[0];
770 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
771 ret = 0;
772 item = (struct btrfs_csum_item *)((unsigned char *)item +
773 csum_offset * csum_size);
774found:
775 item_end = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
776 item_end = (struct btrfs_csum_item *)((unsigned char *)item_end +
777 btrfs_item_size_nr(leaf, path->slots[0]));
778 eb_token = NULL;
779 cond_resched();
780next_sector:
781
782 if (!eb_token ||
783 (unsigned long)item + csum_size >= map_start + map_len) {
784 int err;
785
786 if (eb_token)
787 unmap_extent_buffer(leaf, eb_token, KM_USER1);
788 eb_token = NULL;
789 err = map_private_extent_buffer(leaf, (unsigned long)item,
790 csum_size,
791 &eb_token, &eb_map,
792 &map_start, &map_len, KM_USER1);
793 if (err)
794 eb_token = NULL;
795 }
796 if (eb_token) {
797 memcpy(eb_token + ((unsigned long)item & (PAGE_CACHE_SIZE - 1)),
798 &sector_sum->sum, csum_size);
799 } else {
800 write_extent_buffer(leaf, &sector_sum->sum,
801 (unsigned long)item, csum_size);
802 }
803
804 total_bytes += root->sectorsize;
805 sector_sum++;
806 if (total_bytes < sums->len) {
807 item = (struct btrfs_csum_item *)((char *)item +
808 csum_size);
809 if (item < item_end && bytenr + PAGE_CACHE_SIZE ==
810 sector_sum->bytenr) {
811 bytenr = sector_sum->bytenr;
812 goto next_sector;
813 }
814 }
815 if (eb_token) {
816 unmap_extent_buffer(leaf, eb_token, KM_USER1);
817 eb_token = NULL;
818 }
819 btrfs_mark_buffer_dirty(path->nodes[0]);
820 cond_resched();
821 if (total_bytes < sums->len) {
822 btrfs_release_path(root, path);
823 goto again;
824 }
825out:
826 btrfs_free_path(path);
827 return ret;
828
829fail_unlock:
830 goto out;
831}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
new file mode 100644
index 000000000000..90268334145e
--- /dev/null
+++ b/fs/btrfs/file.c
@@ -0,0 +1,1288 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/fs.h>
20#include <linux/pagemap.h>
21#include <linux/highmem.h>
22#include <linux/time.h>
23#include <linux/init.h>
24#include <linux/string.h>
25#include <linux/smp_lock.h>
26#include <linux/backing-dev.h>
27#include <linux/mpage.h>
28#include <linux/swap.h>
29#include <linux/writeback.h>
30#include <linux/statfs.h>
31#include <linux/compat.h>
32#include <linux/version.h>
33#include "ctree.h"
34#include "disk-io.h"
35#include "transaction.h"
36#include "btrfs_inode.h"
37#include "ioctl.h"
38#include "print-tree.h"
39#include "tree-log.h"
40#include "locking.h"
41#include "compat.h"
42
43
44/* simple helper to fault in pages and copy. This should go away
45 * and be replaced with calls into generic code.
46 */
47static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
48 int write_bytes,
49 struct page **prepared_pages,
50 const char __user *buf)
51{
52 long page_fault = 0;
53 int i;
54 int offset = pos & (PAGE_CACHE_SIZE - 1);
55
56 for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
57 size_t count = min_t(size_t,
58 PAGE_CACHE_SIZE - offset, write_bytes);
59 struct page *page = prepared_pages[i];
60 fault_in_pages_readable(buf, count);
61
62 /* Copy data from userspace to the current page */
63 kmap(page);
64 page_fault = __copy_from_user(page_address(page) + offset,
65 buf, count);
66 /* Flush processor's dcache for this page */
67 flush_dcache_page(page);
68 kunmap(page);
69 buf += count;
70 write_bytes -= count;
71
72 if (page_fault)
73 break;
74 }
75 return page_fault ? -EFAULT : 0;
76}
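The per-page bookkeeping in the copy loop, restated as a runnable userspace sketch with an assumed 4KiB page size: only the first page uses a non-zero offset, and the last page takes whatever remains.

#include <assert.h>
#include <stddef.h>

int main(void)
{
	const size_t page_size = 4096;		/* assumed */
	size_t pos = 10000, write_bytes = 9000, copied = 0;
	size_t offset = pos & (page_size - 1);	/* 1808 into page 0 */

	while (write_bytes > 0) {
		size_t count = page_size - offset;

		if (count > write_bytes)
			count = write_bytes;
		copied += count;
		write_bytes -= count;
		offset = 0;	/* later pages start at offset 0 */
	}
	assert(copied == 9000);	/* 2288 + 4096 + 2616 over three pages */
	return 0;
}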
77
78/*
79 * unlocks pages after btrfs_file_write is done with them
80 */
81static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
82{
83 size_t i;
84 for (i = 0; i < num_pages; i++) {
85 if (!pages[i])
86 break;
87 /* PageChecked is some magic around finding pages that
88 * have been modified without going through btrfs_set_page_dirty;
89 * clear it here
90 */
91 ClearPageChecked(pages[i]);
92 unlock_page(pages[i]);
93 mark_page_accessed(pages[i]);
94 page_cache_release(pages[i]);
95 }
96}
97
98/*
99 * after copy_from_user, pages need to be dirtied and we need to make
100 * sure holes are created between the current EOF and the start of
101 * any next extents (if required).
102 *
103 * this also makes the decision about creating an inline extent vs
104 * doing real data extents, marking pages dirty and delalloc as required.
105 */
106static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
107 struct btrfs_root *root,
108 struct file *file,
109 struct page **pages,
110 size_t num_pages,
111 loff_t pos,
112 size_t write_bytes)
113{
114 int err = 0;
115 int i;
116 struct inode *inode = fdentry(file)->d_inode;
117 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
118 u64 hint_byte;
119 u64 num_bytes;
120 u64 start_pos;
121 u64 end_of_last_block;
122 u64 end_pos = pos + write_bytes;
123 loff_t isize = i_size_read(inode);
124
125 start_pos = pos & ~((u64)root->sectorsize - 1);
126 num_bytes = (write_bytes + pos - start_pos +
127 root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
128
129 end_of_last_block = start_pos + num_bytes - 1;
130
131 lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
132 trans = btrfs_join_transaction(root, 1);
133 if (!trans) {
134 err = -ENOMEM;
135 goto out_unlock;
136 }
137 btrfs_set_trans_block_group(trans, inode);
138 hint_byte = 0;
139
140 set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
141
142 /* check for reserved extents on each page, we don't want
143 * to reset the delalloc bit on things that already have
144 * extents reserved.
145 */
146 btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
147 for (i = 0; i < num_pages; i++) {
148 struct page *p = pages[i];
149 SetPageUptodate(p);
150 ClearPageChecked(p);
151 set_page_dirty(p);
152 }
153 if (end_pos > isize) {
154 i_size_write(inode, end_pos);
155 btrfs_update_inode(trans, root, inode);
156 }
157 err = btrfs_end_transaction(trans, root);
158out_unlock:
159 unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
160 return err;
161}
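The alignment math above, with assumed numbers (sketch only): a 100-byte write at pos 5000 locks and marks delalloc on the single enclosing 4KiB sector.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t sectorsize = 4096;	/* assumed */
	uint64_t pos = 5000, write_bytes = 100;

	uint64_t start_pos = pos & ~(sectorsize - 1);
	uint64_t num_bytes = (write_bytes + pos - start_pos +
			      sectorsize - 1) & ~(sectorsize - 1);

	assert(start_pos == 4096);	/* round pos down to a sector */
	assert(num_bytes == 4096);	/* the write fits in one sector */
	/* end_of_last_block = 4096 + 4096 - 1 = 8191 */
	return 0;
}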
162
163/*
164 * this drops all the extents in the cache that intersect the range
165 * [start, end]. Existing extents are split as required.
166 */
167int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
168 int skip_pinned)
169{
170 struct extent_map *em;
171 struct extent_map *split = NULL;
172 struct extent_map *split2 = NULL;
173 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
174 u64 len = end - start + 1;
175 int ret;
176 int testend = 1;
177 unsigned long flags;
178 int compressed = 0;
179
180 WARN_ON(end < start);
181 if (end == (u64)-1) {
182 len = (u64)-1;
183 testend = 0;
184 }
185 while (1) {
186 if (!split)
187 split = alloc_extent_map(GFP_NOFS);
188 if (!split2)
189 split2 = alloc_extent_map(GFP_NOFS);
190
191 spin_lock(&em_tree->lock);
192 em = lookup_extent_mapping(em_tree, start, len);
193 if (!em) {
194 spin_unlock(&em_tree->lock);
195 break;
196 }
197 flags = em->flags;
198 if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
199 spin_unlock(&em_tree->lock);
200 if (em->start <= start &&
201 (!testend || em->start + em->len >= start + len)) {
202 free_extent_map(em);
203 break;
204 }
205 if (start < em->start) {
206 len = em->start - start;
207 } else {
208 len = start + len - (em->start + em->len);
209 start = em->start + em->len;
210 }
211 free_extent_map(em);
212 continue;
213 }
214 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
215 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
216 remove_extent_mapping(em_tree, em);
217
218 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
219 em->start < start) {
220 split->start = em->start;
221 split->len = start - em->start;
222 split->orig_start = em->orig_start;
223 split->block_start = em->block_start;
224
225 if (compressed)
226 split->block_len = em->block_len;
227 else
228 split->block_len = split->len;
229
230 split->bdev = em->bdev;
231 split->flags = flags;
232 ret = add_extent_mapping(em_tree, split);
233 BUG_ON(ret);
234 free_extent_map(split);
235 split = split2;
236 split2 = NULL;
237 }
238 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
239 testend && em->start + em->len > start + len) {
240 u64 diff = start + len - em->start;
241
242 split->start = start + len;
243 split->len = em->start + em->len - (start + len);
244 split->bdev = em->bdev;
245 split->flags = flags;
246
247 if (compressed) {
248 split->block_len = em->block_len;
249 split->block_start = em->block_start;
250 split->orig_start = em->orig_start;
251 } else {
252 split->block_len = split->len;
253 split->block_start = em->block_start + diff;
254 split->orig_start = split->start;
255 }
256
257 ret = add_extent_mapping(em_tree, split);
258 BUG_ON(ret);
259 free_extent_map(split);
260 split = NULL;
261 }
262 spin_unlock(&em_tree->lock);
263
264 /* once for us */
265 free_extent_map(em);
266 /* once for the tree */
267 free_extent_map(em);
268 }
269 if (split)
270 free_extent_map(split);
271 if (split2)
272 free_extent_map(split2);
273 return 0;
274}
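Worked numbers for the two split pieces above (userspace sketch, uncompressed case): dropping [4096, 8191] from a map covering file bytes [0, 16384) leaves a 4KiB front piece with the original disk offset and an 8KiB back piece whose disk offset advances by diff.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* an uncompressed map: file [0, 16384) at disk byte 1048576 */
	uint64_t em_start = 0, em_len = 16384, em_block = 1048576;
	/* drop [4096, 8191], i.e. start = 4096, len = 4096 */
	uint64_t start = 4096, len = 4096;

	uint64_t front_len = start - em_start;
	uint64_t diff = start + len - em_start;
	uint64_t back_start = start + len;
	uint64_t back_len = em_start + em_len - (start + len);

	assert(front_len == 4096);
	assert(back_start == 8192 && back_len == 8192);
	assert(em_block + diff == 1048576 + 8192);
	return 0;
}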
275
276int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
277{
278 return 0;
279#if 0
280 struct btrfs_path *path;
281 struct btrfs_key found_key;
282 struct extent_buffer *leaf;
283 struct btrfs_file_extent_item *extent;
284 u64 last_offset = 0;
285 int nritems;
286 int slot;
287 int found_type;
288 int ret;
289 int err = 0;
290 u64 extent_end = 0;
291
292 path = btrfs_alloc_path();
293 ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
294 last_offset, 0);
295 while (1) {
296 nritems = btrfs_header_nritems(path->nodes[0]);
297 if (path->slots[0] >= nritems) {
298 ret = btrfs_next_leaf(root, path);
299 if (ret)
300 goto out;
301 nritems = btrfs_header_nritems(path->nodes[0]);
302 }
303 slot = path->slots[0];
304 leaf = path->nodes[0];
305 btrfs_item_key_to_cpu(leaf, &found_key, slot);
306 if (found_key.objectid != inode->i_ino)
307 break;
308 if (found_key.type != BTRFS_EXTENT_DATA_KEY)
309 goto out;
310
311 if (found_key.offset < last_offset) {
312 WARN_ON(1);
313 btrfs_print_leaf(root, leaf);
314 printk(KERN_ERR "inode %lu found offset %llu "
315 "expected %llu\n", inode->i_ino,
316 (unsigned long long)found_key.offset,
317 (unsigned long long)last_offset);
318 err = 1;
319 goto out;
320 }
321 extent = btrfs_item_ptr(leaf, slot,
322 struct btrfs_file_extent_item);
323 found_type = btrfs_file_extent_type(leaf, extent);
324 if (found_type == BTRFS_FILE_EXTENT_REG) {
325 extent_end = found_key.offset +
326 btrfs_file_extent_num_bytes(leaf, extent);
327 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
328 struct btrfs_item *item;
329 item = btrfs_item_nr(leaf, slot);
330 extent_end = found_key.offset +
331 btrfs_file_extent_inline_len(leaf, extent);
332 extent_end = (extent_end + root->sectorsize - 1) &
333 ~((u64)root->sectorsize - 1);
334 }
335 last_offset = extent_end;
336 path->slots[0]++;
337 }
338 if (0 && last_offset < inode->i_size) {
339 WARN_ON(1);
340 btrfs_print_leaf(root, leaf);
341 printk(KERN_ERR "inode %lu found offset %llu size %llu\n",
342 inode->i_ino, (unsigned long long)last_offset,
343 (unsigned long long)inode->i_size);
344 err = 1;
345
346 }
347out:
348 btrfs_free_path(path);
349 return err;
350#endif
351}
352
353/*
354 * this is very complex, but the basic idea is to drop all extents
355 * in the range [start, end). hint_byte is filled in with a disk byte number
356 * that would be a good hint to the block allocator for this file.
357 *
358 * If an extent intersects the range but is not entirely inside the range
359 * it is either truncated or split. Anything entirely inside the range
360 * is deleted from the tree.
361 *
362 * inline_limit is used to tell this code which offsets in the file to keep
363 * if they contain inline extents.
364 */
365noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
366 struct btrfs_root *root, struct inode *inode,
367 u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
368{
369 u64 extent_end = 0;
370 u64 locked_end = end;
371 u64 search_start = start;
372 u64 leaf_start;
373 u64 ram_bytes = 0;
374 u64 orig_parent = 0;
375 u64 disk_bytenr = 0;
376 u8 compression;
377 u8 encryption;
378 u16 other_encoding = 0;
379 u64 root_gen;
380 u64 root_owner;
381 struct extent_buffer *leaf;
382 struct btrfs_file_extent_item *extent;
383 struct btrfs_path *path;
384 struct btrfs_key key;
385 struct btrfs_file_extent_item old;
386 int keep;
387 int slot;
388 int bookend;
389 int found_type = 0;
390 int found_extent;
391 int found_inline;
392 int recow;
393 int ret;
394
395 inline_limit = 0;
396 btrfs_drop_extent_cache(inode, start, end - 1, 0);
397
398 path = btrfs_alloc_path();
399 if (!path)
400 return -ENOMEM;
401 while (1) {
402 recow = 0;
403 btrfs_release_path(root, path);
404 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
405 search_start, -1);
406 if (ret < 0)
407 goto out;
408 if (ret > 0) {
409 if (path->slots[0] == 0) {
410 ret = 0;
411 goto out;
412 }
413 path->slots[0]--;
414 }
415next_slot:
416 keep = 0;
417 bookend = 0;
418 found_extent = 0;
419 found_inline = 0;
420 leaf_start = 0;
421 root_gen = 0;
422 root_owner = 0;
423 compression = 0;
424 encryption = 0;
425 extent = NULL;
426 leaf = path->nodes[0];
427 slot = path->slots[0];
428 ret = 0;
429 btrfs_item_key_to_cpu(leaf, &key, slot);
430 if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
431 key.offset >= end) {
432 goto out;
433 }
434 if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
435 key.objectid != inode->i_ino) {
436 goto out;
437 }
438 if (recow) {
439 search_start = max(key.offset, start);
440 continue;
441 }
442 if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
443 extent = btrfs_item_ptr(leaf, slot,
444 struct btrfs_file_extent_item);
445 found_type = btrfs_file_extent_type(leaf, extent);
446 compression = btrfs_file_extent_compression(leaf,
447 extent);
448 encryption = btrfs_file_extent_encryption(leaf,
449 extent);
450 other_encoding = btrfs_file_extent_other_encoding(leaf,
451 extent);
452 if (found_type == BTRFS_FILE_EXTENT_REG ||
453 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
454 extent_end =
455 btrfs_file_extent_disk_bytenr(leaf,
456 extent);
457 if (extent_end)
458 *hint_byte = extent_end;
459
460 extent_end = key.offset +
461 btrfs_file_extent_num_bytes(leaf, extent);
462 ram_bytes = btrfs_file_extent_ram_bytes(leaf,
463 extent);
464 found_extent = 1;
465 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
466 found_inline = 1;
467 extent_end = key.offset +
468 btrfs_file_extent_inline_len(leaf, extent);
469 }
470 } else {
471 extent_end = search_start;
472 }
473
474 /* we found nothing we can drop */
475 if ((!found_extent && !found_inline) ||
476 search_start >= extent_end) {
477 int nextret;
478 u32 nritems;
479 nritems = btrfs_header_nritems(leaf);
480 if (slot >= nritems - 1) {
481 nextret = btrfs_next_leaf(root, path);
482 if (nextret)
483 goto out;
484 recow = 1;
485 } else {
486 path->slots[0]++;
487 }
488 goto next_slot;
489 }
490
491 if (end <= extent_end && start >= key.offset && found_inline)
492 *hint_byte = EXTENT_MAP_INLINE;
493
494 if (found_extent) {
495 read_extent_buffer(leaf, &old, (unsigned long)extent,
496 sizeof(old));
497 root_gen = btrfs_header_generation(leaf);
498 root_owner = btrfs_header_owner(leaf);
499 leaf_start = leaf->start;
500 }
501
502 if (end < extent_end && end >= key.offset) {
503 bookend = 1;
504 if (found_inline && start <= key.offset)
505 keep = 1;
506 }
507
508 if (bookend && found_extent) {
509 if (locked_end < extent_end) {
510 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
511 locked_end, extent_end - 1,
512 GFP_NOFS);
513 if (!ret) {
514 btrfs_release_path(root, path);
515 lock_extent(&BTRFS_I(inode)->io_tree,
516 locked_end, extent_end - 1,
517 GFP_NOFS);
518 locked_end = extent_end;
519 continue;
520 }
521 locked_end = extent_end;
522 }
523 orig_parent = path->nodes[0]->start;
524 disk_bytenr = le64_to_cpu(old.disk_bytenr);
525 if (disk_bytenr != 0) {
526 ret = btrfs_inc_extent_ref(trans, root,
527 disk_bytenr,
528 le64_to_cpu(old.disk_num_bytes),
529 orig_parent, root->root_key.objectid,
530 trans->transid, inode->i_ino);
531 BUG_ON(ret);
532 }
533 }
534
535 if (found_inline) {
536 u64 mask = root->sectorsize - 1;
537 search_start = (extent_end + mask) & ~mask;
538 } else
539 search_start = extent_end;
540
541 /* truncate existing extent */
542 if (start > key.offset) {
543 u64 new_num;
544 u64 old_num;
545 keep = 1;
546 WARN_ON(start & (root->sectorsize - 1));
547 if (found_extent) {
548 new_num = start - key.offset;
549 old_num = btrfs_file_extent_num_bytes(leaf,
550 extent);
551 *hint_byte =
552 btrfs_file_extent_disk_bytenr(leaf,
553 extent);
554 if (btrfs_file_extent_disk_bytenr(leaf,
555 extent)) {
556 inode_sub_bytes(inode, old_num -
557 new_num);
558 }
559 btrfs_set_file_extent_num_bytes(leaf,
560 extent, new_num);
561 btrfs_mark_buffer_dirty(leaf);
562 } else if (key.offset < inline_limit &&
563 (end > extent_end) &&
564 (inline_limit < extent_end)) {
565 u32 new_size;
566 new_size = btrfs_file_extent_calc_inline_size(
567 inline_limit - key.offset);
568 inode_sub_bytes(inode, extent_end -
569 inline_limit);
570 btrfs_set_file_extent_ram_bytes(leaf, extent,
571 new_size);
572 if (!compression && !encryption) {
573 btrfs_truncate_item(trans, root, path,
574 new_size, 1);
575 }
576 }
577 }
578 /* delete the entire extent */
579 if (!keep) {
580 if (found_inline)
581 inode_sub_bytes(inode, extent_end -
582 key.offset);
583 ret = btrfs_del_item(trans, root, path);
584 /* TODO update progress marker and return */
585 BUG_ON(ret);
586 extent = NULL;
587 btrfs_release_path(root, path);
588 /* the extent will be freed later */
589 }
590 if (bookend && found_inline && start <= key.offset) {
591 u32 new_size;
592 new_size = btrfs_file_extent_calc_inline_size(
593 extent_end - end);
594 inode_sub_bytes(inode, end - key.offset);
595 btrfs_set_file_extent_ram_bytes(leaf, extent,
596 new_size);
597 if (!compression && !encryption)
598 ret = btrfs_truncate_item(trans, root, path,
599 new_size, 0);
600 BUG_ON(ret);
601 }
602 /* create bookend, splitting the extent in two */
603 if (bookend && found_extent) {
604 struct btrfs_key ins;
605 ins.objectid = inode->i_ino;
606 ins.offset = end;
607 btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
608
609 btrfs_release_path(root, path);
610 ret = btrfs_insert_empty_item(trans, root, path, &ins,
611 sizeof(*extent));
612 BUG_ON(ret);
613
614 leaf = path->nodes[0];
615 extent = btrfs_item_ptr(leaf, path->slots[0],
616 struct btrfs_file_extent_item);
617 write_extent_buffer(leaf, &old,
618 (unsigned long)extent, sizeof(old));
619
620 btrfs_set_file_extent_compression(leaf, extent,
621 compression);
622 btrfs_set_file_extent_encryption(leaf, extent,
623 encryption);
624 btrfs_set_file_extent_other_encoding(leaf, extent,
625 other_encoding);
626 btrfs_set_file_extent_offset(leaf, extent,
627 le64_to_cpu(old.offset) + end - key.offset);
628 WARN_ON(le64_to_cpu(old.num_bytes) <
629 (extent_end - end));
630 btrfs_set_file_extent_num_bytes(leaf, extent,
631 extent_end - end);
632
633 /*
634 * set the ram bytes to the size of the full extent
635 * before splitting. This is a worst case flag,
636 * but it's the best we can do because we don't know
637 * how splitting affects compression
638 */
639 btrfs_set_file_extent_ram_bytes(leaf, extent,
640 ram_bytes);
641 btrfs_set_file_extent_type(leaf, extent, found_type);
642
643 btrfs_mark_buffer_dirty(path->nodes[0]);
644
645 if (disk_bytenr != 0) {
646 ret = btrfs_update_extent_ref(trans, root,
647 disk_bytenr, orig_parent,
648 leaf->start,
649 root->root_key.objectid,
650 trans->transid, ins.objectid);
651
652 BUG_ON(ret);
653 }
654 btrfs_release_path(root, path);
655 if (disk_bytenr != 0)
656 inode_add_bytes(inode, extent_end - end);
657 }
658
659 if (found_extent && !keep) {
660 u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);
661
662 if (old_disk_bytenr != 0) {
663 inode_sub_bytes(inode,
664 le64_to_cpu(old.num_bytes));
665 ret = btrfs_free_extent(trans, root,
666 old_disk_bytenr,
667 le64_to_cpu(old.disk_num_bytes),
668 leaf_start, root_owner,
669 root_gen, key.objectid, 0);
670 BUG_ON(ret);
671 *hint_byte = old_disk_bytenr;
672 }
673 }
674
675 if (search_start >= end) {
676 ret = 0;
677 goto out;
678 }
679 }
680out:
681 btrfs_free_path(path);
682 if (locked_end > end) {
683 unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
684 GFP_NOFS);
685 }
686 btrfs_check_file(root, inode);
687 return ret;
688}
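A worked example of the truncate-plus-bookend path (assumed numbers, sketch only): dropping [4096, 8192) from a 16KiB regular extent at file offset 0 truncates the item to 4KiB and inserts a bookend item at offset 8192 whose data offset is advanced by the same amount; both items still point at the same disk_bytenr, which is why the extent ref is bumped first.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t key_offset = 0, extent_len = 16384;	/* item [0, 16384) */
	uint64_t start = 4096, end = 8192;		/* drop [start, end) */

	uint64_t truncated = start - key_offset;	/* front keeps 4KiB */
	uint64_t book_off = end - key_offset;		/* data offset shift */
	uint64_t book_len = (key_offset + extent_len) - end;

	assert(truncated == 4096);
	assert(book_off == 8192 && book_len == 8192);
	return 0;
}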
689
690static int extent_mergeable(struct extent_buffer *leaf, int slot,
691 u64 objectid, u64 bytenr, u64 *start, u64 *end)
692{
693 struct btrfs_file_extent_item *fi;
694 struct btrfs_key key;
695 u64 extent_end;
696
697 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
698 return 0;
699
700 btrfs_item_key_to_cpu(leaf, &key, slot);
701 if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
702 return 0;
703
704 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
705 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
706 btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
707 btrfs_file_extent_compression(leaf, fi) ||
708 btrfs_file_extent_encryption(leaf, fi) ||
709 btrfs_file_extent_other_encoding(leaf, fi))
710 return 0;
711
712 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
713 if ((*start && *start != key.offset) || (*end && *end != extent_end))
714 return 0;
715
716 *start = key.offset;
717 *end = extent_end;
718 return 1;
719}
720
721/*
722 * Mark the extent in the range [start, end) as written.
723 *
724 * This changes the extent type from 'pre-allocated' to 'regular'. If only
725 * part of the extent is marked as written, the extent will be split into
726 * two or three pieces.
727 */
728int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
729 struct btrfs_root *root,
730 struct inode *inode, u64 start, u64 end)
731{
732 struct extent_buffer *leaf;
733 struct btrfs_path *path;
734 struct btrfs_file_extent_item *fi;
735 struct btrfs_key key;
736 u64 bytenr;
737 u64 num_bytes;
738 u64 extent_end;
739 u64 extent_offset;
740 u64 other_start;
741 u64 other_end;
742 u64 split = start;
743 u64 locked_end = end;
744 u64 orig_parent;
745 int extent_type;
746 int split_end = 1;
747 int ret;
748
749 btrfs_drop_extent_cache(inode, start, end - 1, 0);
750
751 path = btrfs_alloc_path();
752 BUG_ON(!path);
753again:
754 key.objectid = inode->i_ino;
755 key.type = BTRFS_EXTENT_DATA_KEY;
756 if (split == start)
757 key.offset = split;
758 else
759 key.offset = split - 1;
760
761 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
762 if (ret > 0 && path->slots[0] > 0)
763 path->slots[0]--;
764
765 leaf = path->nodes[0];
766 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
767 BUG_ON(key.objectid != inode->i_ino ||
768 key.type != BTRFS_EXTENT_DATA_KEY);
769 fi = btrfs_item_ptr(leaf, path->slots[0],
770 struct btrfs_file_extent_item);
771 extent_type = btrfs_file_extent_type(leaf, fi);
772 BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
773 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
774 BUG_ON(key.offset > start || extent_end < end);
775
776 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
777 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
778 extent_offset = btrfs_file_extent_offset(leaf, fi);
779
780 if (key.offset == start)
781 split = end;
782
783 if (key.offset == start && extent_end == end) {
784 int del_nr = 0;
785 int del_slot = 0;
786 u64 leaf_owner = btrfs_header_owner(leaf);
787 u64 leaf_gen = btrfs_header_generation(leaf);
788 other_start = end;
789 other_end = 0;
790 if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
791 bytenr, &other_start, &other_end)) {
792 extent_end = other_end;
793 del_slot = path->slots[0] + 1;
794 del_nr++;
795 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
796 leaf->start, leaf_owner,
797 leaf_gen, inode->i_ino, 0);
798 BUG_ON(ret);
799 }
800 other_start = 0;
801 other_end = start;
802 if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
803 bytenr, &other_start, &other_end)) {
804 key.offset = other_start;
805 del_slot = path->slots[0];
806 del_nr++;
807 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
808 leaf->start, leaf_owner,
809 leaf_gen, inode->i_ino, 0);
810 BUG_ON(ret);
811 }
812 split_end = 0;
813 if (del_nr == 0) {
814 btrfs_set_file_extent_type(leaf, fi,
815 BTRFS_FILE_EXTENT_REG);
816 goto done;
817 }
818
819 fi = btrfs_item_ptr(leaf, del_slot - 1,
820 struct btrfs_file_extent_item);
821 btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
822 btrfs_set_file_extent_num_bytes(leaf, fi,
823 extent_end - key.offset);
824 btrfs_mark_buffer_dirty(leaf);
825
826 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
827 BUG_ON(ret);
828 goto done;
829 } else if (split == start) {
830 if (locked_end < extent_end) {
831 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
832 locked_end, extent_end - 1, GFP_NOFS);
833 if (!ret) {
834 btrfs_release_path(root, path);
835 lock_extent(&BTRFS_I(inode)->io_tree,
836 locked_end, extent_end - 1, GFP_NOFS);
837 locked_end = extent_end;
838 goto again;
839 }
840 locked_end = extent_end;
841 }
842 btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
843 extent_offset += split - key.offset;
844 } else {
845 BUG_ON(key.offset != start);
846 btrfs_set_file_extent_offset(leaf, fi, extent_offset +
847 split - key.offset);
848 btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
849 key.offset = split;
850 btrfs_set_item_key_safe(trans, root, path, &key);
851 extent_end = split;
852 }
853
854 if (extent_end == end) {
855 split_end = 0;
856 extent_type = BTRFS_FILE_EXTENT_REG;
857 }
858 if (extent_end == end && split == start) {
859 other_start = end;
860 other_end = 0;
861 if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
862 bytenr, &other_start, &other_end)) {
863 path->slots[0]++;
864 fi = btrfs_item_ptr(leaf, path->slots[0],
865 struct btrfs_file_extent_item);
866 key.offset = split;
867 btrfs_set_item_key_safe(trans, root, path, &key);
868 btrfs_set_file_extent_offset(leaf, fi, extent_offset);
869 btrfs_set_file_extent_num_bytes(leaf, fi,
870 other_end - split);
871 goto done;
872 }
873 }
874 if (extent_end == end && split == end) {
875 other_start = 0;
876 other_end = start;
877 if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
878 bytenr, &other_start, &other_end)) {
879 path->slots[0]--;
880 fi = btrfs_item_ptr(leaf, path->slots[0],
881 struct btrfs_file_extent_item);
882 btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
883 other_start);
884 goto done;
885 }
886 }
887
888 btrfs_mark_buffer_dirty(leaf);
889
890 orig_parent = leaf->start;
891 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
892 orig_parent, root->root_key.objectid,
893 trans->transid, inode->i_ino);
894 BUG_ON(ret);
895 btrfs_release_path(root, path);
896
897 key.offset = start;
898 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
899 BUG_ON(ret);
900
901 leaf = path->nodes[0];
902 fi = btrfs_item_ptr(leaf, path->slots[0],
903 struct btrfs_file_extent_item);
904 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
905 btrfs_set_file_extent_type(leaf, fi, extent_type);
906 btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
907 btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
908 btrfs_set_file_extent_offset(leaf, fi, extent_offset);
909 btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
910 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
911 btrfs_set_file_extent_compression(leaf, fi, 0);
912 btrfs_set_file_extent_encryption(leaf, fi, 0);
913 btrfs_set_file_extent_other_encoding(leaf, fi, 0);
914
915 if (orig_parent != leaf->start) {
916 ret = btrfs_update_extent_ref(trans, root, bytenr,
917 orig_parent, leaf->start,
918 root->root_key.objectid,
919 trans->transid, inode->i_ino);
920 BUG_ON(ret);
921 }
922done:
923 btrfs_mark_buffer_dirty(leaf);
924 btrfs_release_path(root, path);
925 if (split_end && split == start) {
926 split = end;
927 goto again;
928 }
929 if (locked_end > end) {
930 unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
931 GFP_NOFS);
932 }
933 btrfs_free_path(path);
934 return 0;
935}
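Worked numbers for the split (illustrative sketch): writing [4096, 8192) into a 16KiB preallocated extent at offset 0 takes two passes through the loop above (split == start, then split == end) and leaves three items sharing one disk_bytenr: a 4KiB PREALLOC head, a 4KiB REG middle, and an 8KiB PREALLOC tail.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t key_offset = 0, extent_end = 16384;
	uint64_t start = 4096, end = 8192;	/* the written range */

	uint64_t head = start - key_offset;	/* stays PREALLOC */
	uint64_t middle = end - start;		/* becomes REG */
	uint64_t tail = extent_end - end;	/* stays PREALLOC */

	assert(head == 4096 && middle == 4096 && tail == 8192);
	return 0;
}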
936
937/*
938 * this gets pages into the page cache and locks them down; it also properly
939 * waits for data=ordered extents to finish before allowing the pages to be
940 * modified.
941 */
942static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
943 struct page **pages, size_t num_pages,
944 loff_t pos, unsigned long first_index,
945 unsigned long last_index, size_t write_bytes)
946{
947 int i;
948 unsigned long index = pos >> PAGE_CACHE_SHIFT;
949 struct inode *inode = fdentry(file)->d_inode;
950 int err = 0;
951 u64 start_pos;
952 u64 last_pos;
953
954 start_pos = pos & ~((u64)root->sectorsize - 1);
955 last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
956
957 if (start_pos > inode->i_size) {
958 err = btrfs_cont_expand(inode, start_pos);
959 if (err)
960 return err;
961 }
962
963 memset(pages, 0, num_pages * sizeof(struct page *));
964again:
965 for (i = 0; i < num_pages; i++) {
966 pages[i] = grab_cache_page(inode->i_mapping, index + i);
967 if (!pages[i]) {
968 err = -ENOMEM;
969 BUG_ON(1);
970 }
971 wait_on_page_writeback(pages[i]);
972 }
973 if (start_pos < inode->i_size) {
974 struct btrfs_ordered_extent *ordered;
975 lock_extent(&BTRFS_I(inode)->io_tree,
976 start_pos, last_pos - 1, GFP_NOFS);
977 ordered = btrfs_lookup_first_ordered_extent(inode,
978 last_pos - 1);
979 if (ordered &&
980 ordered->file_offset + ordered->len > start_pos &&
981 ordered->file_offset < last_pos) {
982 btrfs_put_ordered_extent(ordered);
983 unlock_extent(&BTRFS_I(inode)->io_tree,
984 start_pos, last_pos - 1, GFP_NOFS);
985 for (i = 0; i < num_pages; i++) {
986 unlock_page(pages[i]);
987 page_cache_release(pages[i]);
988 }
989 btrfs_wait_ordered_range(inode, start_pos,
990 last_pos - start_pos);
991 goto again;
992 }
993 if (ordered)
994 btrfs_put_ordered_extent(ordered);
995
996 clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
997 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
998 GFP_NOFS);
999 unlock_extent(&BTRFS_I(inode)->io_tree,
1000 start_pos, last_pos - 1, GFP_NOFS);
1001 }
1002 for (i = 0; i < num_pages; i++) {
1003 clear_page_dirty_for_io(pages[i]);
1004 set_page_extent_mapped(pages[i]);
1005 WARN_ON(!PageLocked(pages[i]));
1006 }
1007 return 0;
1008}
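/*
 * Illustrative usage sketch (not part of the original change): the write
 * path below prepares each chunk roughly like
 *
 *	num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 *	ret = prepare_pages(root, file, pages, num_pages, pos,
 *			    first_index, last_index, write_bytes);
 *
 * so every page the copy touches is locked and free of pending ordered
 * IO before any user data lands in it.
 */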
1009
1010static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
1011 size_t count, loff_t *ppos)
1012{
1013 loff_t pos;
1014 loff_t start_pos;
1015 ssize_t num_written = 0;
1016 ssize_t err = 0;
1017 int ret = 0;
1018 struct inode *inode = fdentry(file)->d_inode;
1019 struct btrfs_root *root = BTRFS_I(inode)->root;
1020 struct page **pages = NULL;
1021 int nrptrs;
1022 struct page *pinned[2];
1023 unsigned long first_index;
1024 unsigned long last_index;
1025 int will_write;
1026
1027 will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
1028 (file->f_flags & O_DIRECT));
1029
1030 nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
1031 PAGE_CACHE_SIZE / (sizeof(struct page *)));
1032 pinned[0] = NULL;
1033 pinned[1] = NULL;
1034
1035 pos = *ppos;
1036 start_pos = pos;
1037
1038 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1039 current->backing_dev_info = inode->i_mapping->backing_dev_info;
1040 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1041 if (err)
1042 goto out_nolock;
1043 if (count == 0)
1044 goto out_nolock;
1045
1046 err = file_remove_suid(file);
1047 if (err)
1048 goto out_nolock;
1049 file_update_time(file);
1050
1051 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
1052
1053 mutex_lock(&inode->i_mutex);
1054 BTRFS_I(inode)->sequence++;
1055 first_index = pos >> PAGE_CACHE_SHIFT;
1056 last_index = (pos + count) >> PAGE_CACHE_SHIFT;
1057
1058 /*
1059 * there are lots of better ways to do this, but this code
1060 * makes sure the first and last page in the file range are
1061 * up to date and ready for cow
1062 */
1063 if ((pos & (PAGE_CACHE_SIZE - 1))) {
1064 pinned[0] = grab_cache_page(inode->i_mapping, first_index);
1065 if (!PageUptodate(pinned[0])) {
1066 ret = btrfs_readpage(NULL, pinned[0]);
1067 BUG_ON(ret);
1068 wait_on_page_locked(pinned[0]);
1069 } else {
1070 unlock_page(pinned[0]);
1071 }
1072 }
1073 if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
1074 pinned[1] = grab_cache_page(inode->i_mapping, last_index);
1075 if (!PageUptodate(pinned[1])) {
1076 ret = btrfs_readpage(NULL, pinned[1]);
1077 BUG_ON(ret);
1078 wait_on_page_locked(pinned[1]);
1079 } else {
1080 unlock_page(pinned[1]);
1081 }
1082 }
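	/*
	 * Illustrative example (not from the original change): with 4k
	 * pages, a 6k write at pos 1k starts and ends mid-page, so both
	 * pinned[0] (covering [0,4k)) and pinned[1] (covering [4k,8k))
	 * must be read up to date before being partially overwritten.
	 */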
1083
1084 while (count > 0) {
1085 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1086 size_t write_bytes = min(count, nrptrs *
1087 (size_t)PAGE_CACHE_SIZE -
1088 offset);
1089 size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
1090 PAGE_CACHE_SHIFT;
1091
1092 WARN_ON(num_pages > nrptrs);
1093 memset(pages, 0, sizeof(struct page *) * nrptrs);
1094
1095 ret = btrfs_check_free_space(root, write_bytes, 0);
1096 if (ret)
1097 goto out;
1098
1099 ret = prepare_pages(root, file, pages, num_pages,
1100 pos, first_index, last_index,
1101 write_bytes);
1102 if (ret)
1103 goto out;
1104
1105 ret = btrfs_copy_from_user(pos, num_pages,
1106 write_bytes, pages, buf);
1107 if (ret) {
1108 btrfs_drop_pages(pages, num_pages);
1109 goto out;
1110 }
1111
1112 ret = dirty_and_release_pages(NULL, root, file, pages,
1113 num_pages, pos, write_bytes);
1114 btrfs_drop_pages(pages, num_pages);
1115 if (ret)
1116 goto out;
1117
1118 if (will_write) {
1119 btrfs_fdatawrite_range(inode->i_mapping, pos,
1120 pos + write_bytes - 1,
1121 WB_SYNC_NONE);
1122 } else {
1123 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
1124 num_pages);
1125 if (num_pages <
1126 (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1127 btrfs_btree_balance_dirty(root, 1);
1128 btrfs_throttle(root);
1129 }
1130
1131 buf += write_bytes;
1132 count -= write_bytes;
1133 pos += write_bytes;
1134 num_written += write_bytes;
1135
1136 cond_resched();
1137 }
1138out:
1139 mutex_unlock(&inode->i_mutex);
1140
1141out_nolock:
1142 kfree(pages);
1143 if (pinned[0])
1144 page_cache_release(pinned[0]);
1145 if (pinned[1])
1146 page_cache_release(pinned[1]);
1147 *ppos = pos;
1148
1149 if (num_written > 0 && will_write) {
1150 struct btrfs_trans_handle *trans;
1151
1152 err = btrfs_wait_ordered_range(inode, start_pos, num_written);
1153 if (err)
1154 num_written = err;
1155
1156 if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
1157 trans = btrfs_start_transaction(root, 1);
1158 ret = btrfs_log_dentry_safe(trans, root,
1159 file->f_dentry);
1160 if (ret == 0) {
1161 btrfs_sync_log(trans, root);
1162 btrfs_end_transaction(trans, root);
1163 } else {
1164 btrfs_commit_transaction(trans, root);
1165 }
1166 }
1167 if (file->f_flags & O_DIRECT) {
1168 invalidate_mapping_pages(inode->i_mapping,
1169 start_pos >> PAGE_CACHE_SHIFT,
1170 (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
1171 }
1172 }
1173 current->backing_dev_info = NULL;
1174 return num_written ? num_written : err;
1175}
1176
1177int btrfs_release_file(struct inode *inode, struct file *filp)
1178{
1179 if (filp->private_data)
1180 btrfs_ioctl_trans_end(filp);
1181 return 0;
1182}
1183
1184/*
1185 * fsync call for both files and directories. This logs the inode into
1186 * the tree log instead of forcing full commits whenever possible.
1187 *
1188 * It needs to call filemap_fdatawait so that all the ordered extent updates
1189 * in the metadata btree are up to date for copying to the log.
1190 *
1191 * It drops the inode mutex before doing the tree log commit. This is an
1192 * important optimization for directories because holding the mutex prevents
1193 * new operations on the dir while we write to disk.
1194 */
1195int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1196{
1197 struct inode *inode = dentry->d_inode;
1198 struct btrfs_root *root = BTRFS_I(inode)->root;
1199 int ret = 0;
1200 struct btrfs_trans_handle *trans;
1201
1202 /*
1203 * check the transaction that last modified this inode
1204 * and see if its already been committed
1205 */
1206 if (!BTRFS_I(inode)->last_trans)
1207 goto out;
1208
1209 mutex_lock(&root->fs_info->trans_mutex);
1210 if (BTRFS_I(inode)->last_trans <=
1211 root->fs_info->last_trans_committed) {
1212 BTRFS_I(inode)->last_trans = 0;
1213 mutex_unlock(&root->fs_info->trans_mutex);
1214 goto out;
1215 }
1216 mutex_unlock(&root->fs_info->trans_mutex);
1217
1218 root->fs_info->tree_log_batch++;
1219 filemap_fdatawrite(inode->i_mapping);
1220 btrfs_wait_ordered_range(inode, 0, (u64)-1);
1221 root->fs_info->tree_log_batch++;
1222
1223 /*
1224 * ok, we haven't committed the transaction yet, let's do a commit
1225 */
1226 if (file->private_data)
1227 btrfs_ioctl_trans_end(file);
1228
1229 trans = btrfs_start_transaction(root, 1);
1230 if (!trans) {
1231 ret = -ENOMEM;
1232 goto out;
1233 }
1234
1235 ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);
1236 if (ret < 0)
1237 goto out;
1238
1239 /* we've logged all the items and now have a consistent
1240 * version of the file in the log. It is possible that
1241 * someone will come in and modify the file, but that's
1242 * fine because the log is consistent on disk, and we
1243 * have references to all of the file's extents
1244 *
1245 * It is possible that someone will come in and log the
1246 * file again, but that will end up using the synchronization
1247 * inside btrfs_sync_log to keep things safe.
1248 */
1249 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
1250
1251 if (ret > 0) {
1252 ret = btrfs_commit_transaction(trans, root);
1253 } else {
1254 btrfs_sync_log(trans, root);
1255 ret = btrfs_end_transaction(trans, root);
1256 }
1257 mutex_lock(&file->f_dentry->d_inode->i_mutex);
1258out:
1259 return ret > 0 ? -EIO : ret;
1260}
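/*
 * Illustrative example (not part of the original change): if the inode
 * was last modified in transaction 42 and last_trans_committed is
 * already 43, the check above returns without starting a transaction at
 * all; only inodes dirtied since the last commit pay for a tree-log
 * write.
 */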
1261
1262static struct vm_operations_struct btrfs_file_vm_ops = {
1263 .fault = filemap_fault,
1264 .page_mkwrite = btrfs_page_mkwrite,
1265};
1266
1267static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
1268{
1269 vma->vm_ops = &btrfs_file_vm_ops;
1270 file_accessed(filp);
1271 return 0;
1272}
1273
1274struct file_operations btrfs_file_operations = {
1275 .llseek = generic_file_llseek,
1276 .read = do_sync_read,
1277 .aio_read = generic_file_aio_read,
1278 .splice_read = generic_file_splice_read,
1279 .write = btrfs_file_write,
1280 .mmap = btrfs_file_mmap,
1281 .open = generic_file_open,
1282 .release = btrfs_release_file,
1283 .fsync = btrfs_sync_file,
1284 .unlocked_ioctl = btrfs_ioctl,
1285#ifdef CONFIG_COMPAT
1286 .compat_ioctl = btrfs_ioctl,
1287#endif
1288};
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
new file mode 100644
index 000000000000..d1e5f0e84c58
--- /dev/null
+++ b/fs/btrfs/free-space-cache.c
@@ -0,0 +1,495 @@
1/*
2 * Copyright (C) 2008 Red Hat. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include "ctree.h"
21
22static int tree_insert_offset(struct rb_root *root, u64 offset,
23 struct rb_node *node)
24{
25 struct rb_node **p = &root->rb_node;
26 struct rb_node *parent = NULL;
27 struct btrfs_free_space *info;
28
29 while (*p) {
30 parent = *p;
31 info = rb_entry(parent, struct btrfs_free_space, offset_index);
32
33 if (offset < info->offset)
34 p = &(*p)->rb_left;
35 else if (offset > info->offset)
36 p = &(*p)->rb_right;
37 else
38 return -EEXIST;
39 }
40
41 rb_link_node(node, parent, p);
42 rb_insert_color(node, root);
43
44 return 0;
45}
46
47static int tree_insert_bytes(struct rb_root *root, u64 bytes,
48 struct rb_node *node)
49{
50 struct rb_node **p = &root->rb_node;
51 struct rb_node *parent = NULL;
52 struct btrfs_free_space *info;
53
54 while (*p) {
55 parent = *p;
56 info = rb_entry(parent, struct btrfs_free_space, bytes_index);
57
58 if (bytes < info->bytes)
59 p = &(*p)->rb_left;
60 else
61 p = &(*p)->rb_right;
62 }
63
64 rb_link_node(node, parent, p);
65 rb_insert_color(node, root);
66
67 return 0;
68}
69
70/*
71 * searches the tree for the given offset. If contains is set, we return
72 * the free space that contains the given offset. If contains is not set,
73 * we return the free space that starts at or after the given offset and
74 * is at least bytes long.
75 */
76static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
77 u64 offset, u64 bytes,
78 int contains)
79{
80 struct rb_node *n = root->rb_node;
81 struct btrfs_free_space *entry, *ret = NULL;
82
83 while (n) {
84 entry = rb_entry(n, struct btrfs_free_space, offset_index);
85
86 if (offset < entry->offset) {
87 if (!contains &&
88 (!ret || entry->offset < ret->offset) &&
89 (bytes <= entry->bytes))
90 ret = entry;
91 n = n->rb_left;
92 } else if (offset > entry->offset) {
93 if ((entry->offset + entry->bytes - 1) >= offset &&
94 bytes <= entry->bytes) {
95 ret = entry;
96 break;
97 }
98 n = n->rb_right;
99 } else {
100 if (bytes > entry->bytes) {
101 n = n->rb_right;
102 continue;
103 }
104 ret = entry;
105 break;
106 }
107 }
108
109 return ret;
110}
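/*
 * Illustrative example (not part of the original change): with free
 * space entries [0,4k) and [8k,16k) in the offset tree,
 *
 *	tree_search_offset(root, 8k, 4k, 1)  -> the [8k,16k) entry
 *	tree_search_offset(root, 5k, 4k, 0)  -> the [8k,16k) entry
 *	tree_search_offset(root, 1k, 8k, 1)  -> NULL, [0,4k) is too small
 */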
111
112/*
113 * return a chunk of at least bytes in size, as close to offset as we can get.
114 */
115static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
116 u64 offset, u64 bytes)
117{
118 struct rb_node *n = root->rb_node;
119 struct btrfs_free_space *entry, *ret = NULL;
120
121 while (n) {
122 entry = rb_entry(n, struct btrfs_free_space, bytes_index);
123
124 if (bytes < entry->bytes) {
125 /*
126 * We prefer to get a hole size as close to the size we
127 * are asking for so we don't take small slivers out of
128 * huge holes, but we also want to get as close to the
129 * offset as possible so we don't have a whole lot of
130 * fragmentation.
131 */
132 if (offset <= entry->offset) {
133 if (!ret)
134 ret = entry;
135 else if (entry->bytes < ret->bytes)
136 ret = entry;
137 else if (entry->offset < ret->offset)
138 ret = entry;
139 }
140 n = n->rb_left;
141 } else if (bytes > entry->bytes) {
142 n = n->rb_right;
143 } else {
144 /*
145 * Ok, we may have multiple chunks of the wanted size,
146 * so we don't want to take the first one we find; we
147 * want to take the one closest to our given offset, so
148 * keep searching just in case there's a better match.
149 */
150 n = n->rb_right;
151 if (offset > entry->offset)
152 continue;
153 else if (!ret || entry->offset < ret->offset)
154 ret = entry;
155 }
156 }
157
158 return ret;
159}
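/*
 * Illustrative example (not part of the original change): with holes of
 * 4k at offset 0, 8k at 1M and 8k at 4M, tree_search_bytes(root, 2M, 8k)
 * returns the 8k hole at 4M: among exact-size matches it prefers the
 * smallest offset at or after the requested one.
 */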
160
161static void unlink_free_space(struct btrfs_block_group_cache *block_group,
162 struct btrfs_free_space *info)
163{
164 rb_erase(&info->offset_index, &block_group->free_space_offset);
165 rb_erase(&info->bytes_index, &block_group->free_space_bytes);
166}
167
168static int link_free_space(struct btrfs_block_group_cache *block_group,
169 struct btrfs_free_space *info)
170{
171 int ret = 0;
172
174 ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
175 &info->offset_index);
176 if (ret)
177 return ret;
178
179 ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
180 &info->bytes_index);
181 if (ret)
182 return ret;
183
184 return ret;
185}
186
187static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
188 u64 offset, u64 bytes)
189{
190 struct btrfs_free_space *right_info;
191 struct btrfs_free_space *left_info;
192 struct btrfs_free_space *info = NULL;
193 struct btrfs_free_space *alloc_info;
194 int ret = 0;
195
196 alloc_info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
197 if (!alloc_info)
198 return -ENOMEM;
199
200 /*
201 * first we want to see if there is free space adjacent to the range we
202 * are adding; if there is, remove that struct and add a new one to
203 * cover the entire range
204 */
205 right_info = tree_search_offset(&block_group->free_space_offset,
206 offset+bytes, 0, 1);
207 left_info = tree_search_offset(&block_group->free_space_offset,
208 offset-1, 0, 1);
209
210 if (right_info && right_info->offset == offset+bytes) {
211 unlink_free_space(block_group, right_info);
212 info = right_info;
213 info->offset = offset;
214 info->bytes += bytes;
215 } else if (right_info && right_info->offset != offset+bytes) {
216 printk(KERN_ERR "btrfs adding space in the middle of an "
217 "existing free space area. existing: "
218 "offset=%llu, bytes=%llu. new: offset=%llu, "
219 "bytes=%llu\n", (unsigned long long)right_info->offset,
220 (unsigned long long)right_info->bytes,
221 (unsigned long long)offset,
222 (unsigned long long)bytes);
223 BUG();
224 }
225
226 if (left_info) {
227 unlink_free_space(block_group, left_info);
228
229 if (unlikely((left_info->offset + left_info->bytes) !=
230 offset)) {
231 printk(KERN_ERR "btrfs free space to the left "
232 "of new free space isn't "
233 "quite right. existing: offset=%llu, "
234 "bytes=%llu. new: offset=%llu, bytes=%llu\n",
235 (unsigned long long)left_info->offset,
236 (unsigned long long)left_info->bytes,
237 (unsigned long long)offset,
238 (unsigned long long)bytes);
239 BUG();
240 }
241
242 if (info) {
243 info->offset = left_info->offset;
244 info->bytes += left_info->bytes;
245 kfree(left_info);
246 } else {
247 info = left_info;
248 info->bytes += bytes;
249 }
250 }
251
252 if (info) {
253 ret = link_free_space(block_group, info);
254 if (!ret)
255 info = NULL;
256 goto out;
257 }
258
259 info = alloc_info;
260 alloc_info = NULL;
261 info->offset = offset;
262 info->bytes = bytes;
263
264 ret = link_free_space(block_group, info);
265 if (ret)
266 kfree(info);
267out:
268 if (ret) {
269 printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
270 if (ret == -EEXIST)
271 BUG();
272 }
273
274 kfree(alloc_info);
275
276 return ret;
277}
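/*
 * Illustrative example (not part of the original change): with existing
 * entries [0,4k) and [8k,12k), adding [4k,8k) finds [8k,12k) as
 * right_info and [0,4k) as left_info and collapses all three ranges
 * into a single [0,12k) entry instead of leaving three fragments.
 */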
278
279static int
280__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
281 u64 offset, u64 bytes)
282{
283 struct btrfs_free_space *info;
284 int ret = 0;
285
286 info = tree_search_offset(&block_group->free_space_offset, offset, 0,
287 1);
288
289 if (info && info->offset == offset) {
290 if (info->bytes < bytes) {
291 printk(KERN_ERR "Found free space at %llu, size %llu,"
292 "trying to use %llu\n",
293 (unsigned long long)info->offset,
294 (unsigned long long)info->bytes,
295 (unsigned long long)bytes);
296 WARN_ON(1);
297 ret = -EINVAL;
298 goto out;
299 }
300 unlink_free_space(block_group, info);
301
302 if (info->bytes == bytes) {
303 kfree(info);
304 goto out;
305 }
306
307 info->offset += bytes;
308 info->bytes -= bytes;
309
310 ret = link_free_space(block_group, info);
311 BUG_ON(ret);
312 } else if (info && info->offset < offset &&
313 info->offset + info->bytes >= offset + bytes) {
314 u64 old_start = info->offset;
315 /*
316 * we're freeing space in the middle of the info,
317 * this can happen during tree log replay
318 *
319 * first unlink the old info and then
320 * insert it again after the hole we're creating
321 */
322 unlink_free_space(block_group, info);
323 if (offset + bytes < info->offset + info->bytes) {
324 u64 old_end = info->offset + info->bytes;
325
326 info->offset = offset + bytes;
327 info->bytes = old_end - info->offset;
328 ret = link_free_space(block_group, info);
329 BUG_ON(ret);
330 } else {
331 /* the hole we're creating ends at the end
332 * of the info struct, just free the info
333 */
334 kfree(info);
335 }
336
337 /* step two, insert a new info struct to cover anything
338 * before the hole
339 */
340 ret = __btrfs_add_free_space(block_group, old_start,
341 offset - old_start);
342 BUG_ON(ret);
343 } else {
344 WARN_ON(1);
345 }
346out:
347 return ret;
348}
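/*
 * Illustrative example (not part of the original change): removing
 * [40k,44k) from a single entry [32k,64k) takes the middle-of-info path
 * above: the entry is relinked as [44k,64k) and a new [32k,40k) entry
 * is inserted in front of the hole, which is what log replay expects.
 */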
349
350int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
351 u64 offset, u64 bytes)
352{
353 int ret;
354 struct btrfs_free_space *sp;
355
356 mutex_lock(&block_group->alloc_mutex);
357 ret = __btrfs_add_free_space(block_group, offset, bytes);
358 sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
359 BUG_ON(!sp);
360 mutex_unlock(&block_group->alloc_mutex);
361
362 return ret;
363}
364
365int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
366 u64 offset, u64 bytes)
367{
368 int ret;
369 struct btrfs_free_space *sp;
370
371 ret = __btrfs_add_free_space(block_group, offset, bytes);
372 sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
373 BUG_ON(!sp);
374
375 return ret;
376}
377
378int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
379 u64 offset, u64 bytes)
380{
381 int ret = 0;
382
383 mutex_lock(&block_group->alloc_mutex);
384 ret = __btrfs_remove_free_space(block_group, offset, bytes);
385 mutex_unlock(&block_group->alloc_mutex);
386
387 return ret;
388}
389
390int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
391 u64 offset, u64 bytes)
392{
393 int ret;
394
395 ret = __btrfs_remove_free_space(block_group, offset, bytes);
396
397 return ret;
398}
399
400void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
401 u64 bytes)
402{
403 struct btrfs_free_space *info;
404 struct rb_node *n;
405 int count = 0;
406
407 for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
408 info = rb_entry(n, struct btrfs_free_space, offset_index);
409 if (info->bytes >= bytes)
410 count++;
411 }
412 printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
413 "\n", count);
414}
415
416u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
417{
418 struct btrfs_free_space *info;
419 struct rb_node *n;
420 u64 ret = 0;
421
422 for (n = rb_first(&block_group->free_space_offset); n;
423 n = rb_next(n)) {
424 info = rb_entry(n, struct btrfs_free_space, offset_index);
425 ret += info->bytes;
426 }
427
428 return ret;
429}
430
431void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
432{
433 struct btrfs_free_space *info;
434 struct rb_node *node;
435
436 mutex_lock(&block_group->alloc_mutex);
437 while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
438 info = rb_entry(node, struct btrfs_free_space, bytes_index);
439 unlink_free_space(block_group, info);
440 kfree(info);
441 if (need_resched()) {
442 mutex_unlock(&block_group->alloc_mutex);
443 cond_resched();
444 mutex_lock(&block_group->alloc_mutex);
445 }
446 }
447 mutex_unlock(&block_group->alloc_mutex);
448}
449
450#if 0
451static struct btrfs_free_space *btrfs_find_free_space_offset(struct
452 btrfs_block_group_cache
453 *block_group, u64 offset,
454 u64 bytes)
455{
456 struct btrfs_free_space *ret;
457
458 mutex_lock(&block_group->alloc_mutex);
459 ret = tree_search_offset(&block_group->free_space_offset, offset,
460 bytes, 0);
461 mutex_unlock(&block_group->alloc_mutex);
462
463 return ret;
464}
465
466static struct btrfs_free_space *btrfs_find_free_space_bytes(struct
467 btrfs_block_group_cache
468 *block_group, u64 offset,
469 u64 bytes)
470{
471 struct btrfs_free_space *ret;
472
473 mutex_lock(&block_group->alloc_mutex);
474
475 ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
476 mutex_unlock(&block_group->alloc_mutex);
477
478 return ret;
479}
480#endif
481
482struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
483 *block_group, u64 offset,
484 u64 bytes)
485{
486 struct btrfs_free_space *ret = NULL;
487
488 ret = tree_search_offset(&block_group->free_space_offset, offset,
489 bytes, 0);
490 if (!ret)
491 ret = tree_search_bytes(&block_group->free_space_bytes,
492 offset, bytes);
493
494 return ret;
495}
diff --git a/fs/btrfs/hash.h b/fs/btrfs/hash.h
new file mode 100644
index 000000000000..2a020b276768
--- /dev/null
+++ b/fs/btrfs/hash.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#ifndef __HASH__
20#define __HASH__
21
22#include "crc32c.h"
23static inline u64 btrfs_name_hash(const char *name, int len)
24{
25 return btrfs_crc32c((u32)~1, name, len);
26}
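/*
 * Illustrative usage (a sketch; the real callers live elsewhere in
 * btrfs): directory items use this hash as their key offset, along the
 * lines of
 *
 *	key.offset = btrfs_name_hash(dentry->d_name.name,
 *				     dentry->d_name.len);
 *
 * The fixed (u32)~1 seed keeps the hash stable across mounts.
 */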
27#endif
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
new file mode 100644
index 000000000000..3d46fa1f29a4
--- /dev/null
+++ b/fs/btrfs/inode-item.c
@@ -0,0 +1,206 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include "ctree.h"
20#include "disk-io.h"
21#include "transaction.h"
22
23static int find_name_in_backref(struct btrfs_path *path, const char *name,
24 int name_len, struct btrfs_inode_ref **ref_ret)
25{
26 struct extent_buffer *leaf;
27 struct btrfs_inode_ref *ref;
28 unsigned long ptr;
29 unsigned long name_ptr;
30 u32 item_size;
31 u32 cur_offset = 0;
32 int len;
33
34 leaf = path->nodes[0];
35 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
36 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
37 while (cur_offset < item_size) {
38 ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
39 len = btrfs_inode_ref_name_len(leaf, ref);
40 name_ptr = (unsigned long)(ref + 1);
41 cur_offset += len + sizeof(*ref);
42 if (len != name_len)
43 continue;
44 if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) {
45 *ref_ret = ref;
46 return 1;
47 }
48 }
49 return 0;
50}
51
52int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
53 struct btrfs_root *root,
54 const char *name, int name_len,
55 u64 inode_objectid, u64 ref_objectid, u64 *index)
56{
57 struct btrfs_path *path;
58 struct btrfs_key key;
59 struct btrfs_inode_ref *ref;
60 struct extent_buffer *leaf;
61 unsigned long ptr;
62 unsigned long item_start;
63 u32 item_size;
64 u32 sub_item_len;
65 int ret;
66 int del_len = name_len + sizeof(*ref);
67
68 key.objectid = inode_objectid;
69 key.offset = ref_objectid;
70 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
71
72 path = btrfs_alloc_path();
73 if (!path)
74 return -ENOMEM;
75
76 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
77 if (ret > 0) {
78 ret = -ENOENT;
79 goto out;
80 } else if (ret < 0) {
81 goto out;
82 }
83 if (!find_name_in_backref(path, name, name_len, &ref)) {
84 ret = -ENOENT;
85 goto out;
86 }
87 leaf = path->nodes[0];
88 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
89
90 if (index)
91 *index = btrfs_inode_ref_index(leaf, ref);
92
93 if (del_len == item_size) {
94 ret = btrfs_del_item(trans, root, path);
95 goto out;
96 }
97 ptr = (unsigned long)ref;
98 sub_item_len = name_len + sizeof(*ref);
99 item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
100 memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
101 item_size - (ptr + sub_item_len - item_start));
102 ret = btrfs_truncate_item(trans, root, path,
103 item_size - sub_item_len, 1);
104 BUG_ON(ret);
105out:
106 btrfs_free_path(path);
107 return ret;
108}
109
110int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
111 struct btrfs_root *root,
112 const char *name, int name_len,
113 u64 inode_objectid, u64 ref_objectid, u64 index)
114{
115 struct btrfs_path *path;
116 struct btrfs_key key;
117 struct btrfs_inode_ref *ref;
118 unsigned long ptr;
119 int ret;
120 int ins_len = name_len + sizeof(*ref);
121
122 key.objectid = inode_objectid;
123 key.offset = ref_objectid;
124 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
125
126 path = btrfs_alloc_path();
127 if (!path)
128 return -ENOMEM;
129
130 ret = btrfs_insert_empty_item(trans, root, path, &key,
131 ins_len);
132 if (ret == -EEXIST) {
133 u32 old_size;
134
135 if (find_name_in_backref(path, name, name_len, &ref))
136 goto out;
137
138 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
139 ret = btrfs_extend_item(trans, root, path, ins_len);
140 BUG_ON(ret);
141 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
142 struct btrfs_inode_ref);
143 ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
144 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
145 btrfs_set_inode_ref_index(path->nodes[0], ref, index);
146 ptr = (unsigned long)(ref + 1);
147 ret = 0;
148 } else if (ret < 0) {
149 goto out;
150 } else {
151 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
152 struct btrfs_inode_ref);
153 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
154 btrfs_set_inode_ref_index(path->nodes[0], ref, index);
155 ptr = (unsigned long)(ref + 1);
156 }
157 write_extent_buffer(path->nodes[0], name, ptr, name_len);
158 btrfs_mark_buffer_dirty(path->nodes[0]);
159
160out:
161 btrfs_free_path(path);
162 return ret;
163}
164
165int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
166 struct btrfs_root *root,
167 struct btrfs_path *path, u64 objectid)
168{
169 struct btrfs_key key;
170 int ret;
171 key.objectid = objectid;
172 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
173 key.offset = 0;
174
175 ret = btrfs_insert_empty_item(trans, root, path, &key,
176 sizeof(struct btrfs_inode_item));
177 if (ret == 0 && objectid > root->highest_inode)
178 root->highest_inode = objectid;
179 return ret;
180}
181
182int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
183 *root, struct btrfs_path *path,
184 struct btrfs_key *location, int mod)
185{
186 int ins_len = mod < 0 ? -1 : 0;
187 int cow = mod != 0;
188 int ret;
189 int slot;
190 struct extent_buffer *leaf;
191 struct btrfs_key found_key;
192
193 ret = btrfs_search_slot(trans, root, location, path, ins_len, cow);
194 if (ret > 0 && btrfs_key_type(location) == BTRFS_ROOT_ITEM_KEY &&
195 location->offset == (u64)-1 && path->slots[0] != 0) {
196 slot = path->slots[0] - 1;
197 leaf = path->nodes[0];
198 btrfs_item_key_to_cpu(leaf, &found_key, slot);
199 if (found_key.objectid == location->objectid &&
200 btrfs_key_type(&found_key) == btrfs_key_type(location)) {
201 path->slots[0]--;
202 return 0;
203 }
204 }
205 return ret;
206}
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
new file mode 100644
index 000000000000..2aa79873eb46
--- /dev/null
+++ b/fs/btrfs/inode-map.c
@@ -0,0 +1,144 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include "ctree.h"
20#include "disk-io.h"
21#include "transaction.h"
22
23int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
24{
25 struct btrfs_path *path;
26 int ret;
27 struct extent_buffer *l;
28 struct btrfs_key search_key;
29 struct btrfs_key found_key;
30 int slot;
31
32 path = btrfs_alloc_path();
33 BUG_ON(!path);
34
35 search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
36 search_key.type = -1;
37 search_key.offset = (u64)-1;
38 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
39 if (ret < 0)
40 goto error;
41 BUG_ON(ret == 0);
42 if (path->slots[0] > 0) {
43 slot = path->slots[0] - 1;
44 l = path->nodes[0];
45 btrfs_item_key_to_cpu(l, &found_key, slot);
46 *objectid = found_key.objectid;
47 } else {
48 *objectid = BTRFS_FIRST_FREE_OBJECTID;
49 }
50 ret = 0;
51error:
52 btrfs_free_path(path);
53 return ret;
54}
55
56/*
57 * walks the btree of allocated inodes and finds a hole.
58 */
59int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
60 struct btrfs_root *root,
61 u64 dirid, u64 *objectid)
62{
63 struct btrfs_path *path;
64 struct btrfs_key key;
65 int ret;
66 int slot = 0;
67 u64 last_ino = 0;
68 int start_found;
69 struct extent_buffer *l;
70 struct btrfs_key search_key;
71 u64 search_start = dirid;
72
73 mutex_lock(&root->objectid_mutex);
74 if (root->last_inode_alloc >= BTRFS_FIRST_FREE_OBJECTID &&
75 root->last_inode_alloc < BTRFS_LAST_FREE_OBJECTID) {
76 *objectid = ++root->last_inode_alloc;
77 mutex_unlock(&root->objectid_mutex);
78 return 0;
79 }
80 path = btrfs_alloc_path();
81 BUG_ON(!path);
82 search_start = max(search_start, BTRFS_FIRST_FREE_OBJECTID);
83 search_key.objectid = search_start;
84 search_key.type = 0;
85 search_key.offset = 0;
86
87 btrfs_init_path(path);
88 start_found = 0;
89 ret = btrfs_search_slot(trans, root, &search_key, path, 0, 0);
90 if (ret < 0)
91 goto error;
92
93 while (1) {
94 l = path->nodes[0];
95 slot = path->slots[0];
96 if (slot >= btrfs_header_nritems(l)) {
97 ret = btrfs_next_leaf(root, path);
98 if (ret == 0)
99 continue;
100 if (ret < 0)
101 goto error;
102 if (!start_found) {
103 *objectid = search_start;
104 start_found = 1;
105 goto found;
106 }
107 *objectid = last_ino > search_start ?
108 last_ino : search_start;
109 goto found;
110 }
111 btrfs_item_key_to_cpu(l, &key, slot);
112 if (key.objectid >= search_start) {
113 if (start_found) {
114 if (last_ino < search_start)
115 last_ino = search_start;
116 if (key.objectid > last_ino) {
117 *objectid = last_ino;
118 goto found;
119 }
120 } else if (key.objectid > search_start) {
121 *objectid = search_start;
122 goto found;
123 }
124 }
125 if (key.objectid >= BTRFS_LAST_FREE_OBJECTID)
126 break;
127
128 start_found = 1;
129 last_ino = key.objectid + 1;
130 path->slots[0]++;
131 }
132 BUG_ON(1);
133found:
134 btrfs_release_path(root, path);
135 btrfs_free_path(path);
136 BUG_ON(*objectid < search_start);
137 mutex_unlock(&root->objectid_mutex);
138 return 0;
139error:
140 btrfs_release_path(root, path);
141 btrfs_free_path(path);
142 mutex_unlock(&root->objectid_mutex);
143 return ret;
144}
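/*
 * Illustrative example (not part of the original change): if objectids
 * 256..260 and 262 are in use, the scan sees key.objectid == 262 while
 * last_ino == 261 and hands out 261; past the end of the tree it simply
 * returns the highest allocated objectid plus one.
 */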
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
new file mode 100644
index 000000000000..8adfe059ab41
--- /dev/null
+++ b/fs/btrfs/inode.c
@@ -0,0 +1,5035 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/kernel.h>
20#include <linux/bio.h>
21#include <linux/buffer_head.h>
22#include <linux/file.h>
23#include <linux/fs.h>
24#include <linux/pagemap.h>
25#include <linux/highmem.h>
26#include <linux/time.h>
27#include <linux/init.h>
28#include <linux/string.h>
29#include <linux/smp_lock.h>
30#include <linux/backing-dev.h>
31#include <linux/mpage.h>
32#include <linux/swap.h>
33#include <linux/writeback.h>
34#include <linux/statfs.h>
35#include <linux/compat.h>
36#include <linux/bit_spinlock.h>
37#include <linux/version.h>
38#include <linux/xattr.h>
39#include <linux/posix_acl.h>
40#include <linux/falloc.h>
41#include "compat.h"
42#include "ctree.h"
43#include "disk-io.h"
44#include "transaction.h"
45#include "btrfs_inode.h"
46#include "ioctl.h"
47#include "print-tree.h"
48#include "volumes.h"
49#include "ordered-data.h"
50#include "xattr.h"
51#include "tree-log.h"
52#include "ref-cache.h"
53#include "compression.h"
54
55struct btrfs_iget_args {
56 u64 ino;
57 struct btrfs_root *root;
58};
59
60static struct inode_operations btrfs_dir_inode_operations;
61static struct inode_operations btrfs_symlink_inode_operations;
62static struct inode_operations btrfs_dir_ro_inode_operations;
63static struct inode_operations btrfs_special_inode_operations;
64static struct inode_operations btrfs_file_inode_operations;
65static struct address_space_operations btrfs_aops;
66static struct address_space_operations btrfs_symlink_aops;
67static struct file_operations btrfs_dir_file_operations;
68static struct extent_io_ops btrfs_extent_io_ops;
69
70static struct kmem_cache *btrfs_inode_cachep;
71struct kmem_cache *btrfs_trans_handle_cachep;
72struct kmem_cache *btrfs_transaction_cachep;
73struct kmem_cache *btrfs_bit_radix_cachep;
74struct kmem_cache *btrfs_path_cachep;
75
76#define S_SHIFT 12
77static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
78 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
79 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
80 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
81 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
82 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
83 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
84 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
85};
86
87static void btrfs_truncate(struct inode *inode);
88static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
89static noinline int cow_file_range(struct inode *inode,
90 struct page *locked_page,
91 u64 start, u64 end, int *page_started,
92 unsigned long *nr_written, int unlock);
93
94/*
95 * a very lame attempt at stopping writes when the FS is 85% full. There
96 * are countless ways this is incorrect, but it is better than nothing.
97 */
98int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
99 int for_del)
100{
101 u64 total;
102 u64 used;
103 u64 thresh;
104 int ret = 0;
105
106 spin_lock(&root->fs_info->delalloc_lock);
107 total = btrfs_super_total_bytes(&root->fs_info->super_copy);
108 used = btrfs_super_bytes_used(&root->fs_info->super_copy);
109 if (for_del)
110 thresh = total * 90;
111 else
112 thresh = total * 85;
113
114 do_div(thresh, 100);
115
116 if (used + root->fs_info->delalloc_bytes + num_required > thresh)
117 ret = -ENOSPC;
118 spin_unlock(&root->fs_info->delalloc_lock);
119 return ret;
120}
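/*
 * Illustrative arithmetic (not part of the original change): on a
 * 100GiB filesystem with 80GiB used and 6GiB of delalloc in flight, a
 * write is refused because 86GiB exceeds the 85GiB threshold, while a
 * deletion still fits under the more generous 90GiB threshold.
 */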
121
122/*
123 * this does all the hard work for inserting an inline extent into
124 * the btree. The caller should have done a btrfs_drop_extents so that
125 * no overlapping inline items exist in the btree
126 */
127static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
128 struct btrfs_root *root, struct inode *inode,
129 u64 start, size_t size, size_t compressed_size,
130 struct page **compressed_pages)
131{
132 struct btrfs_key key;
133 struct btrfs_path *path;
134 struct extent_buffer *leaf;
135 struct page *page = NULL;
136 char *kaddr;
137 unsigned long ptr;
138 struct btrfs_file_extent_item *ei;
139 int err = 0;
140 int ret;
141 size_t cur_size = size;
142 size_t datasize;
143 unsigned long offset;
144 int use_compress = 0;
145
146 if (compressed_size && compressed_pages) {
147 use_compress = 1;
148 cur_size = compressed_size;
149 }
150
151 path = btrfs_alloc_path();
152 if (!path)
153 return -ENOMEM;
154
155 btrfs_set_trans_block_group(trans, inode);
156
157 key.objectid = inode->i_ino;
158 key.offset = start;
159 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
160 datasize = btrfs_file_extent_calc_inline_size(cur_size);
161
162 inode_add_bytes(inode, size);
163 ret = btrfs_insert_empty_item(trans, root, path, &key,
164 datasize);
166 if (ret) {
167 err = ret;
168 goto fail;
169 }
170 leaf = path->nodes[0];
171 ei = btrfs_item_ptr(leaf, path->slots[0],
172 struct btrfs_file_extent_item);
173 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
174 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
175 btrfs_set_file_extent_encryption(leaf, ei, 0);
176 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
177 btrfs_set_file_extent_ram_bytes(leaf, ei, size);
178 ptr = btrfs_file_extent_inline_start(ei);
179
180 if (use_compress) {
181 struct page *cpage;
182 int i = 0;
183 while (compressed_size > 0) {
184 cpage = compressed_pages[i];
185 cur_size = min_t(unsigned long, compressed_size,
186 PAGE_CACHE_SIZE);
187
188 kaddr = kmap(cpage);
189 write_extent_buffer(leaf, kaddr, ptr, cur_size);
190 kunmap(cpage);
191
192 i++;
193 ptr += cur_size;
194 compressed_size -= cur_size;
195 }
196 btrfs_set_file_extent_compression(leaf, ei,
197 BTRFS_COMPRESS_ZLIB);
198 } else {
199 page = find_get_page(inode->i_mapping,
200 start >> PAGE_CACHE_SHIFT);
201 btrfs_set_file_extent_compression(leaf, ei, 0);
202 kaddr = kmap_atomic(page, KM_USER0);
203 offset = start & (PAGE_CACHE_SIZE - 1);
204 write_extent_buffer(leaf, kaddr + offset, ptr, size);
205 kunmap_atomic(kaddr, KM_USER0);
206 page_cache_release(page);
207 }
208 btrfs_mark_buffer_dirty(leaf);
209 btrfs_free_path(path);
210
211 BTRFS_I(inode)->disk_i_size = inode->i_size;
212 btrfs_update_inode(trans, root, inode);
213 return 0;
214fail:
215 btrfs_free_path(path);
216 return err;
217}
218
219
220/*
221 * conditionally insert an inline extent into the file. This
222 * does the checks required to make sure the data is small enough
223 * to fit as an inline extent.
224 */
225static int cow_file_range_inline(struct btrfs_trans_handle *trans,
226 struct btrfs_root *root,
227 struct inode *inode, u64 start, u64 end,
228 size_t compressed_size,
229 struct page **compressed_pages)
230{
231 u64 isize = i_size_read(inode);
232 u64 actual_end = min(end + 1, isize);
233 u64 inline_len = actual_end - start;
234 u64 aligned_end = (end + root->sectorsize - 1) &
235 ~((u64)root->sectorsize - 1);
236 u64 hint_byte;
237 u64 data_len = inline_len;
238 int ret;
239
240 if (compressed_size)
241 data_len = compressed_size;
242
243 if (start > 0 ||
244 actual_end >= PAGE_CACHE_SIZE ||
245 data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
246 (!compressed_size &&
247 (actual_end & (root->sectorsize - 1)) == 0) ||
248 end + 1 < isize ||
249 data_len > root->fs_info->max_inline) {
250 return 1;
251 }
252
253 ret = btrfs_drop_extents(trans, root, inode, start,
254 aligned_end, start, &hint_byte);
255 BUG_ON(ret);
256
257 if (isize > actual_end)
258 inline_len = min_t(u64, isize, actual_end);
259 ret = insert_inline_extent(trans, root, inode, start,
260 inline_len, compressed_size,
261 compressed_pages);
262 BUG_ON(ret);
263 btrfs_drop_extent_cache(inode, start, aligned_end, 0);
264 return 0;
265}
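/*
 * Illustrative example (not part of the original change, and assuming
 * the default max_inline limit): a 700 byte file written at offset 0
 * passes every check above and is stored inline; the same data written
 * at offset 8k, or an exactly sector-aligned amount of uncompressed
 * data, falls through to the normal extent allocation path.
 */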
266
267struct async_extent {
268 u64 start;
269 u64 ram_size;
270 u64 compressed_size;
271 struct page **pages;
272 unsigned long nr_pages;
273 struct list_head list;
274};
275
276struct async_cow {
277 struct inode *inode;
278 struct btrfs_root *root;
279 struct page *locked_page;
280 u64 start;
281 u64 end;
282 struct list_head extents;
283 struct btrfs_work work;
284};
285
286static noinline int add_async_extent(struct async_cow *cow,
287 u64 start, u64 ram_size,
288 u64 compressed_size,
289 struct page **pages,
290 unsigned long nr_pages)
291{
292 struct async_extent *async_extent;
293
294 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
295 async_extent->start = start;
296 async_extent->ram_size = ram_size;
297 async_extent->compressed_size = compressed_size;
298 async_extent->pages = pages;
299 async_extent->nr_pages = nr_pages;
300 list_add_tail(&async_extent->list, &cow->extents);
301 return 0;
302}
303
304/*
305 * we create compressed extents in two phases. The first
306 * phase compresses a range of pages that have already been
307 * locked (both pages and state bits are locked).
308 *
309 * This is done inside an ordered work queue, and the compression
310 * is spread across many cpus. The actual IO submission is step
311 * two, and the ordered work queue takes care of making sure that
312 * happens in the same order things were put onto the queue by
313 * writepages and friends.
314 *
315 * If this code finds it can't get good compression, it puts an
316 * entry onto the work queue to write the uncompressed bytes. This
317 * makes sure that both compressed inodes and uncompressed inodes
318 * are written in the same order that pdflush sent them down.
319 */
320static noinline int compress_file_range(struct inode *inode,
321 struct page *locked_page,
322 u64 start, u64 end,
323 struct async_cow *async_cow,
324 int *num_added)
325{
326 struct btrfs_root *root = BTRFS_I(inode)->root;
327 struct btrfs_trans_handle *trans;
328 u64 num_bytes;
329 u64 orig_start;
330 u64 disk_num_bytes;
331 u64 blocksize = root->sectorsize;
332 u64 actual_end;
333 u64 isize = i_size_read(inode);
334 int ret = 0;
335 struct page **pages = NULL;
336 unsigned long nr_pages;
337 unsigned long nr_pages_ret = 0;
338 unsigned long total_compressed = 0;
339 unsigned long total_in = 0;
340 unsigned long max_compressed = 128 * 1024;
341 unsigned long max_uncompressed = 128 * 1024;
342 int i;
343 int will_compress;
344
345 orig_start = start;
346
347 actual_end = min_t(u64, isize, end + 1);
348again:
349 will_compress = 0;
350 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
351 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
352
353 total_compressed = actual_end - start;
354
355 /* we want to make sure that amount of ram required to uncompress
356 * an extent is reasonable, so we limit the total size in ram
357 * of a compressed extent to 128k. This is a crucial number
358 * because it also controls how easily we can spread reads across
359 * cpus for decompression.
360 *
361 * We also want to make sure the amount of IO required to do
362 * a random read is reasonably small, so we limit the size of
363 * a compressed extent to 128k.
364 */
365 total_compressed = min(total_compressed, max_uncompressed);
366 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
367 num_bytes = max(blocksize, num_bytes);
368 disk_num_bytes = num_bytes;
369 total_in = 0;
370 ret = 0;
371
372 /*
373 * we do compression for mount -o compress and when the
374 * inode has not been flagged as nocompress. This flag can
375 * change at any time if we discover bad compression ratios.
376 */
377 if (!btrfs_test_flag(inode, NOCOMPRESS) &&
378 btrfs_test_opt(root, COMPRESS)) {
379 WARN_ON(pages);
380 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
381
382 ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
383 total_compressed, pages,
384 nr_pages, &nr_pages_ret,
385 &total_in,
386 &total_compressed,
387 max_compressed);
388
389 if (!ret) {
390 unsigned long offset = total_compressed &
391 (PAGE_CACHE_SIZE - 1);
392 struct page *page = pages[nr_pages_ret - 1];
393 char *kaddr;
394
395 /* zero the tail end of the last page, we might be
396 * sending it down to disk
397 */
398 if (offset) {
399 kaddr = kmap_atomic(page, KM_USER0);
400 memset(kaddr + offset, 0,
401 PAGE_CACHE_SIZE - offset);
402 kunmap_atomic(kaddr, KM_USER0);
403 }
404 will_compress = 1;
405 }
406 }
407 if (start == 0) {
408 trans = btrfs_join_transaction(root, 1);
409 BUG_ON(!trans);
410 btrfs_set_trans_block_group(trans, inode);
411
412 /* let's try to make an inline extent */
413 if (ret || total_in < (actual_end - start)) {
414 /* we didn't compress the entire range, try
415 * to make an uncompressed inline extent.
416 */
417 ret = cow_file_range_inline(trans, root, inode,
418 start, end, 0, NULL);
419 } else {
420 /* try making a compressed inline extent */
421 ret = cow_file_range_inline(trans, root, inode,
422 start, end,
423 total_compressed, pages);
424 }
425 btrfs_end_transaction(trans, root);
426 if (ret == 0) {
427 /*
428 * inline extent creation worked, we don't need
429 * to create any more async work items. Unlock
430 * and free up our temp pages.
431 */
432 extent_clear_unlock_delalloc(inode,
433 &BTRFS_I(inode)->io_tree,
434 start, end, NULL, 1, 0,
435 0, 1, 1, 1);
436 ret = 0;
437 goto free_pages_out;
438 }
439 }
440
441 if (will_compress) {
442 /*
443 * we aren't doing an inline extent, so round the compressed size
444 * up to a block size boundary so that the allocator does sane
445 * things
446 */
447 total_compressed = (total_compressed + blocksize - 1) &
448 ~(blocksize - 1);
449
450 /*
451 * one last check to make sure the compression is really a
452 * win, compare the page count read with the blocks on disk
453 */
454 total_in = (total_in + PAGE_CACHE_SIZE - 1) &
455 ~(PAGE_CACHE_SIZE - 1);
456 if (total_compressed >= total_in) {
457 will_compress = 0;
458 } else {
459 disk_num_bytes = total_compressed;
460 num_bytes = total_in;
461 }
462 }
463 if (!will_compress && pages) {
464 /*
465 * the compression code ran but failed to make things smaller,
466 * free any pages it allocated and our page pointer array
467 */
468 for (i = 0; i < nr_pages_ret; i++) {
469 WARN_ON(pages[i]->mapping);
470 page_cache_release(pages[i]);
471 }
472 kfree(pages);
473 pages = NULL;
474 total_compressed = 0;
475 nr_pages_ret = 0;
476
477 /* flag the file so we don't compress in the future */
478 btrfs_set_flag(inode, NOCOMPRESS);
479 }
480 if (will_compress) {
481 *num_added += 1;
482
483 /* the async work queues will take care of doing actual
484 * allocation on disk for these compressed pages,
485 * and will submit them to the elevator.
486 */
487 add_async_extent(async_cow, start, num_bytes,
488 total_compressed, pages, nr_pages_ret);
489
490 if (start + num_bytes < end && start + num_bytes < actual_end) {
491 start += num_bytes;
492 pages = NULL;
493 cond_resched();
494 goto again;
495 }
496 } else {
497 /*
498 * No compression, but we still need to write the pages in
499 * the file we've been given so far. Redirty the locked
500 * page if it corresponds to our extent and set things up
501 * for the async work queue to run cow_file_range to do
502 * the normal delalloc dance
503 */
504 if (page_offset(locked_page) >= start &&
505 page_offset(locked_page) <= end) {
506 __set_page_dirty_nobuffers(locked_page);
507 /* unlocked later on in the async handlers */
508 }
509 add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
510 *num_added += 1;
511 }
512
513out:
514 return 0;
515
516free_pages_out:
517 for (i = 0; i < nr_pages_ret; i++) {
518 WARN_ON(pages[i]->mapping);
519 page_cache_release(pages[i]);
520 }
521 kfree(pages);
522
523 goto out;
524}
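/*
 * Illustrative walk-through (not part of the original change): a 1MiB
 * dirty range is carved into 128k pieces here; each piece is deflated
 * and queued with add_async_extent(), and phase two
 * (submit_compressed_extents) later allocates the disk extents and
 * submits the bios in queue order, matching the writepages order.
 */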
525
526/*
527 * phase two of compressed writeback. This is the ordered portion
528 * of the code, which only gets called in the order the work was
529 * queued. We walk all the async extents created by compress_file_range
530 * and send them down to the disk.
531 */
532static noinline int submit_compressed_extents(struct inode *inode,
533 struct async_cow *async_cow)
534{
535 struct async_extent *async_extent;
536 u64 alloc_hint = 0;
537 struct btrfs_trans_handle *trans;
538 struct btrfs_key ins;
539 struct extent_map *em;
540 struct btrfs_root *root = BTRFS_I(inode)->root;
541 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
542 struct extent_io_tree *io_tree;
543 int ret;
544
545 if (list_empty(&async_cow->extents))
546 return 0;
547
548 trans = btrfs_join_transaction(root, 1);
549
550 while (!list_empty(&async_cow->extents)) {
551 async_extent = list_entry(async_cow->extents.next,
552 struct async_extent, list);
553 list_del(&async_extent->list);
554
555 io_tree = &BTRFS_I(inode)->io_tree;
556
557 /* did the compression code fall back to uncompressed IO? */
558 if (!async_extent->pages) {
559 int page_started = 0;
560 unsigned long nr_written = 0;
561
562 lock_extent(io_tree, async_extent->start,
563 async_extent->start +
564 async_extent->ram_size - 1, GFP_NOFS);
565
566 /* allocate blocks */
567 cow_file_range(inode, async_cow->locked_page,
568 async_extent->start,
569 async_extent->start +
570 async_extent->ram_size - 1,
571 &page_started, &nr_written, 0);
572
573 /*
574 * if page_started, cow_file_range inserted an
575 * inline extent and took care of all the unlocking
576 * and IO for us. Otherwise, we need to submit
577 * all those pages down to the drive.
578 */
579 if (!page_started)
580 extent_write_locked_range(io_tree,
581 inode, async_extent->start,
582 async_extent->start +
583 async_extent->ram_size - 1,
584 btrfs_get_extent,
585 WB_SYNC_ALL);
586 kfree(async_extent);
587 cond_resched();
588 continue;
589 }
590
591 lock_extent(io_tree, async_extent->start,
592 async_extent->start + async_extent->ram_size - 1,
593 GFP_NOFS);
594 /*
595 * here we're doing allocation and writeback of the
596 * compressed pages
597 */
598 btrfs_drop_extent_cache(inode, async_extent->start,
599 async_extent->start +
600 async_extent->ram_size - 1, 0);
601
602 ret = btrfs_reserve_extent(trans, root,
603 async_extent->compressed_size,
604 async_extent->compressed_size,
605 0, alloc_hint,
606 (u64)-1, &ins, 1);
607 BUG_ON(ret);
608 em = alloc_extent_map(GFP_NOFS);
609 em->start = async_extent->start;
610 em->len = async_extent->ram_size;
611 em->orig_start = em->start;
612
613 em->block_start = ins.objectid;
614 em->block_len = ins.offset;
615 em->bdev = root->fs_info->fs_devices->latest_bdev;
616 set_bit(EXTENT_FLAG_PINNED, &em->flags);
617 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
618
619 while (1) {
620 spin_lock(&em_tree->lock);
621 ret = add_extent_mapping(em_tree, em);
622 spin_unlock(&em_tree->lock);
623 if (ret != -EEXIST) {
624 free_extent_map(em);
625 break;
626 }
627 btrfs_drop_extent_cache(inode, async_extent->start,
628 async_extent->start +
629 async_extent->ram_size - 1, 0);
630 }
631
632 ret = btrfs_add_ordered_extent(inode, async_extent->start,
633 ins.objectid,
634 async_extent->ram_size,
635 ins.offset,
636 BTRFS_ORDERED_COMPRESSED);
637 BUG_ON(ret);
638
639 btrfs_end_transaction(trans, root);
640
641 /*
642 * clear dirty, set writeback and unlock the pages.
643 */
644 extent_clear_unlock_delalloc(inode,
645 &BTRFS_I(inode)->io_tree,
646 async_extent->start,
647 async_extent->start +
648 async_extent->ram_size - 1,
649 NULL, 1, 1, 0, 1, 1, 0);
650
651 ret = btrfs_submit_compressed_write(inode,
652 async_extent->start,
653 async_extent->ram_size,
654 ins.objectid,
655 ins.offset, async_extent->pages,
656 async_extent->nr_pages);
657
658 BUG_ON(ret);
659 trans = btrfs_join_transaction(root, 1);
660 alloc_hint = ins.objectid + ins.offset;
661 kfree(async_extent);
662 cond_resched();
663 }
664
665 btrfs_end_transaction(trans, root);
666 return 0;
667}
668
669/*
670 * when extent_io.c finds a delayed allocation range in the file,
671 * the callbacks end up in this code. The basic idea is to
672 * allocate extents on disk for the range, and create ordered data structs
673 * in ram to track those extents.
674 *
675 * locked_page is the page that writepage had locked already. We use
676 * it to make sure we don't do extra locks or unlocks.
677 *
678 * *page_started is set to one if we unlock locked_page and do everything
679 * required to start IO on it. It may be clean and already done with
680 * IO when we return.
681 */
682static noinline int cow_file_range(struct inode *inode,
683 struct page *locked_page,
684 u64 start, u64 end, int *page_started,
685 unsigned long *nr_written,
686 int unlock)
687{
688 struct btrfs_root *root = BTRFS_I(inode)->root;
689 struct btrfs_trans_handle *trans;
690 u64 alloc_hint = 0;
691 u64 num_bytes;
692 unsigned long ram_size;
693 u64 disk_num_bytes;
694 u64 cur_alloc_size;
695 u64 blocksize = root->sectorsize;
696 u64 actual_end;
697 u64 isize = i_size_read(inode);
698 struct btrfs_key ins;
699 struct extent_map *em;
700 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
701 int ret = 0;
702
703 trans = btrfs_join_transaction(root, 1);
704 BUG_ON(!trans);
705 btrfs_set_trans_block_group(trans, inode);
706
707 actual_end = min_t(u64, isize, end + 1);
708
709 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
710 num_bytes = max(blocksize, num_bytes);
711 disk_num_bytes = num_bytes;
712 ret = 0;
713
714 if (start == 0) {
715 /* lets try to make an inline extent */
716 ret = cow_file_range_inline(trans, root, inode,
717 start, end, 0, NULL);
718 if (ret == 0) {
719 extent_clear_unlock_delalloc(inode,
720 &BTRFS_I(inode)->io_tree,
721 start, end, NULL, 1, 1,
722 1, 1, 1, 1);
723 *nr_written = *nr_written +
724 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
725 *page_started = 1;
726 ret = 0;
727 goto out;
728 }
729 }
730
731 BUG_ON(disk_num_bytes >
732 btrfs_super_total_bytes(&root->fs_info->super_copy));
733
734 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
735
736 while (disk_num_bytes > 0) {
737 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
738 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
739 root->sectorsize, 0, alloc_hint,
740 (u64)-1, &ins, 1);
741 BUG_ON(ret);
742
743 em = alloc_extent_map(GFP_NOFS);
744 em->start = start;
745 em->orig_start = em->start;
746
747 ram_size = ins.offset;
748 em->len = ins.offset;
749
750 em->block_start = ins.objectid;
751 em->block_len = ins.offset;
752 em->bdev = root->fs_info->fs_devices->latest_bdev;
753 set_bit(EXTENT_FLAG_PINNED, &em->flags);
754
755 while (1) {
756 spin_lock(&em_tree->lock);
757 ret = add_extent_mapping(em_tree, em);
758 spin_unlock(&em_tree->lock);
759 if (ret != -EEXIST) {
760 free_extent_map(em);
761 break;
762 }
763 btrfs_drop_extent_cache(inode, start,
764 start + ram_size - 1, 0);
765 }
766
767 cur_alloc_size = ins.offset;
768 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
769 ram_size, cur_alloc_size, 0);
770 BUG_ON(ret);
771
772 if (root->root_key.objectid ==
773 BTRFS_DATA_RELOC_TREE_OBJECTID) {
774 ret = btrfs_reloc_clone_csums(inode, start,
775 cur_alloc_size);
776 BUG_ON(ret);
777 }
778
779 if (disk_num_bytes < cur_alloc_size)
780 break;
781
782 /* we're not doing compressed IO, don't unlock the first
783 * page (which the caller expects to stay locked), don't
784 * clear any dirty bits and don't set any writeback bits
785 */
786 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
787 start, start + ram_size - 1,
788 locked_page, unlock, 1,
789 1, 0, 0, 0);
790 disk_num_bytes -= cur_alloc_size;
791 num_bytes -= cur_alloc_size;
792 alloc_hint = ins.objectid + ins.offset;
793 start += cur_alloc_size;
794 }
795out:
796 ret = 0;
797 btrfs_end_transaction(trans, root);
798
799 return ret;
800}
801
802/*
803 * work queue callback to start compression on a file's pages
804 */
805static noinline void async_cow_start(struct btrfs_work *work)
806{
807 struct async_cow *async_cow;
808 int num_added = 0;
809 async_cow = container_of(work, struct async_cow, work);
810
811 compress_file_range(async_cow->inode, async_cow->locked_page,
812 async_cow->start, async_cow->end, async_cow,
813 &num_added);
814 if (num_added == 0)
815 async_cow->inode = NULL;
816}
817
818/*
819 * work queue callback to submit previously compressed pages
820 */
821static noinline void async_cow_submit(struct btrfs_work *work)
822{
823 struct async_cow *async_cow;
824 struct btrfs_root *root;
825 unsigned long nr_pages;
826
827 async_cow = container_of(work, struct async_cow, work);
828
829 root = async_cow->root;
830 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
831 PAGE_CACHE_SHIFT;
832
833 atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
834
835 if (atomic_read(&root->fs_info->async_delalloc_pages) <
836	    5 * 1024 * 1024 &&
837 waitqueue_active(&root->fs_info->async_submit_wait))
838 wake_up(&root->fs_info->async_submit_wait);
839
840 if (async_cow->inode)
841 submit_compressed_extents(async_cow->inode, async_cow);
842}
843
844static noinline void async_cow_free(struct btrfs_work *work)
845{
846 struct async_cow *async_cow;
847 async_cow = container_of(work, struct async_cow, work);
848 kfree(async_cow);
849}
850
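/*
 * async version of cow_file_range: when the compress mount option is
 * off it just calls cow_file_range directly. Otherwise it splits the
 * range into 512k chunks (the whole range for NOCOMPRESS inodes) and
 * queues each chunk to the delalloc worker threads, throttling once
 * too many async delalloc pages are in flight.
 */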
851static int cow_file_range_async(struct inode *inode, struct page *locked_page,
852 u64 start, u64 end, int *page_started,
853 unsigned long *nr_written)
854{
855 struct async_cow *async_cow;
856 struct btrfs_root *root = BTRFS_I(inode)->root;
857 unsigned long nr_pages;
858 u64 cur_end;
859	int limit = 10 * 1024 * 1024;
860
861 if (!btrfs_test_opt(root, COMPRESS)) {
862 return cow_file_range(inode, locked_page, start, end,
863 page_started, nr_written, 1);
864 }
865
866 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
867 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
868 while (start < end) {
869 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
870 async_cow->inode = inode;
871 async_cow->root = root;
872 async_cow->locked_page = locked_page;
873 async_cow->start = start;
874
875 if (btrfs_test_flag(inode, NOCOMPRESS))
876 cur_end = end;
877 else
878 cur_end = min(end, start + 512 * 1024 - 1);
879
880 async_cow->end = cur_end;
881 INIT_LIST_HEAD(&async_cow->extents);
882
883 async_cow->work.func = async_cow_start;
884 async_cow->work.ordered_func = async_cow_submit;
885 async_cow->work.ordered_free = async_cow_free;
886 async_cow->work.flags = 0;
887
888 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
889 PAGE_CACHE_SHIFT;
890 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
891
892 btrfs_queue_worker(&root->fs_info->delalloc_workers,
893 &async_cow->work);
894
895 if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
896 wait_event(root->fs_info->async_submit_wait,
897 (atomic_read(&root->fs_info->async_delalloc_pages) <
898 limit));
899 }
900
901 while (atomic_read(&root->fs_info->async_submit_draining) &&
902 atomic_read(&root->fs_info->async_delalloc_pages)) {
903 wait_event(root->fs_info->async_submit_wait,
904 (atomic_read(&root->fs_info->async_delalloc_pages) ==
905 0));
906 }
907
908 *nr_written += nr_pages;
909 start = cur_end + 1;
910 }
911 *page_started = 1;
912 return 0;
913}
914
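/*
 * returns 1 if any csums are recorded in the csum tree for the given
 * byte range, 0 otherwise. Any sums found are freed before returning.
 */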
915static noinline int csum_exist_in_range(struct btrfs_root *root,
916 u64 bytenr, u64 num_bytes)
917{
918 int ret;
919 struct btrfs_ordered_sum *sums;
920 LIST_HEAD(list);
921
922 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
923 bytenr + num_bytes - 1, &list);
924 if (ret == 0 && list_empty(&list))
925 return 0;
926
927 while (!list_empty(&list)) {
928 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
929 list_del(&sums->list);
930 kfree(sums);
931 }
932 return 1;
933}
934
935/*
936 * callback for nocow writeback. This checks for snapshots or COW copies
937 * of the extents that exist in the file, and COWs the file as required.
938 *
939 * If no cow copies or snapshots exist, we write directly to the existing
940 * blocks on disk
941 */
942static int run_delalloc_nocow(struct inode *inode, struct page *locked_page,
943 u64 start, u64 end, int *page_started, int force,
944 unsigned long *nr_written)
945{
946 struct btrfs_root *root = BTRFS_I(inode)->root;
947 struct btrfs_trans_handle *trans;
948 struct extent_buffer *leaf;
949 struct btrfs_path *path;
950 struct btrfs_file_extent_item *fi;
951 struct btrfs_key found_key;
952 u64 cow_start;
953 u64 cur_offset;
954 u64 extent_end;
955 u64 disk_bytenr;
956 u64 num_bytes;
957 int extent_type;
958 int ret;
959 int type;
960 int nocow;
961 int check_prev = 1;
962
963 path = btrfs_alloc_path();
964 BUG_ON(!path);
965 trans = btrfs_join_transaction(root, 1);
966 BUG_ON(!trans);
967
968 cow_start = (u64)-1;
969 cur_offset = start;
970 while (1) {
971 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
972 cur_offset, 0);
973 BUG_ON(ret < 0);
974 if (ret > 0 && path->slots[0] > 0 && check_prev) {
975 leaf = path->nodes[0];
976 btrfs_item_key_to_cpu(leaf, &found_key,
977 path->slots[0] - 1);
978 if (found_key.objectid == inode->i_ino &&
979 found_key.type == BTRFS_EXTENT_DATA_KEY)
980 path->slots[0]--;
981 }
982 check_prev = 0;
983next_slot:
984 leaf = path->nodes[0];
985 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
986 ret = btrfs_next_leaf(root, path);
987 if (ret < 0)
988 BUG_ON(1);
989 if (ret > 0)
990 break;
991 leaf = path->nodes[0];
992 }
993
994 nocow = 0;
995 disk_bytenr = 0;
996 num_bytes = 0;
997 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
998
999 if (found_key.objectid > inode->i_ino ||
1000 found_key.type > BTRFS_EXTENT_DATA_KEY ||
1001 found_key.offset > end)
1002 break;
1003
1004 if (found_key.offset > cur_offset) {
1005 extent_end = found_key.offset;
1006 goto out_check;
1007 }
1008
1009 fi = btrfs_item_ptr(leaf, path->slots[0],
1010 struct btrfs_file_extent_item);
1011 extent_type = btrfs_file_extent_type(leaf, fi);
1012
1013 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1014 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1015 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1016 extent_end = found_key.offset +
1017 btrfs_file_extent_num_bytes(leaf, fi);
1018 if (extent_end <= start) {
1019 path->slots[0]++;
1020 goto next_slot;
1021 }
1022 if (disk_bytenr == 0)
1023 goto out_check;
1024 if (btrfs_file_extent_compression(leaf, fi) ||
1025 btrfs_file_extent_encryption(leaf, fi) ||
1026 btrfs_file_extent_other_encoding(leaf, fi))
1027 goto out_check;
1028 if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1029 goto out_check;
1030 if (btrfs_extent_readonly(root, disk_bytenr))
1031 goto out_check;
1032 if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1033 disk_bytenr))
1034 goto out_check;
1035 disk_bytenr += btrfs_file_extent_offset(leaf, fi);
1036 disk_bytenr += cur_offset - found_key.offset;
1037 num_bytes = min(end + 1, extent_end) - cur_offset;
1038 /*
1039 * force cow if csum exists in the range.
1040			 * this ensures that csums for a given extent are
1041 * either valid or do not exist.
1042 */
1043 if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1044 goto out_check;
1045 nocow = 1;
1046 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1047 extent_end = found_key.offset +
1048 btrfs_file_extent_inline_len(leaf, fi);
1049 extent_end = ALIGN(extent_end, root->sectorsize);
1050 } else {
1051 BUG_ON(1);
1052 }
1053out_check:
1054 if (extent_end <= start) {
1055 path->slots[0]++;
1056 goto next_slot;
1057 }
1058 if (!nocow) {
1059 if (cow_start == (u64)-1)
1060 cow_start = cur_offset;
1061 cur_offset = extent_end;
1062 if (cur_offset > end)
1063 break;
1064 path->slots[0]++;
1065 goto next_slot;
1066 }
1067
1068 btrfs_release_path(root, path);
1069 if (cow_start != (u64)-1) {
1070 ret = cow_file_range(inode, locked_page, cow_start,
1071 found_key.offset - 1, page_started,
1072 nr_written, 1);
1073 BUG_ON(ret);
1074 cow_start = (u64)-1;
1075 }
1076
1077 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1078 struct extent_map *em;
1079 struct extent_map_tree *em_tree;
1080 em_tree = &BTRFS_I(inode)->extent_tree;
1081 em = alloc_extent_map(GFP_NOFS);
1082 em->start = cur_offset;
1083 em->orig_start = em->start;
1084 em->len = num_bytes;
1085 em->block_len = num_bytes;
1086 em->block_start = disk_bytenr;
1087 em->bdev = root->fs_info->fs_devices->latest_bdev;
1088 set_bit(EXTENT_FLAG_PINNED, &em->flags);
1089 while (1) {
1090 spin_lock(&em_tree->lock);
1091 ret = add_extent_mapping(em_tree, em);
1092 spin_unlock(&em_tree->lock);
1093 if (ret != -EEXIST) {
1094 free_extent_map(em);
1095 break;
1096 }
1097 btrfs_drop_extent_cache(inode, em->start,
1098 em->start + em->len - 1, 0);
1099 }
1100 type = BTRFS_ORDERED_PREALLOC;
1101 } else {
1102 type = BTRFS_ORDERED_NOCOW;
1103 }
1104
1105 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1106 num_bytes, num_bytes, type);
1107 BUG_ON(ret);
1108
1109 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1110 cur_offset, cur_offset + num_bytes - 1,
1111 locked_page, 1, 1, 1, 0, 0, 0);
1112 cur_offset = extent_end;
1113 if (cur_offset > end)
1114 break;
1115 }
1116 btrfs_release_path(root, path);
1117
1118 if (cur_offset <= end && cow_start == (u64)-1)
1119 cow_start = cur_offset;
1120 if (cow_start != (u64)-1) {
1121 ret = cow_file_range(inode, locked_page, cow_start, end,
1122 page_started, nr_written, 1);
1123 BUG_ON(ret);
1124 }
1125
1126 ret = btrfs_end_transaction(trans, root);
1127 BUG_ON(ret);
1128 btrfs_free_path(path);
1129 return 0;
1130}
1131
1132/*
1133 * extent_io.c callback to do delayed allocation processing
1134 */
1135static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1136 u64 start, u64 end, int *page_started,
1137 unsigned long *nr_written)
1138{
1139 int ret;
1140
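	/*
	 * NODATACOW inodes try to write in place over any existing
	 * extent, PREALLOC inodes only write in place into preallocated
	 * extents; everything else goes through the async COW (and
	 * optional compression) path.
	 */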
1141 if (btrfs_test_flag(inode, NODATACOW))
1142 ret = run_delalloc_nocow(inode, locked_page, start, end,
1143 page_started, 1, nr_written);
1144 else if (btrfs_test_flag(inode, PREALLOC))
1145 ret = run_delalloc_nocow(inode, locked_page, start, end,
1146 page_started, 0, nr_written);
1147 else
1148 ret = cow_file_range_async(inode, locked_page, start, end,
1149 page_started, nr_written);
1150
1151 return ret;
1152}
1153
1154/*
1155 * extent_io.c set_bit_hook, used to track delayed allocation
1156 * bytes in this file, and to maintain the list of inodes that
1157 * have pending delalloc work to be done.
1158 */
1159static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1160 unsigned long old, unsigned long bits)
1161{
1162 /*
1163	 * set_bit and clear_bit hooks normally require _irqsave/restore
1164	 * but in this case, we are only testing for the DELALLOC
1165 * bit, which is only set or cleared with irqs on
1166 */
1167 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1168 struct btrfs_root *root = BTRFS_I(inode)->root;
1169 spin_lock(&root->fs_info->delalloc_lock);
1170 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1171 root->fs_info->delalloc_bytes += end - start + 1;
1172 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1173 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1174 &root->fs_info->delalloc_inodes);
1175 }
1176 spin_unlock(&root->fs_info->delalloc_lock);
1177 }
1178 return 0;
1179}
1180
1181/*
1182 * extent_io.c clear_bit_hook, see set_bit_hook for why
1183 */
1184static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
1185 unsigned long old, unsigned long bits)
1186{
1187 /*
1188	 * set_bit and clear_bit hooks normally require _irqsave/restore
1189	 * but in this case, we are only testing for the DELALLOC
1190 * bit, which is only set or cleared with irqs on
1191 */
1192 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1193 struct btrfs_root *root = BTRFS_I(inode)->root;
1194
1195 spin_lock(&root->fs_info->delalloc_lock);
1196 if (end - start + 1 > root->fs_info->delalloc_bytes) {
1197 printk(KERN_INFO "btrfs warning: delalloc account "
1198 "%llu %llu\n",
1199			       (unsigned long long)(end - start + 1),
1200 (unsigned long long)
1201 root->fs_info->delalloc_bytes);
1202 root->fs_info->delalloc_bytes = 0;
1203 BTRFS_I(inode)->delalloc_bytes = 0;
1204 } else {
1205 root->fs_info->delalloc_bytes -= end - start + 1;
1206 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
1207 }
1208 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1209 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1210 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1211 }
1212 spin_unlock(&root->fs_info->delalloc_lock);
1213 }
1214 return 0;
1215}
1216
1217/*
1218 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1219 * we don't create bios that span stripes or chunks
1220 */
1221int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1222 size_t size, struct bio *bio,
1223 unsigned long bio_flags)
1224{
1225 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1226 struct btrfs_mapping_tree *map_tree;
1227 u64 logical = (u64)bio->bi_sector << 9;
1228 u64 length = 0;
1229 u64 map_length;
1230 int ret;
1231
1232 if (bio_flags & EXTENT_BIO_COMPRESSED)
1233 return 0;
1234
1235 length = bio->bi_size;
1236 map_tree = &root->fs_info->mapping_tree;
1237 map_length = length;
1238 ret = btrfs_map_block(map_tree, READ, logical,
1239 &map_length, NULL, 0);
1240
1241 if (map_length < length + size)
1242 return 1;
1243 return 0;
1244}
1245
1246/*
1247 * in order to insert checksums into the metadata in large chunks,
1248 * we wait until bio submission time. All the pages in the bio are
1249 * checksummed and sums are attached onto the ordered extent record.
1250 *
1251 * At IO completion time the csums attached on the ordered extent record
1252 * are inserted into the btree
1253 */
1254static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1255 struct bio *bio, int mirror_num,
1256 unsigned long bio_flags)
1257{
1258 struct btrfs_root *root = BTRFS_I(inode)->root;
1259 int ret = 0;
1260
1261 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1262 BUG_ON(ret);
1263 return 0;
1264}
1265
1266/*
1267 * in order to insert checksums into the metadata in large chunks,
1268 * we wait until bio submission time. All the pages in the bio are
1269 * checksummed and sums are attached onto the ordered extent record.
1270 *
1271 * At IO completion time the csums attached on the ordered extent record
1272 * are inserted into the btree
1273 */
1274static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1275 int mirror_num, unsigned long bio_flags)
1276{
1277 struct btrfs_root *root = BTRFS_I(inode)->root;
1278 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1279}
1280
1281/*
1282 * extent_io.c submission hook. This does the right thing for csum calculation
1283 * on write, or reading the csums from the tree before a read
1284 */
1285static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1286 int mirror_num, unsigned long bio_flags)
1287{
1288 struct btrfs_root *root = BTRFS_I(inode)->root;
1289 int ret = 0;
1290 int skip_sum;
1291
1292 skip_sum = btrfs_test_flag(inode, NODATASUM);
1293
1294 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1295 BUG_ON(ret);
1296
1297 if (!(rw & (1 << BIO_RW))) {
1298 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1299 return btrfs_submit_compressed_read(inode, bio,
1300 mirror_num, bio_flags);
1301 } else if (!skip_sum)
1302 btrfs_lookup_bio_sums(root, inode, bio, NULL);
1303 goto mapit;
1304 } else if (!skip_sum) {
1305 /* csum items have already been cloned */
1306 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1307 goto mapit;
1308 /* we're doing a write, do the async checksumming */
1309 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1310 inode, rw, bio, mirror_num,
1311 bio_flags, __btrfs_submit_bio_start,
1312 __btrfs_submit_bio_done);
1313 }
1314
1315mapit:
1316 return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1317}
1318
1319/*
1320 * given a list of ordered sums, record them in the inode. This happens
1321 * at IO completion time based on sums calculated at bio submission time.
1322 */
1323static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1324 struct inode *inode, u64 file_offset,
1325 struct list_head *list)
1326{
1327 struct list_head *cur;
1328 struct btrfs_ordered_sum *sum;
1329
1330 btrfs_set_trans_block_group(trans, inode);
1331 list_for_each(cur, list) {
1332 sum = list_entry(cur, struct btrfs_ordered_sum, list);
1333 btrfs_csum_file_blocks(trans,
1334 BTRFS_I(inode)->root->fs_info->csum_root, sum);
1335 }
1336 return 0;
1337}
1338
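/*
 * mark a range delalloc in the io_tree. end is expected to be the
 * last byte of a page, so a page aligned value trips the WARN_ON.
 */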
1339int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
1340{
1341 if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1342 WARN_ON(1);
1343 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1344 GFP_NOFS);
1345}
1346
1347/* see btrfs_writepage_start_hook for details on why this is required */
1348struct btrfs_writepage_fixup {
1349 struct page *page;
1350 struct btrfs_work work;
1351};
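/*
 * worker for the fixup path below: waits out any ordered extent that
 * already covers the page, then re-marks the range delalloc so the
 * next writepage call does proper COW and accounting.
 */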
1352
1353static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1354{
1355 struct btrfs_writepage_fixup *fixup;
1356 struct btrfs_ordered_extent *ordered;
1357 struct page *page;
1358 struct inode *inode;
1359 u64 page_start;
1360 u64 page_end;
1361
1362 fixup = container_of(work, struct btrfs_writepage_fixup, work);
1363 page = fixup->page;
1364again:
1365 lock_page(page);
1366 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1367 ClearPageChecked(page);
1368 goto out_page;
1369 }
1370
1371 inode = page->mapping->host;
1372 page_start = page_offset(page);
1373 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1374
1375 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1376
1377 /* already ordered? We're done */
1378 if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
1379 EXTENT_ORDERED, 0)) {
1380 goto out;
1381 }
1382
1383 ordered = btrfs_lookup_ordered_extent(inode, page_start);
1384 if (ordered) {
1385 unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
1386 page_end, GFP_NOFS);
1387 unlock_page(page);
1388 btrfs_start_ordered_extent(inode, ordered, 1);
1389 goto again;
1390 }
1391
1392 btrfs_set_extent_delalloc(inode, page_start, page_end);
1393 ClearPageChecked(page);
1394out:
1395 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1396out_page:
1397 unlock_page(page);
1398 page_cache_release(page);
1399}
1400
1401/*
1402 * There are a few paths in the higher layers of the kernel that directly
1403 * set the page dirty bit without asking the filesystem if it is a
1404 * good idea. This causes problems because we want to make sure COW
1405 * properly happens and the data=ordered rules are followed.
1406 *
1407 * In our case any range that doesn't have the ORDERED bit set
1408 * hasn't been properly setup for IO. We kick off an async process
1409 * to fix it up. The async helper will wait for ordered extents, set
1410 * the delalloc bit and make it safe to write the page.
1411 */
1412static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1413{
1414 struct inode *inode = page->mapping->host;
1415 struct btrfs_writepage_fixup *fixup;
1416 struct btrfs_root *root = BTRFS_I(inode)->root;
1417 int ret;
1418
1419 ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1420 EXTENT_ORDERED, 0);
1421 if (ret)
1422 return 0;
1423
1424 if (PageChecked(page))
1425 return -EAGAIN;
1426
1427 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1428 if (!fixup)
1429 return -EAGAIN;
1430
1431 SetPageChecked(page);
1432 page_cache_get(page);
1433 fixup->work.func = btrfs_writepage_fixup_worker;
1434 fixup->page = page;
1435 btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1436 return -EAGAIN;
1437}
1438
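/*
 * insert the file extent item for an extent that was reserved earlier:
 * drop any old extents in the range, insert the new item into the
 * file tree and record the allocation in the extent tree.
 */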
1439static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1440 struct inode *inode, u64 file_pos,
1441 u64 disk_bytenr, u64 disk_num_bytes,
1442 u64 num_bytes, u64 ram_bytes,
1443 u8 compression, u8 encryption,
1444 u16 other_encoding, int extent_type)
1445{
1446 struct btrfs_root *root = BTRFS_I(inode)->root;
1447 struct btrfs_file_extent_item *fi;
1448 struct btrfs_path *path;
1449 struct extent_buffer *leaf;
1450 struct btrfs_key ins;
1451 u64 hint;
1452 int ret;
1453
1454 path = btrfs_alloc_path();
1455 BUG_ON(!path);
1456
1457 ret = btrfs_drop_extents(trans, root, inode, file_pos,
1458 file_pos + num_bytes, file_pos, &hint);
1459 BUG_ON(ret);
1460
1461 ins.objectid = inode->i_ino;
1462 ins.offset = file_pos;
1463 ins.type = BTRFS_EXTENT_DATA_KEY;
1464 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1465 BUG_ON(ret);
1466 leaf = path->nodes[0];
1467 fi = btrfs_item_ptr(leaf, path->slots[0],
1468 struct btrfs_file_extent_item);
1469 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1470 btrfs_set_file_extent_type(leaf, fi, extent_type);
1471 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1472 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1473 btrfs_set_file_extent_offset(leaf, fi, 0);
1474 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1475 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1476 btrfs_set_file_extent_compression(leaf, fi, compression);
1477 btrfs_set_file_extent_encryption(leaf, fi, encryption);
1478 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1479 btrfs_mark_buffer_dirty(leaf);
1480
1481 inode_add_bytes(inode, num_bytes);
1482 btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);
1483
1484 ins.objectid = disk_bytenr;
1485 ins.offset = disk_num_bytes;
1486 ins.type = BTRFS_EXTENT_ITEM_KEY;
1487 ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
1488 root->root_key.objectid,
1489 trans->transid, inode->i_ino, &ins);
1490 BUG_ON(ret);
1491
1492 btrfs_free_path(path);
1493 return 0;
1494}
1495
1496/* as ordered data IO finishes, this gets called so we can finish
1497 * an ordered extent once the range of bytes in the file it covers
1498 * has been fully written.
1499 */
1500static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1501{
1502 struct btrfs_root *root = BTRFS_I(inode)->root;
1503 struct btrfs_trans_handle *trans;
1504 struct btrfs_ordered_extent *ordered_extent;
1505 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1506 int compressed = 0;
1507 int ret;
1508
1509 ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1510 if (!ret)
1511 return 0;
1512
1513 trans = btrfs_join_transaction(root, 1);
1514
1515 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1516 BUG_ON(!ordered_extent);
1517 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
1518 goto nocow;
1519
1520 lock_extent(io_tree, ordered_extent->file_offset,
1521 ordered_extent->file_offset + ordered_extent->len - 1,
1522 GFP_NOFS);
1523
1524 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1525 compressed = 1;
1526 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1527 BUG_ON(compressed);
1528 ret = btrfs_mark_extent_written(trans, root, inode,
1529 ordered_extent->file_offset,
1530 ordered_extent->file_offset +
1531 ordered_extent->len);
1532 BUG_ON(ret);
1533 } else {
1534 ret = insert_reserved_file_extent(trans, inode,
1535 ordered_extent->file_offset,
1536 ordered_extent->start,
1537 ordered_extent->disk_len,
1538 ordered_extent->len,
1539 ordered_extent->len,
1540 compressed, 0, 0,
1541 BTRFS_FILE_EXTENT_REG);
1542 BUG_ON(ret);
1543 }
1544 unlock_extent(io_tree, ordered_extent->file_offset,
1545 ordered_extent->file_offset + ordered_extent->len - 1,
1546 GFP_NOFS);
1547nocow:
1548 add_pending_csums(trans, inode, ordered_extent->file_offset,
1549 &ordered_extent->list);
1550
1551 mutex_lock(&BTRFS_I(inode)->extent_mutex);
1552 btrfs_ordered_update_i_size(inode, ordered_extent);
1553 btrfs_update_inode(trans, root, inode);
1554 btrfs_remove_ordered_extent(inode, ordered_extent);
1555 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1556
1557 /* once for us */
1558 btrfs_put_ordered_extent(ordered_extent);
1559 /* once for the tree */
1560 btrfs_put_ordered_extent(ordered_extent);
1561
1562 btrfs_end_transaction(trans, root);
1563 return 0;
1564}
1565
1566static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1567 struct extent_state *state, int uptodate)
1568{
1569 return btrfs_finish_ordered_io(page->mapping->host, start, end);
1570}
1571
1572/*
1573 * When IO fails, either with EIO or csum verification fails, we
1574 * try other mirrors that might have a good copy of the data. This
1575 * io_failure_record is used to record state as we go through all the
1576 * mirrors. If another mirror has good data, the page is set up to date
1577 * and things continue. If a good mirror can't be found, the original
1578 * bio end_io callback is called to indicate things have failed.
1579 */
1580struct io_failure_record {
1581 struct page *page;
1582 u64 start;
1583 u64 len;
1584 u64 logical;
1585 unsigned long bio_flags;
1586 int last_mirror;
1587};
1588
1589static int btrfs_io_failed_hook(struct bio *failed_bio,
1590 struct page *page, u64 start, u64 end,
1591 struct extent_state *state)
1592{
1593 struct io_failure_record *failrec = NULL;
1594 u64 private;
1595 struct extent_map *em;
1596 struct inode *inode = page->mapping->host;
1597 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1598 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1599 struct bio *bio;
1600 int num_copies;
1601 int ret;
1602 int rw;
1603 u64 logical;
1604
1605 ret = get_state_private(failure_tree, start, &private);
1606 if (ret) {
1607 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1608 if (!failrec)
1609 return -ENOMEM;
1610 failrec->start = start;
1611 failrec->len = end - start + 1;
1612 failrec->last_mirror = 0;
1613 failrec->bio_flags = 0;
1614
1615 spin_lock(&em_tree->lock);
1616 em = lookup_extent_mapping(em_tree, start, failrec->len);
1617		if (em && (em->start > start || em->start + em->len < start)) {
1618 free_extent_map(em);
1619 em = NULL;
1620 }
1621 spin_unlock(&em_tree->lock);
1622
1623 if (!em || IS_ERR(em)) {
1624 kfree(failrec);
1625 return -EIO;
1626 }
1627 logical = start - em->start;
1628 logical = em->block_start + logical;
1629 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1630 logical = em->block_start;
1631 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1632 }
1633 failrec->logical = logical;
1634 free_extent_map(em);
1635 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1636 EXTENT_DIRTY, GFP_NOFS);
1637 set_state_private(failure_tree, start,
1638 (u64)(unsigned long)failrec);
1639 } else {
1640 failrec = (struct io_failure_record *)(unsigned long)private;
1641 }
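	/*
	 * try the next mirror. If we run out of mirrors, or can't find
	 * the extent state we recorded, clean up and let the original
	 * end_io callback report the failure.
	 */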
1642 num_copies = btrfs_num_copies(
1643 &BTRFS_I(inode)->root->fs_info->mapping_tree,
1644 failrec->logical, failrec->len);
1645 failrec->last_mirror++;
1646 if (!state) {
1647 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1648 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1649 failrec->start,
1650 EXTENT_LOCKED);
1651 if (state && state->start != failrec->start)
1652 state = NULL;
1653 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1654 }
1655 if (!state || failrec->last_mirror > num_copies) {
1656 set_state_private(failure_tree, failrec->start, 0);
1657 clear_extent_bits(failure_tree, failrec->start,
1658 failrec->start + failrec->len - 1,
1659 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1660 kfree(failrec);
1661 return -EIO;
1662 }
1663 bio = bio_alloc(GFP_NOFS, 1);
1664 bio->bi_private = state;
1665 bio->bi_end_io = failed_bio->bi_end_io;
1666 bio->bi_sector = failrec->logical >> 9;
1667 bio->bi_bdev = failed_bio->bi_bdev;
1668 bio->bi_size = 0;
1669
1670 bio_add_page(bio, page, failrec->len, start - page_offset(page));
1671 if (failed_bio->bi_rw & (1 << BIO_RW))
1672 rw = WRITE;
1673 else
1674 rw = READ;
1675
1676 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1677 failrec->last_mirror,
1678 failrec->bio_flags);
1679 return 0;
1680}
1681
1682/*
1683 * each time an IO finishes, we do a fast check in the IO failure tree
1684 * to see if we need to process or clean up an io_failure_record
1685 */
1686static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1687{
1688 u64 private;
1689 u64 private_failure;
1690 struct io_failure_record *failure;
1691 int ret;
1692
1693 private = 0;
1694 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1695 (u64)-1, 1, EXTENT_DIRTY)) {
1696 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1697 start, &private_failure);
1698 if (ret == 0) {
1699 failure = (struct io_failure_record *)(unsigned long)
1700 private_failure;
1701 set_state_private(&BTRFS_I(inode)->io_failure_tree,
1702 failure->start, 0);
1703 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1704 failure->start,
1705 failure->start + failure->len - 1,
1706 EXTENT_DIRTY | EXTENT_LOCKED,
1707 GFP_NOFS);
1708 kfree(failure);
1709 }
1710 }
1711 return 0;
1712}
1713
1714/*
1715 * when reads are done, we need to check csums to verify the data is correct
1716 * if there's a match, we allow the bio to finish. If not, we go through
1717 * the io_failure_record routines to find good copies
1718 */
1719static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1720 struct extent_state *state)
1721{
1722 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1723 struct inode *inode = page->mapping->host;
1724 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1725 char *kaddr;
1726 u64 private = ~(u32)0;
1727 int ret;
1728 struct btrfs_root *root = BTRFS_I(inode)->root;
1729 u32 csum = ~(u32)0;
1730
1731 if (PageChecked(page)) {
1732 ClearPageChecked(page);
1733 goto good;
1734 }
1735 if (btrfs_test_flag(inode, NODATASUM))
1736 return 0;
1737
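	/*
	 * ranges the relocation tree copied without csums are flagged
	 * EXTENT_NODATASUM; skip verification and clear the bit for those
	 */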
1738 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1739 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
1740 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1741 GFP_NOFS);
1742 return 0;
1743 }
1744
1745 if (state && state->start == start) {
1746 private = state->private;
1747 ret = 0;
1748 } else {
1749 ret = get_state_private(io_tree, start, &private);
1750 }
1751 kaddr = kmap_atomic(page, KM_USER0);
1752 if (ret)
1753 goto zeroit;
1754
1755 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1756 btrfs_csum_final(csum, (char *)&csum);
1757 if (csum != private)
1758 goto zeroit;
1759
1760 kunmap_atomic(kaddr, KM_USER0);
1761good:
1762 /* if the io failure tree for this inode is non-empty,
1763 * check to see if we've recovered from a failed IO
1764 */
1765 btrfs_clean_io_failures(inode, start);
1766 return 0;
1767
1768zeroit:
1769 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1770 "private %llu\n", page->mapping->host->i_ino,
1771 (unsigned long long)start, csum,
1772 (unsigned long long)private);
1773 memset(kaddr + offset, 1, end - start + 1);
1774 flush_dcache_page(page);
1775 kunmap_atomic(kaddr, KM_USER0);
1776 if (private == 0)
1777 return 0;
1778 return -EIO;
1779}
1780
1781/*
1782 * This creates an orphan entry for the given inode in case something goes
1783 * wrong in the middle of an unlink/truncate.
1784 */
1785int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
1786{
1787 struct btrfs_root *root = BTRFS_I(inode)->root;
1788 int ret = 0;
1789
1790 spin_lock(&root->list_lock);
1791
1792 /* already on the orphan list, we're good */
1793 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
1794 spin_unlock(&root->list_lock);
1795 return 0;
1796 }
1797
1798 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1799
1800 spin_unlock(&root->list_lock);
1801
1802 /*
1803 * insert an orphan item to track this unlinked/truncated file
1804 */
1805 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
1806
1807 return ret;
1808}
1809
1810/*
1811 * We have done the truncate/delete so we can go ahead and remove the orphan
1812 * item for this particular inode.
1813 */
1814int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
1815{
1816 struct btrfs_root *root = BTRFS_I(inode)->root;
1817 int ret = 0;
1818
1819 spin_lock(&root->list_lock);
1820
1821 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
1822 spin_unlock(&root->list_lock);
1823 return 0;
1824 }
1825
1826 list_del_init(&BTRFS_I(inode)->i_orphan);
1827 if (!trans) {
1828 spin_unlock(&root->list_lock);
1829 return 0;
1830 }
1831
1832 spin_unlock(&root->list_lock);
1833
1834 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
1835
1836 return ret;
1837}
1838
1839/*
1840 * this cleans up any orphans that may be left on the list from the last use
1841 * of this root.
1842 */
1843void btrfs_orphan_cleanup(struct btrfs_root *root)
1844{
1845 struct btrfs_path *path;
1846 struct extent_buffer *leaf;
1847 struct btrfs_item *item;
1848 struct btrfs_key key, found_key;
1849 struct btrfs_trans_handle *trans;
1850 struct inode *inode;
1851 int ret = 0, nr_unlink = 0, nr_truncate = 0;
1852
1853 path = btrfs_alloc_path();
1854 if (!path)
1855 return;
1856 path->reada = -1;
1857
1858 key.objectid = BTRFS_ORPHAN_OBJECTID;
1859 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1860 key.offset = (u64)-1;
1861
1862
1863 while (1) {
1864 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1865 if (ret < 0) {
1866 printk(KERN_ERR "Error searching slot for orphan: %d"
1867 "\n", ret);
1868 break;
1869 }
1870
1871 /*
1872 * if ret == 0 means we found what we were searching for, which
1873		 * is weird, but possible, so only screw with path if we didn't
1874 * find the key and see if we have stuff that matches
1875 */
1876 if (ret > 0) {
1877 if (path->slots[0] == 0)
1878 break;
1879 path->slots[0]--;
1880 }
1881
1882 /* pull out the item */
1883 leaf = path->nodes[0];
1884 item = btrfs_item_nr(leaf, path->slots[0]);
1885 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1886
1887 /* make sure the item matches what we want */
1888 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
1889 break;
1890 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
1891 break;
1892
1893 /* release the path since we're done with it */
1894 btrfs_release_path(root, path);
1895
1896 /*
1897 * this is where we are basically btrfs_lookup, without the
1898 * crossing root thing. we store the inode number in the
1899 * offset of the orphan item.
1900 */
1901 inode = btrfs_iget_locked(root->fs_info->sb,
1902 found_key.offset, root);
1903 if (!inode)
1904 break;
1905
1906 if (inode->i_state & I_NEW) {
1907 BTRFS_I(inode)->root = root;
1908
1909 /* have to set the location manually */
1910 BTRFS_I(inode)->location.objectid = inode->i_ino;
1911 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
1912 BTRFS_I(inode)->location.offset = 0;
1913
1914 btrfs_read_locked_inode(inode);
1915 unlock_new_inode(inode);
1916 }
1917
1918 /*
1919 * add this inode to the orphan list so btrfs_orphan_del does
1920 * the proper thing when we hit it
1921 */
1922 spin_lock(&root->list_lock);
1923 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1924 spin_unlock(&root->list_lock);
1925
1926 /*
1927 * if this is a bad inode, means we actually succeeded in
1928 * removing the inode, but not the orphan record, which means
1929 * we need to manually delete the orphan since iput will just
1930 * do a destroy_inode
1931 */
1932 if (is_bad_inode(inode)) {
1933 trans = btrfs_start_transaction(root, 1);
1934 btrfs_orphan_del(trans, inode);
1935 btrfs_end_transaction(trans, root);
1936 iput(inode);
1937 continue;
1938 }
1939
1940		/* if we have links, this was a truncate, let's do that */
1941 if (inode->i_nlink) {
1942 nr_truncate++;
1943 btrfs_truncate(inode);
1944 } else {
1945 nr_unlink++;
1946 }
1947
1948 /* this will do delete_inode and everything for us */
1949 iput(inode);
1950 }
1951
1952 if (nr_unlink)
1953 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
1954 if (nr_truncate)
1955 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
1956
1957 btrfs_free_path(path);
1958}
1959
1960/*
1961 * read an inode from the btree into the in-memory inode
1962 */
1963void btrfs_read_locked_inode(struct inode *inode)
1964{
1965 struct btrfs_path *path;
1966 struct extent_buffer *leaf;
1967 struct btrfs_inode_item *inode_item;
1968 struct btrfs_timespec *tspec;
1969 struct btrfs_root *root = BTRFS_I(inode)->root;
1970 struct btrfs_key location;
1971 u64 alloc_group_block;
1972 u32 rdev;
1973 int ret;
1974
1975 path = btrfs_alloc_path();
1976 BUG_ON(!path);
1977 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
1978
1979 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
1980 if (ret)
1981 goto make_bad;
1982
1983 leaf = path->nodes[0];
1984 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1985 struct btrfs_inode_item);
1986
1987 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
1988 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
1989 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
1990 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
1991 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
1992
1993 tspec = btrfs_inode_atime(inode_item);
1994 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
1995 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
1996
1997 tspec = btrfs_inode_mtime(inode_item);
1998 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
1999 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2000
2001 tspec = btrfs_inode_ctime(inode_item);
2002 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2003 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2004
2005 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2006 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2007 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2008 inode->i_generation = BTRFS_I(inode)->generation;
2009 inode->i_rdev = 0;
2010 rdev = btrfs_inode_rdev(leaf, inode_item);
2011
2012 BTRFS_I(inode)->index_cnt = (u64)-1;
2013 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2014
2015 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2016 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2017 alloc_group_block, 0);
2018 btrfs_free_path(path);
2019 inode_item = NULL;
2020
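	/* wire up the inode and address space operations for this file type */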
2021 switch (inode->i_mode & S_IFMT) {
2022 case S_IFREG:
2023 inode->i_mapping->a_ops = &btrfs_aops;
2024 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2025 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2026 inode->i_fop = &btrfs_file_operations;
2027 inode->i_op = &btrfs_file_inode_operations;
2028 break;
2029 case S_IFDIR:
2030 inode->i_fop = &btrfs_dir_file_operations;
2031 if (root == root->fs_info->tree_root)
2032 inode->i_op = &btrfs_dir_ro_inode_operations;
2033 else
2034 inode->i_op = &btrfs_dir_inode_operations;
2035 break;
2036 case S_IFLNK:
2037 inode->i_op = &btrfs_symlink_inode_operations;
2038 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2039 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2040 break;
2041 default:
2042 init_special_inode(inode, inode->i_mode, rdev);
2043 break;
2044 }
2045 return;
2046
2047make_bad:
2048 btrfs_free_path(path);
2049 make_bad_inode(inode);
2050}
2051
2052/*
2053 * given a leaf and an inode, copy the inode fields into the leaf
2054 */
2055static void fill_inode_item(struct btrfs_trans_handle *trans,
2056 struct extent_buffer *leaf,
2057 struct btrfs_inode_item *item,
2058 struct inode *inode)
2059{
2060 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2061 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2062 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2063 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2064 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2065
2066 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2067 inode->i_atime.tv_sec);
2068 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2069 inode->i_atime.tv_nsec);
2070
2071 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2072 inode->i_mtime.tv_sec);
2073 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2074 inode->i_mtime.tv_nsec);
2075
2076 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2077 inode->i_ctime.tv_sec);
2078 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2079 inode->i_ctime.tv_nsec);
2080
2081 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2082 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2083 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2084 btrfs_set_inode_transid(leaf, item, trans->transid);
2085 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2086 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2087 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2088}
2089
2090/*
2091 * copy everything in the in-memory inode into the btree.
2092 */
2093noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2094 struct btrfs_root *root, struct inode *inode)
2095{
2096 struct btrfs_inode_item *inode_item;
2097 struct btrfs_path *path;
2098 struct extent_buffer *leaf;
2099 int ret;
2100
2101 path = btrfs_alloc_path();
2102 BUG_ON(!path);
2103 ret = btrfs_lookup_inode(trans, root, path,
2104 &BTRFS_I(inode)->location, 1);
2105 if (ret) {
2106 if (ret > 0)
2107 ret = -ENOENT;
2108 goto failed;
2109 }
2110
2111 leaf = path->nodes[0];
2112 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2113 struct btrfs_inode_item);
2114
2115 fill_inode_item(trans, leaf, inode_item, inode);
2116 btrfs_mark_buffer_dirty(leaf);
2117 btrfs_set_inode_last_trans(trans, inode);
2118 ret = 0;
2119failed:
2120 btrfs_free_path(path);
2121 return ret;
2122}
2123
2124
2125/*
2126 * unlink helper that gets used here in inode.c and in the tree logging
2127 * recovery code. It removes a link in a directory with a given name, and
2128 * also drops the back refs in the inode to the directory
2129 */
2130int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2131 struct btrfs_root *root,
2132 struct inode *dir, struct inode *inode,
2133 const char *name, int name_len)
2134{
2135 struct btrfs_path *path;
2136 int ret = 0;
2137 struct extent_buffer *leaf;
2138 struct btrfs_dir_item *di;
2139 struct btrfs_key key;
2140 u64 index;
2141
2142 path = btrfs_alloc_path();
2143 if (!path) {
2144 ret = -ENOMEM;
2145 goto err;
2146 }
2147
2148 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2149 name, name_len, -1);
2150 if (IS_ERR(di)) {
2151 ret = PTR_ERR(di);
2152 goto err;
2153 }
2154 if (!di) {
2155 ret = -ENOENT;
2156 goto err;
2157 }
2158 leaf = path->nodes[0];
2159 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2160 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2161 if (ret)
2162 goto err;
2163 btrfs_release_path(root, path);
2164
2165 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2166 inode->i_ino,
2167 dir->i_ino, &index);
2168 if (ret) {
2169 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2170 "inode %lu parent %lu\n", name_len, name,
2171 inode->i_ino, dir->i_ino);
2172 goto err;
2173 }
2174
2175 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2176 index, name, name_len, -1);
2177 if (IS_ERR(di)) {
2178 ret = PTR_ERR(di);
2179 goto err;
2180 }
2181 if (!di) {
2182 ret = -ENOENT;
2183 goto err;
2184 }
2185 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2186 btrfs_release_path(root, path);
2187
2188 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2189 inode, dir->i_ino);
2190 BUG_ON(ret != 0 && ret != -ENOENT);
2191 if (ret != -ENOENT)
2192 BTRFS_I(dir)->log_dirty_trans = trans->transid;
2193
2194 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2195 dir, index);
2196 BUG_ON(ret);
2197err:
2198 btrfs_free_path(path);
2199 if (ret)
2200 goto out;
2201
2202 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2203 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2204 btrfs_update_inode(trans, root, dir);
2205 btrfs_drop_nlink(inode);
2206 ret = btrfs_update_inode(trans, root, inode);
2207 dir->i_sb->s_dirt = 1;
2208out:
2209 return ret;
2210}
2211
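/*
 * standard unlink: drop the directory entry and inode back refs, and
 * put the inode on the orphan list if that was the last link.
 */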
2212static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2213{
2214 struct btrfs_root *root;
2215 struct btrfs_trans_handle *trans;
2216 struct inode *inode = dentry->d_inode;
2217 int ret;
2218 unsigned long nr = 0;
2219
2220 root = BTRFS_I(dir)->root;
2221
2222 ret = btrfs_check_free_space(root, 1, 1);
2223 if (ret)
2224 goto fail;
2225
2226 trans = btrfs_start_transaction(root, 1);
2227
2228 btrfs_set_trans_block_group(trans, dir);
2229 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2230 dentry->d_name.name, dentry->d_name.len);
2231
2232 if (inode->i_nlink == 0)
2233 ret = btrfs_orphan_add(trans, inode);
2234
2235 nr = trans->blocks_used;
2236
2237 btrfs_end_transaction_throttle(trans, root);
2238fail:
2239 btrfs_btree_balance_dirty(root, nr);
2240 return ret;
2241}
2242
2243static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2244{
2245 struct inode *inode = dentry->d_inode;
2246 int err = 0;
2247 int ret;
2248 struct btrfs_root *root = BTRFS_I(dir)->root;
2249 struct btrfs_trans_handle *trans;
2250 unsigned long nr = 0;
2251
2252 /*
2253 * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
2254 * the root of a subvolume or snapshot
2255 */
2256 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2257 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
2258 return -ENOTEMPTY;
2259 }
2260
2261 ret = btrfs_check_free_space(root, 1, 1);
2262 if (ret)
2263 goto fail;
2264
2265 trans = btrfs_start_transaction(root, 1);
2266 btrfs_set_trans_block_group(trans, dir);
2267
2268 err = btrfs_orphan_add(trans, inode);
2269 if (err)
2270 goto fail_trans;
2271
2272 /* now the directory is empty */
2273 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2274 dentry->d_name.name, dentry->d_name.len);
2275 if (!err)
2276 btrfs_i_size_write(inode, 0);
2277
2278fail_trans:
2279 nr = trans->blocks_used;
2280 ret = btrfs_end_transaction_throttle(trans, root);
2281fail:
2282 btrfs_btree_balance_dirty(root, nr);
2283
2284 if (ret && !err)
2285 err = ret;
2286 return err;
2287}
2288
2289#if 0
2290/*
2291 * when truncating bytes in a file, it is possible to avoid reading
2292 * the leaves that contain only checksum items. This can be the
2293 * majority of the IO required to delete a large file, but it must
2294 * be done carefully.
2295 *
2296 * The keys in the level just above the leaves are checked to make sure
2297 * the lowest key in a given leaf is a csum key, and starts at an offset
2298 * after the new size.
2299 *
2300 * Then the key for the next leaf is checked to make sure it also has
2301 * a checksum item for the same file. If it does, we know our target leaf
2302 * contains only checksum items, and it can be safely freed without reading
2303 * it.
2304 *
2305 * This is just an optimization targeted at large files. It may do
2306 * nothing. It will return 0 unless things went badly.
2307 */
2308static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2309 struct btrfs_root *root,
2310 struct btrfs_path *path,
2311 struct inode *inode, u64 new_size)
2312{
2313 struct btrfs_key key;
2314 int ret;
2315 int nritems;
2316 struct btrfs_key found_key;
2317 struct btrfs_key other_key;
2318 struct btrfs_leaf_ref *ref;
2319 u64 leaf_gen;
2320 u64 leaf_start;
2321
2322 path->lowest_level = 1;
2323 key.objectid = inode->i_ino;
2324 key.type = BTRFS_CSUM_ITEM_KEY;
2325 key.offset = new_size;
2326again:
2327 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2328 if (ret < 0)
2329 goto out;
2330
2331 if (path->nodes[1] == NULL) {
2332 ret = 0;
2333 goto out;
2334 }
2335 ret = 0;
2336 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2337 nritems = btrfs_header_nritems(path->nodes[1]);
2338
2339 if (!nritems)
2340 goto out;
2341
2342 if (path->slots[1] >= nritems)
2343 goto next_node;
2344
2345 /* did we find a key greater than anything we want to delete? */
2346 if (found_key.objectid > inode->i_ino ||
2347 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2348 goto out;
2349
2350	/* we check the next key in the node to make sure the leaf contains
2351 * only checksum items. This comparison doesn't work if our
2352 * leaf is the last one in the node
2353 */
2354 if (path->slots[1] + 1 >= nritems) {
2355next_node:
2356 /* search forward from the last key in the node, this
2357 * will bring us into the next node in the tree
2358 */
2359 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2360
2361 /* unlikely, but we inc below, so check to be safe */
2362 if (found_key.offset == (u64)-1)
2363 goto out;
2364
2365 /* search_forward needs a path with locks held, do the
2366 * search again for the original key. It is possible
2367 * this will race with a balance and return a path that
2368 * we could modify, but this drop is just an optimization
2369 * and is allowed to miss some leaves.
2370 */
2371 btrfs_release_path(root, path);
2372 found_key.offset++;
2373
2374 /* setup a max key for search_forward */
2375 other_key.offset = (u64)-1;
2376 other_key.type = key.type;
2377 other_key.objectid = key.objectid;
2378
2379 path->keep_locks = 1;
2380 ret = btrfs_search_forward(root, &found_key, &other_key,
2381 path, 0, 0);
2382 path->keep_locks = 0;
2383 if (ret || found_key.objectid != key.objectid ||
2384 found_key.type != key.type) {
2385 ret = 0;
2386 goto out;
2387 }
2388
2389 key.offset = found_key.offset;
2390 btrfs_release_path(root, path);
2391 cond_resched();
2392 goto again;
2393 }
2394
2395 /* we know there's one more slot after us in the tree,
2396 * read that key so we can verify it is also a checksum item
2397 */
2398 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2399
2400 if (found_key.objectid < inode->i_ino)
2401 goto next_key;
2402
2403 if (found_key.type != key.type || found_key.offset < new_size)
2404 goto next_key;
2405
2406 /*
2407 * if the key for the next leaf isn't a csum key from this objectid,
2408 * we can't be sure there aren't good items inside this leaf.
2409 * Bail out
2410 */
2411 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2412 goto out;
2413
2414 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2415 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2416 /*
2417 * it is safe to delete this leaf, it contains only
2418 * csum items from this inode at an offset >= new_size
2419 */
2420 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2421 BUG_ON(ret);
2422
2423 if (root->ref_cows && leaf_gen < trans->transid) {
2424 ref = btrfs_alloc_leaf_ref(root, 0);
2425 if (ref) {
2426 ref->root_gen = root->root_key.offset;
2427 ref->bytenr = leaf_start;
2428 ref->owner = 0;
2429 ref->generation = leaf_gen;
2430 ref->nritems = 0;
2431
2432 ret = btrfs_add_leaf_ref(root, ref, 0);
2433 WARN_ON(ret);
2434 btrfs_free_leaf_ref(root, ref);
2435 } else {
2436 WARN_ON(1);
2437 }
2438 }
2439next_key:
2440 btrfs_release_path(root, path);
2441
2442 if (other_key.objectid == inode->i_ino &&
2443 other_key.type == key.type && other_key.offset > key.offset) {
2444 key.offset = other_key.offset;
2445 cond_resched();
2446 goto again;
2447 }
2448 ret = 0;
2449out:
2450 /* fixup any changes we've made to the path */
2451 path->lowest_level = 0;
2452 path->keep_locks = 0;
2453 btrfs_release_path(root, path);
2454 return ret;
2455}
2456
2457#endif
2458
2459/*
2460 * this can truncate away extent items, csum items and directory items.
2461 * It starts at a high offset and removes keys until it can't find
2462 * any higher than new_size
2463 *
2464 * csum items that cross the new i_size are truncated to the new size
2465 * as well.
2466 *
2467 * min_type is the minimum key type to truncate down to. If set to 0, this
2468 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2469 */
2470noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2471 struct btrfs_root *root,
2472 struct inode *inode,
2473 u64 new_size, u32 min_type)
2474{
2475 int ret;
2476 struct btrfs_path *path;
2477 struct btrfs_key key;
2478 struct btrfs_key found_key;
2479 u32 found_type;
2480 struct extent_buffer *leaf;
2481 struct btrfs_file_extent_item *fi;
2482 u64 extent_start = 0;
2483 u64 extent_num_bytes = 0;
2484 u64 item_end = 0;
2485 u64 root_gen = 0;
2486 u64 root_owner = 0;
2487 int found_extent;
2488 int del_item;
2489 int pending_del_nr = 0;
2490 int pending_del_slot = 0;
2491 int extent_type = -1;
2492 int encoding;
2493 u64 mask = root->sectorsize - 1;
2494
2495 if (root->ref_cows)
2496 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2497 path = btrfs_alloc_path();
2498	BUG_ON(!path);
2499	path->reada = -1;
2500
2501 /* FIXME, add redo link to tree so we don't leak on crash */
2502 key.objectid = inode->i_ino;
2503 key.offset = (u64)-1;
2504 key.type = (u8)-1;
2505
2506 btrfs_init_path(path);
2507
2508search_again:
2509 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2510 if (ret < 0)
2511 goto error;
2512
2513 if (ret > 0) {
2514 /* there are no items in the tree for us to truncate, we're
2515 * done
2516 */
2517 if (path->slots[0] == 0) {
2518 ret = 0;
2519 goto error;
2520 }
2521 path->slots[0]--;
2522 }
2523
2524 while (1) {
2525 fi = NULL;
2526 leaf = path->nodes[0];
2527 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2528 found_type = btrfs_key_type(&found_key);
2529 encoding = 0;
2530
2531 if (found_key.objectid != inode->i_ino)
2532 break;
2533
2534 if (found_type < min_type)
2535 break;
2536
2537 item_end = found_key.offset;
2538 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2539 fi = btrfs_item_ptr(leaf, path->slots[0],
2540 struct btrfs_file_extent_item);
2541 extent_type = btrfs_file_extent_type(leaf, fi);
2542 encoding = btrfs_file_extent_compression(leaf, fi);
2543 encoding |= btrfs_file_extent_encryption(leaf, fi);
2544 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2545
2546 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2547 item_end +=
2548 btrfs_file_extent_num_bytes(leaf, fi);
2549 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2550 item_end += btrfs_file_extent_inline_len(leaf,
2551 fi);
2552 }
2553 item_end--;
2554 }
2555 if (item_end < new_size) {
2556 if (found_type == BTRFS_DIR_ITEM_KEY)
2557 found_type = BTRFS_INODE_ITEM_KEY;
2558 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2559 found_type = BTRFS_EXTENT_DATA_KEY;
2560 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2561 found_type = BTRFS_XATTR_ITEM_KEY;
2562 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2563 found_type = BTRFS_INODE_REF_KEY;
2564 else if (found_type)
2565 found_type--;
2566 else
2567 break;
2568 btrfs_set_key_type(&key, found_type);
2569 goto next;
2570 }
2571 if (found_key.offset >= new_size)
2572 del_item = 1;
2573 else
2574 del_item = 0;
2575 found_extent = 0;
2576
2577 /* FIXME, shrink the extent if the ref count is only 1 */
2578 if (found_type != BTRFS_EXTENT_DATA_KEY)
2579 goto delete;
2580
2581 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2582 u64 num_dec;
2583 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2584 if (!del_item && !encoding) {
2585 u64 orig_num_bytes =
2586 btrfs_file_extent_num_bytes(leaf, fi);
2587 extent_num_bytes = new_size -
2588 found_key.offset + root->sectorsize - 1;
2589 extent_num_bytes = extent_num_bytes &
2590 ~((u64)root->sectorsize - 1);
2591 btrfs_set_file_extent_num_bytes(leaf, fi,
2592 extent_num_bytes);
2593 num_dec = (orig_num_bytes -
2594 extent_num_bytes);
2595 if (root->ref_cows && extent_start != 0)
2596 inode_sub_bytes(inode, num_dec);
2597 btrfs_mark_buffer_dirty(leaf);
2598 } else {
2599 extent_num_bytes =
2600 btrfs_file_extent_disk_num_bytes(leaf,
2601 fi);
2602 /* FIXME blocksize != 4096 */
2603 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2604 if (extent_start != 0) {
2605 found_extent = 1;
2606 if (root->ref_cows)
2607 inode_sub_bytes(inode, num_dec);
2608 }
2609 root_gen = btrfs_header_generation(leaf);
2610 root_owner = btrfs_header_owner(leaf);
2611 }
2612 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2613 /*
2614 * we can't truncate inline items that have had
2615 * special encodings
2616 */
2617 if (!del_item &&
2618 btrfs_file_extent_compression(leaf, fi) == 0 &&
2619 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2620 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2621 u32 size = new_size - found_key.offset;
2622
2623 if (root->ref_cows) {
2624 inode_sub_bytes(inode, item_end + 1 -
2625 new_size);
2626 }
2627 size =
2628 btrfs_file_extent_calc_inline_size(size);
2629 ret = btrfs_truncate_item(trans, root, path,
2630 size, 1);
2631 BUG_ON(ret);
2632 } else if (root->ref_cows) {
2633 inode_sub_bytes(inode, item_end + 1 -
2634 found_key.offset);
2635 }
2636 }
2637delete:
2638 if (del_item) {
2639 if (!pending_del_nr) {
2640 /* no pending yet, add ourselves */
2641 pending_del_slot = path->slots[0];
2642 pending_del_nr = 1;
2643 } else if (pending_del_nr &&
2644 path->slots[0] + 1 == pending_del_slot) {
2645 /* hop on the pending chunk */
2646 pending_del_nr++;
2647 pending_del_slot = path->slots[0];
2648 } else {
2649 BUG();
2650 }
2651 } else {
2652 break;
2653 }
2654 if (found_extent) {
2655 ret = btrfs_free_extent(trans, root, extent_start,
2656 extent_num_bytes,
2657 leaf->start, root_owner,
2658 root_gen, inode->i_ino, 0);
2659 BUG_ON(ret);
2660 }
2661next:
2662 if (path->slots[0] == 0) {
2663 if (pending_del_nr)
2664 goto del_pending;
2665 btrfs_release_path(root, path);
2666 goto search_again;
2667 }
2668
2669 path->slots[0]--;
2670 if (pending_del_nr &&
2671 path->slots[0] + 1 != pending_del_slot) {
2672 struct btrfs_key debug;
2673del_pending:
2674 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2675 pending_del_slot);
2676 ret = btrfs_del_items(trans, root, path,
2677 pending_del_slot,
2678 pending_del_nr);
2679 BUG_ON(ret);
2680 pending_del_nr = 0;
2681 btrfs_release_path(root, path);
2682 goto search_again;
2683 }
2684 }
2685 ret = 0;
2686error:
2687 if (pending_del_nr) {
2688 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2689 pending_del_nr);
2690 }
2691 btrfs_free_path(path);
2692 inode->i_sb->s_dirt = 1;
2693 return ret;
2694}
2695
/*
 * taken from block_truncate_page, but does COW as it zeros out
 * any bytes left in the last page in the file.  e.g. with 4K pages,
 * from == 6144 gives index 1 and offset 2048, so the second half of
 * that page gets zeroed.
 */
2700static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2701{
2702 struct inode *inode = mapping->host;
2703 struct btrfs_root *root = BTRFS_I(inode)->root;
2704 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2705 struct btrfs_ordered_extent *ordered;
2706 char *kaddr;
2707 u32 blocksize = root->sectorsize;
2708 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2709 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2710 struct page *page;
2711 int ret = 0;
2712 u64 page_start;
2713 u64 page_end;
2714
2715 if ((offset & (blocksize - 1)) == 0)
2716 goto out;
2717
2718 ret = -ENOMEM;
2719again:
2720 page = grab_cache_page(mapping, index);
2721 if (!page)
2722 goto out;
2723
2724 page_start = page_offset(page);
2725 page_end = page_start + PAGE_CACHE_SIZE - 1;
2726
2727 if (!PageUptodate(page)) {
2728 ret = btrfs_readpage(NULL, page);
2729 lock_page(page);
2730 if (page->mapping != mapping) {
2731 unlock_page(page);
2732 page_cache_release(page);
2733 goto again;
2734 }
2735 if (!PageUptodate(page)) {
2736 ret = -EIO;
2737 goto out_unlock;
2738 }
2739 }
2740 wait_on_page_writeback(page);
2741
2742 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2743 set_page_extent_mapped(page);
2744
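	/*
	 * if an ordered extent is still pending for this page we can't
	 * safely set the delalloc bits; drop every lock, wait for the
	 * ordered IO to finish and start over
	 */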
2745 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2746 if (ordered) {
2747 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2748 unlock_page(page);
2749 page_cache_release(page);
2750 btrfs_start_ordered_extent(inode, ordered, 1);
2751 btrfs_put_ordered_extent(ordered);
2752 goto again;
2753 }
2754
2755 btrfs_set_extent_delalloc(inode, page_start, page_end);
2756 ret = 0;
2757 if (offset != PAGE_CACHE_SIZE) {
2758 kaddr = kmap(page);
2759 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2760 flush_dcache_page(page);
2761 kunmap(page);
2762 }
2763 ClearPageChecked(page);
2764 set_page_dirty(page);
2765 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2766
2767out_unlock:
2768 unlock_page(page);
2769 page_cache_release(page);
2770out:
2771 return ret;
2772}
2773
2774int btrfs_cont_expand(struct inode *inode, loff_t size)
2775{
2776 struct btrfs_trans_handle *trans;
2777 struct btrfs_root *root = BTRFS_I(inode)->root;
2778 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2779 struct extent_map *em;
2780 u64 mask = root->sectorsize - 1;
2781 u64 hole_start = (inode->i_size + mask) & ~mask;
2782 u64 block_end = (size + mask) & ~mask;
2783 u64 last_byte;
2784 u64 cur_offset;
2785 u64 hole_size;
2786 int err;
2787
2788 if (size <= hole_start)
2789 return 0;
2790
2791 err = btrfs_check_free_space(root, 1, 0);
2792 if (err)
2793 return err;
2794
2795 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2796
2797 while (1) {
2798 struct btrfs_ordered_extent *ordered;
2799 btrfs_wait_ordered_range(inode, hole_start,
2800 block_end - hole_start);
2801 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2802 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2803 if (!ordered)
2804 break;
2805 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2806 btrfs_put_ordered_extent(ordered);
2807 }
2808
2809 trans = btrfs_start_transaction(root, 1);
2810 btrfs_set_trans_block_group(trans, inode);
2811
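	/*
	 * walk the hole range; for every vacant region in the extent map
	 * tree, drop any stale extents and insert a file extent item with
	 * a disk_bytenr of 0 so the hole is recorded on disk
	 */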
2812 cur_offset = hole_start;
2813 while (1) {
2814 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2815 block_end - cur_offset, 0);
2816 BUG_ON(IS_ERR(em) || !em);
2817 last_byte = min(extent_map_end(em), block_end);
2818 last_byte = (last_byte + mask) & ~mask;
2819 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2820 u64 hint_byte = 0;
2821 hole_size = last_byte - cur_offset;
2822 err = btrfs_drop_extents(trans, root, inode,
2823 cur_offset,
2824 cur_offset + hole_size,
2825 cur_offset, &hint_byte);
2826 if (err)
2827 break;
2828 err = btrfs_insert_file_extent(trans, root,
2829 inode->i_ino, cur_offset, 0,
2830 0, hole_size, 0, hole_size,
2831 0, 0, 0);
2832 btrfs_drop_extent_cache(inode, hole_start,
2833 last_byte - 1, 0);
2834 }
2835 free_extent_map(em);
2836 cur_offset = last_byte;
2837 if (err || cur_offset >= block_end)
2838 break;
2839 }
2840
2841 btrfs_end_transaction(trans, root);
2842 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2843 return err;
2844}
2845
2846static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2847{
2848 struct inode *inode = dentry->d_inode;
2849 int err;
2850
2851 err = inode_change_ok(inode, attr);
2852 if (err)
2853 return err;
2854
2855 if (S_ISREG(inode->i_mode) &&
2856 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
2857 err = btrfs_cont_expand(inode, attr->ia_size);
2858 if (err)
2859 return err;
2860 }
2861
2862 err = inode_setattr(inode, attr);
2863
2864 if (!err && ((attr->ia_valid & ATTR_MODE)))
2865 err = btrfs_acl_chmod(inode);
2866 return err;
2867}
2868
2869void btrfs_delete_inode(struct inode *inode)
2870{
2871 struct btrfs_trans_handle *trans;
2872 struct btrfs_root *root = BTRFS_I(inode)->root;
2873 unsigned long nr;
2874 int ret;
2875
2876 truncate_inode_pages(&inode->i_data, 0);
2877 if (is_bad_inode(inode)) {
2878 btrfs_orphan_del(NULL, inode);
2879 goto no_delete;
2880 }
2881 btrfs_wait_ordered_range(inode, 0, (u64)-1);
2882
2883 btrfs_i_size_write(inode, 0);
2884 trans = btrfs_join_transaction(root, 1);
2885
2886 btrfs_set_trans_block_group(trans, inode);
2887 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
2888 if (ret) {
2889 btrfs_orphan_del(NULL, inode);
2890 goto no_delete_lock;
2891 }
2892
2893 btrfs_orphan_del(trans, inode);
2894
2895 nr = trans->blocks_used;
2896 clear_inode(inode);
2897
2898 btrfs_end_transaction(trans, root);
2899 btrfs_btree_balance_dirty(root, nr);
2900 return;
2901
2902no_delete_lock:
2903 nr = trans->blocks_used;
2904 btrfs_end_transaction(trans, root);
2905 btrfs_btree_balance_dirty(root, nr);
2906no_delete:
2907 clear_inode(inode);
2908}
2909
2910/*
2911 * this returns the key found in the dir entry in the location pointer.
2912 * If no dir entries were found, location->objectid is 0.
2913 */
2914static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
2915 struct btrfs_key *location)
2916{
2917 const char *name = dentry->d_name.name;
2918 int namelen = dentry->d_name.len;
2919 struct btrfs_dir_item *di;
2920 struct btrfs_path *path;
2921 struct btrfs_root *root = BTRFS_I(dir)->root;
2922 int ret = 0;
2923
2924 path = btrfs_alloc_path();
2925 BUG_ON(!path);
2926
2927 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
2928 namelen, 0);
2929 if (IS_ERR(di))
2930 ret = PTR_ERR(di);
2931
2932 if (!di || IS_ERR(di))
2933 goto out_err;
2934
2935 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
2936out:
2937 btrfs_free_path(path);
2938 return ret;
2939out_err:
2940 location->objectid = 0;
2941 goto out;
2942}
2943
2944/*
2945 * when we hit a tree root in a directory, the btrfs part of the inode
2946 * needs to be changed to reflect the root directory of the tree root. This
2947 * is kind of like crossing a mount point.
2948 */
2949static int fixup_tree_root_location(struct btrfs_root *root,
2950 struct btrfs_key *location,
2951 struct btrfs_root **sub_root,
2952 struct dentry *dentry)
2953{
2954 struct btrfs_root_item *ri;
2955
2956 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
2957 return 0;
2958 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
2959 return 0;
2960
2961 *sub_root = btrfs_read_fs_root(root->fs_info, location,
2962 dentry->d_name.name,
2963 dentry->d_name.len);
2964 if (IS_ERR(*sub_root))
2965 return PTR_ERR(*sub_root);
2966
2967 ri = &(*sub_root)->root_item;
2968 location->objectid = btrfs_root_dirid(ri);
2969 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
2970 location->offset = 0;
2971
2972 return 0;
2973}
2974
2975static noinline void init_btrfs_i(struct inode *inode)
2976{
2977 struct btrfs_inode *bi = BTRFS_I(inode);
2978
2979 bi->i_acl = NULL;
2980 bi->i_default_acl = NULL;
2981
2982 bi->generation = 0;
2983 bi->sequence = 0;
2984 bi->last_trans = 0;
2985 bi->logged_trans = 0;
2986 bi->delalloc_bytes = 0;
2987 bi->disk_i_size = 0;
2988 bi->flags = 0;
2989 bi->index_cnt = (u64)-1;
2990 bi->log_dirty_trans = 0;
2991 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
2992 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
2993 inode->i_mapping, GFP_NOFS);
2994 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
2995 inode->i_mapping, GFP_NOFS);
2996 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
2997 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
2998 mutex_init(&BTRFS_I(inode)->extent_mutex);
2999 mutex_init(&BTRFS_I(inode)->log_mutex);
3000}
3001
3002static int btrfs_init_locked_inode(struct inode *inode, void *p)
3003{
3004 struct btrfs_iget_args *args = p;
3005 inode->i_ino = args->ino;
3006 init_btrfs_i(inode);
3007 BTRFS_I(inode)->root = args->root;
3008 return 0;
3009}
3010
3011static int btrfs_find_actor(struct inode *inode, void *opaque)
3012{
3013 struct btrfs_iget_args *args = opaque;
3014 return args->ino == inode->i_ino &&
3015 args->root == BTRFS_I(inode)->root;
3016}
3017
3018struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
3019 struct btrfs_root *root, int wait)
3020{
3021 struct inode *inode;
3022 struct btrfs_iget_args args;
3023 args.ino = objectid;
3024 args.root = root;
3025
3026 if (wait) {
3027 inode = ilookup5(s, objectid, btrfs_find_actor,
3028 (void *)&args);
3029 } else {
3030 inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
3031 (void *)&args);
3032 }
3033 return inode;
3034}
3035
3036struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
3037 struct btrfs_root *root)
3038{
3039 struct inode *inode;
3040 struct btrfs_iget_args args;
3041 args.ino = objectid;
3042 args.root = root;
3043
3044 inode = iget5_locked(s, objectid, btrfs_find_actor,
3045 btrfs_init_locked_inode,
3046 (void *)&args);
3047 return inode;
3048}
3049
/* Get an inode object given its location and corresponding root.
 * Sets *is_new to 1 if the inode had to be read from disk.
 */
3053struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3054 struct btrfs_root *root, int *is_new)
3055{
3056 struct inode *inode;
3057
3058 inode = btrfs_iget_locked(s, location->objectid, root);
3059 if (!inode)
3060 return ERR_PTR(-EACCES);
3061
3062 if (inode->i_state & I_NEW) {
3063 BTRFS_I(inode)->root = root;
3064 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3065 btrfs_read_locked_inode(inode);
3066 unlock_new_inode(inode);
3067 if (is_new)
3068 *is_new = 1;
3069 } else {
3070 if (is_new)
3071 *is_new = 0;
3072 }
3073
3074 return inode;
3075}
3076
3077struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3078{
3079 struct inode *inode;
3080 struct btrfs_inode *bi = BTRFS_I(dir);
3081 struct btrfs_root *root = bi->root;
3082 struct btrfs_root *sub_root = root;
3083 struct btrfs_key location;
3084 int ret, new;
3085
3086 if (dentry->d_name.len > BTRFS_NAME_LEN)
3087 return ERR_PTR(-ENAMETOOLONG);
3088
3089 ret = btrfs_inode_by_name(dir, dentry, &location);
3090
3091 if (ret < 0)
3092 return ERR_PTR(ret);
3093
3094 inode = NULL;
3095 if (location.objectid) {
3096 ret = fixup_tree_root_location(root, &location, &sub_root,
3097 dentry);
3098 if (ret < 0)
3099 return ERR_PTR(ret);
3100 if (ret > 0)
3101 return ERR_PTR(-ENOENT);
3102 inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
3103 if (IS_ERR(inode))
3104 return ERR_CAST(inode);
3105 }
3106 return inode;
3107}
3108
3109static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3110 struct nameidata *nd)
3111{
3112 struct inode *inode;
3113
3114 if (dentry->d_name.len > BTRFS_NAME_LEN)
3115 return ERR_PTR(-ENAMETOOLONG);
3116
3117 inode = btrfs_lookup_dentry(dir, dentry);
3118 if (IS_ERR(inode))
3119 return ERR_CAST(inode);
3120
3121 return d_splice_alias(inode, dentry);
3122}
3123
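/*
 * maps the on-disk BTRFS_FT_* directory entry types to the DT_* values
 * that filldir expects
 */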
3124static unsigned char btrfs_filetype_table[] = {
3125 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3126};
3127
3128static int btrfs_real_readdir(struct file *filp, void *dirent,
3129 filldir_t filldir)
3130{
3131 struct inode *inode = filp->f_dentry->d_inode;
3132 struct btrfs_root *root = BTRFS_I(inode)->root;
3133 struct btrfs_item *item;
3134 struct btrfs_dir_item *di;
3135 struct btrfs_key key;
3136 struct btrfs_key found_key;
3137 struct btrfs_path *path;
3138 int ret;
3139 u32 nritems;
3140 struct extent_buffer *leaf;
3141 int slot;
3142 int advance;
3143 unsigned char d_type;
3144 int over = 0;
3145 u32 di_cur;
3146 u32 di_total;
3147 u32 di_len;
3148 int key_type = BTRFS_DIR_INDEX_KEY;
3149 char tmp_name[32];
3150 char *name_ptr;
3151 int name_len;
3152
3153 /* FIXME, use a real flag for deciding about the key type */
3154 if (root->fs_info->tree_root == root)
3155 key_type = BTRFS_DIR_ITEM_KEY;
3156
3157 /* special case for "." */
3158 if (filp->f_pos == 0) {
3159 over = filldir(dirent, ".", 1,
3160 1, inode->i_ino,
3161 DT_DIR);
3162 if (over)
3163 return 0;
3164 filp->f_pos = 1;
3165 }
3166 /* special case for .., just use the back ref */
3167 if (filp->f_pos == 1) {
3168 u64 pino = parent_ino(filp->f_path.dentry);
3169 over = filldir(dirent, "..", 2,
3170 2, pino, DT_DIR);
3171 if (over)
3172 return 0;
3173 filp->f_pos = 2;
3174 }
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;
3177
3178 btrfs_set_key_type(&key, key_type);
3179 key.offset = filp->f_pos;
3180 key.objectid = inode->i_ino;
3181
3182 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3183 if (ret < 0)
3184 goto err;
3185 advance = 0;
3186
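	/*
	 * walk forward through the leaves starting at f_pos; every dir
	 * item found is handed to filldir, and f_pos is advanced to the
	 * directory index of the entry being returned
	 */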
3187 while (1) {
3188 leaf = path->nodes[0];
3189 nritems = btrfs_header_nritems(leaf);
3190 slot = path->slots[0];
3191 if (advance || slot >= nritems) {
3192 if (slot >= nritems - 1) {
3193 ret = btrfs_next_leaf(root, path);
3194 if (ret)
3195 break;
3196 leaf = path->nodes[0];
3197 nritems = btrfs_header_nritems(leaf);
3198 slot = path->slots[0];
3199 } else {
3200 slot++;
3201 path->slots[0]++;
3202 }
3203 }
3204
3205 advance = 1;
3206 item = btrfs_item_nr(leaf, slot);
3207 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3208
3209 if (found_key.objectid != key.objectid)
3210 break;
3211 if (btrfs_key_type(&found_key) != key_type)
3212 break;
3213 if (found_key.offset < filp->f_pos)
3214 continue;
3215
3216 filp->f_pos = found_key.offset;
3217
3218 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3219 di_cur = 0;
3220 di_total = btrfs_item_size(leaf, item);
3221
3222 while (di_cur < di_total) {
3223 struct btrfs_key location;
3224
3225 name_len = btrfs_dir_name_len(leaf, di);
3226 if (name_len <= sizeof(tmp_name)) {
3227 name_ptr = tmp_name;
3228 } else {
3229 name_ptr = kmalloc(name_len, GFP_NOFS);
3230 if (!name_ptr) {
3231 ret = -ENOMEM;
3232 goto err;
3233 }
3234 }
3235 read_extent_buffer(leaf, name_ptr,
3236 (unsigned long)(di + 1), name_len);
3237
3238 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3239 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3240
3241 /* is this a reference to our own snapshot? If so
3242 * skip it
3243 */
3244 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3245 location.objectid == root->root_key.objectid) {
3246 over = 0;
3247 goto skip;
3248 }
3249 over = filldir(dirent, name_ptr, name_len,
3250 found_key.offset, location.objectid,
3251 d_type);
3252
3253skip:
3254 if (name_ptr != tmp_name)
3255 kfree(name_ptr);
3256
3257 if (over)
3258 goto nopos;
3259 di_len = btrfs_dir_name_len(leaf, di) +
3260 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3261 di_cur += di_len;
3262 di = (struct btrfs_dir_item *)((char *)di + di_len);
3263 }
3264 }
3265
3266 /* Reached end of directory/root. Bump pos past the last item. */
3267 if (key_type == BTRFS_DIR_INDEX_KEY)
3268 filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
3269 else
3270 filp->f_pos++;
3271nopos:
3272 ret = 0;
3273err:
3274 btrfs_free_path(path);
3275 return ret;
3276}
3277
3278int btrfs_write_inode(struct inode *inode, int wait)
3279{
3280 struct btrfs_root *root = BTRFS_I(inode)->root;
3281 struct btrfs_trans_handle *trans;
3282 int ret = 0;
3283
3284 if (root->fs_info->btree_inode == inode)
3285 return 0;
3286
3287 if (wait) {
3288 trans = btrfs_join_transaction(root, 1);
3289 btrfs_set_trans_block_group(trans, inode);
3290 ret = btrfs_commit_transaction(trans, root);
3291 }
3292 return ret;
3293}
3294
/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME: needs more benchmarking; there are no reasons other than
 * performance to keep or drop this code.
 */
3301void btrfs_dirty_inode(struct inode *inode)
3302{
3303 struct btrfs_root *root = BTRFS_I(inode)->root;
3304 struct btrfs_trans_handle *trans;
3305
3306 trans = btrfs_join_transaction(root, 1);
3307 btrfs_set_trans_block_group(trans, inode);
3308 btrfs_update_inode(trans, root, inode);
3309 btrfs_end_transaction(trans, root);
3310}
3311
/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to the next
 * free sequence number
 */
3317static int btrfs_set_inode_index_count(struct inode *inode)
3318{
3319 struct btrfs_root *root = BTRFS_I(inode)->root;
3320 struct btrfs_key key, found_key;
3321 struct btrfs_path *path;
3322 struct extent_buffer *leaf;
3323 int ret;
3324
3325 key.objectid = inode->i_ino;
3326 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3327 key.offset = (u64)-1;
3328
3329 path = btrfs_alloc_path();
3330 if (!path)
3331 return -ENOMEM;
3332
3333 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3334 if (ret < 0)
3335 goto out;
3336 /* FIXME: we should be able to handle this */
3337 if (ret == 0)
3338 goto out;
3339 ret = 0;
3340
	/*
	 * MAGIC NUMBER EXPLANATION:
	 * directories are searched based on f_pos, and '.' and '..'
	 * occupy f_pos 0 and 1 respectively, so every real directory
	 * index has to start at 2
	 */
3347 if (path->slots[0] == 0) {
3348 BTRFS_I(inode)->index_cnt = 2;
3349 goto out;
3350 }
3351
3352 path->slots[0]--;
3353
3354 leaf = path->nodes[0];
3355 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3356
3357 if (found_key.objectid != inode->i_ino ||
3358 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3359 BTRFS_I(inode)->index_cnt = 2;
3360 goto out;
3361 }
3362
3363 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3364out:
3365 btrfs_free_path(path);
3366 return ret;
3367}
3368
/*
 * helper to find a free sequence number in a given directory.  The current
 * code is very simple; later versions will do smarter things in the btree
 */
3373int btrfs_set_inode_index(struct inode *dir, u64 *index)
3374{
3375 int ret = 0;
3376
3377 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3378 ret = btrfs_set_inode_index_count(dir);
3379 if (ret)
3380 return ret;
3381 }
3382
3383 *index = BTRFS_I(dir)->index_cnt;
3384 BTRFS_I(dir)->index_cnt++;
3385
3386 return ret;
3387}
3388
3389static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3390 struct btrfs_root *root,
3391 struct inode *dir,
3392 const char *name, int name_len,
3393 u64 ref_objectid, u64 objectid,
3394 u64 alloc_hint, int mode, u64 *index)
3395{
3396 struct inode *inode;
3397 struct btrfs_inode_item *inode_item;
3398 struct btrfs_key *location;
3399 struct btrfs_path *path;
3400 struct btrfs_inode_ref *ref;
3401 struct btrfs_key key[2];
3402 u32 sizes[2];
3403 unsigned long ptr;
3404 int ret;
3405 int owner;
3406
3407 path = btrfs_alloc_path();
3408 BUG_ON(!path);
3409
	inode = new_inode(root->fs_info->sb);
	if (!inode) {
		btrfs_free_path(path);
		return ERR_PTR(-ENOMEM);
	}

	if (dir) {
		ret = btrfs_set_inode_index(dir, index);
		if (ret) {
			iput(inode);
			btrfs_free_path(path);
			return ERR_PTR(ret);
		}
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_set_inode_index_count has an explanation for the magic
	 * number
	 */
3424 init_btrfs_i(inode);
3425 BTRFS_I(inode)->index_cnt = 2;
3426 BTRFS_I(inode)->root = root;
3427 BTRFS_I(inode)->generation = trans->transid;
3428
3429 if (mode & S_IFDIR)
3430 owner = 0;
3431 else
3432 owner = 1;
3433 BTRFS_I(inode)->block_group =
3434 btrfs_find_block_group(root, 0, alloc_hint, owner);
3435 if ((mode & S_IFREG)) {
3436 if (btrfs_test_opt(root, NODATASUM))
3437 btrfs_set_flag(inode, NODATASUM);
3438 if (btrfs_test_opt(root, NODATACOW))
3439 btrfs_set_flag(inode, NODATACOW);
3440 }
3441
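	/*
	 * the inode item and its backref are inserted with one balanced
	 * btree operation: key[0] is the inode item itself, key[1] the
	 * BTRFS_INODE_REF_KEY entry pointing back at ref_objectid
	 */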
3442 key[0].objectid = objectid;
3443 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3444 key[0].offset = 0;
3445
3446 key[1].objectid = objectid;
3447 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3448 key[1].offset = ref_objectid;
3449
3450 sizes[0] = sizeof(struct btrfs_inode_item);
3451 sizes[1] = name_len + sizeof(*ref);
3452
3453 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3454 if (ret != 0)
3455 goto fail;
3456
3457 if (objectid > root->highest_inode)
3458 root->highest_inode = objectid;
3459
3460 inode->i_uid = current_fsuid();
3461 inode->i_gid = current_fsgid();
3462 inode->i_mode = mode;
3463 inode->i_ino = objectid;
3464 inode_set_bytes(inode, 0);
3465 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3466 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3467 struct btrfs_inode_item);
3468 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3469
3470 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3471 struct btrfs_inode_ref);
3472 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3473 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3474 ptr = (unsigned long)(ref + 1);
3475 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3476
3477 btrfs_mark_buffer_dirty(path->nodes[0]);
3478 btrfs_free_path(path);
3479
3480 location = &BTRFS_I(inode)->location;
3481 location->objectid = objectid;
3482 location->offset = 0;
3483 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3484
3485 insert_inode_hash(inode);
3486 return inode;
3487fail:
3488 if (dir)
3489 BTRFS_I(dir)->index_cnt--;
3490 btrfs_free_path(path);
3491 return ERR_PTR(ret);
3492}
3493
3494static inline u8 btrfs_inode_type(struct inode *inode)
3495{
3496 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3497}
3498
3499/*
3500 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
3502 * if 'add_backref' is true, also insert a backref from the
3503 * inode to the parent directory.
3504 */
3505int btrfs_add_link(struct btrfs_trans_handle *trans,
3506 struct inode *parent_inode, struct inode *inode,
3507 const char *name, int name_len, int add_backref, u64 index)
3508{
3509 int ret;
3510 struct btrfs_key key;
3511 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3512
3513 key.objectid = inode->i_ino;
3514 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3515 key.offset = 0;
3516
3517 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3518 parent_inode->i_ino,
3519 &key, btrfs_inode_type(inode),
3520 index);
3521 if (ret == 0) {
3522 if (add_backref) {
3523 ret = btrfs_insert_inode_ref(trans, root,
3524 name, name_len,
3525 inode->i_ino,
3526 parent_inode->i_ino,
3527 index);
3528 }
3529 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3530 name_len * 2);
3531 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3532 ret = btrfs_update_inode(trans, root, parent_inode);
3533 }
3534 return ret;
3535}
3536
3537static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3538 struct dentry *dentry, struct inode *inode,
3539 int backref, u64 index)
3540{
3541 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3542 inode, dentry->d_name.name,
3543 dentry->d_name.len, backref, index);
3544 if (!err) {
3545 d_instantiate(dentry, inode);
3546 return 0;
3547 }
3548 if (err > 0)
3549 err = -EEXIST;
3550 return err;
3551}
3552
3553static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3554 int mode, dev_t rdev)
3555{
3556 struct btrfs_trans_handle *trans;
3557 struct btrfs_root *root = BTRFS_I(dir)->root;
3558 struct inode *inode = NULL;
3559 int err;
3560 int drop_inode = 0;
3561 u64 objectid;
3562 unsigned long nr = 0;
3563 u64 index = 0;
3564
3565 if (!new_valid_dev(rdev))
3566 return -EINVAL;
3567
3568 err = btrfs_check_free_space(root, 1, 0);
3569 if (err)
3570 goto fail;
3571
3572 trans = btrfs_start_transaction(root, 1);
3573 btrfs_set_trans_block_group(trans, dir);
3574
3575 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3576 if (err) {
3577 err = -ENOSPC;
3578 goto out_unlock;
3579 }
3580
3581 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3582 dentry->d_name.len,
3583 dentry->d_parent->d_inode->i_ino, objectid,
3584 BTRFS_I(dir)->block_group, mode, &index);
3585 err = PTR_ERR(inode);
3586 if (IS_ERR(inode))
3587 goto out_unlock;
3588
3589 err = btrfs_init_acl(inode, dir);
3590 if (err) {
3591 drop_inode = 1;
3592 goto out_unlock;
3593 }
3594
3595 btrfs_set_trans_block_group(trans, inode);
3596 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3597 if (err)
3598 drop_inode = 1;
3599 else {
3600 inode->i_op = &btrfs_special_inode_operations;
3601 init_special_inode(inode, inode->i_mode, rdev);
3602 btrfs_update_inode(trans, root, inode);
3603 }
3604 dir->i_sb->s_dirt = 1;
3605 btrfs_update_inode_block_group(trans, inode);
3606 btrfs_update_inode_block_group(trans, dir);
3607out_unlock:
3608 nr = trans->blocks_used;
3609 btrfs_end_transaction_throttle(trans, root);
3610fail:
3611 if (drop_inode) {
3612 inode_dec_link_count(inode);
3613 iput(inode);
3614 }
3615 btrfs_btree_balance_dirty(root, nr);
3616 return err;
3617}
3618
3619static int btrfs_create(struct inode *dir, struct dentry *dentry,
3620 int mode, struct nameidata *nd)
3621{
3622 struct btrfs_trans_handle *trans;
3623 struct btrfs_root *root = BTRFS_I(dir)->root;
3624 struct inode *inode = NULL;
3625 int err;
3626 int drop_inode = 0;
3627 unsigned long nr = 0;
3628 u64 objectid;
3629 u64 index = 0;
3630
3631 err = btrfs_check_free_space(root, 1, 0);
3632 if (err)
3633 goto fail;
3634 trans = btrfs_start_transaction(root, 1);
3635 btrfs_set_trans_block_group(trans, dir);
3636
3637 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3638 if (err) {
3639 err = -ENOSPC;
3640 goto out_unlock;
3641 }
3642
3643 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3644 dentry->d_name.len,
3645 dentry->d_parent->d_inode->i_ino,
3646 objectid, BTRFS_I(dir)->block_group, mode,
3647 &index);
3648 err = PTR_ERR(inode);
3649 if (IS_ERR(inode))
3650 goto out_unlock;
3651
3652 err = btrfs_init_acl(inode, dir);
3653 if (err) {
3654 drop_inode = 1;
3655 goto out_unlock;
3656 }
3657
3658 btrfs_set_trans_block_group(trans, inode);
3659 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3660 if (err)
3661 drop_inode = 1;
3662 else {
3663 inode->i_mapping->a_ops = &btrfs_aops;
3664 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3665 inode->i_fop = &btrfs_file_operations;
3666 inode->i_op = &btrfs_file_inode_operations;
3667 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3668 }
3669 dir->i_sb->s_dirt = 1;
3670 btrfs_update_inode_block_group(trans, inode);
3671 btrfs_update_inode_block_group(trans, dir);
3672out_unlock:
3673 nr = trans->blocks_used;
3674 btrfs_end_transaction_throttle(trans, root);
3675fail:
3676 if (drop_inode) {
3677 inode_dec_link_count(inode);
3678 iput(inode);
3679 }
3680 btrfs_btree_balance_dirty(root, nr);
3681 return err;
3682}
3683
3684static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3685 struct dentry *dentry)
3686{
3687 struct btrfs_trans_handle *trans;
3688 struct btrfs_root *root = BTRFS_I(dir)->root;
3689 struct inode *inode = old_dentry->d_inode;
3690 u64 index;
3691 unsigned long nr = 0;
3692 int err;
3693 int drop_inode = 0;
3694
3695 if (inode->i_nlink == 0)
3696 return -ENOENT;
3697
3698 btrfs_inc_nlink(inode);
3699 err = btrfs_check_free_space(root, 1, 0);
3700 if (err)
3701 goto fail;
3702 err = btrfs_set_inode_index(dir, &index);
3703 if (err)
3704 goto fail;
3705
3706 trans = btrfs_start_transaction(root, 1);
3707
3708 btrfs_set_trans_block_group(trans, dir);
3709 atomic_inc(&inode->i_count);
3710
3711 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3712
3713 if (err)
3714 drop_inode = 1;
3715
3716 dir->i_sb->s_dirt = 1;
3717 btrfs_update_inode_block_group(trans, dir);
3718 err = btrfs_update_inode(trans, root, inode);
3719
3720 if (err)
3721 drop_inode = 1;
3722
3723 nr = trans->blocks_used;
3724 btrfs_end_transaction_throttle(trans, root);
3725fail:
3726 if (drop_inode) {
3727 inode_dec_link_count(inode);
3728 iput(inode);
3729 }
3730 btrfs_btree_balance_dirty(root, nr);
3731 return err;
3732}
3733
3734static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3735{
3736 struct inode *inode = NULL;
3737 struct btrfs_trans_handle *trans;
3738 struct btrfs_root *root = BTRFS_I(dir)->root;
3739 int err = 0;
3740 int drop_on_err = 0;
3741 u64 objectid = 0;
3742 u64 index = 0;
3743 unsigned long nr = 1;
3744
3745 err = btrfs_check_free_space(root, 1, 0);
3746 if (err)
3747 goto out_unlock;
3748
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, dir);
3756
3757 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3758 if (err) {
3759 err = -ENOSPC;
3760 goto out_unlock;
3761 }
3762
3763 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3764 dentry->d_name.len,
3765 dentry->d_parent->d_inode->i_ino, objectid,
3766 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3767 &index);
3768 if (IS_ERR(inode)) {
3769 err = PTR_ERR(inode);
3770 goto out_fail;
3771 }
3772
3773 drop_on_err = 1;
3774
3775 err = btrfs_init_acl(inode, dir);
3776 if (err)
3777 goto out_fail;
3778
3779 inode->i_op = &btrfs_dir_inode_operations;
3780 inode->i_fop = &btrfs_dir_file_operations;
3781 btrfs_set_trans_block_group(trans, inode);
3782
3783 btrfs_i_size_write(inode, 0);
3784 err = btrfs_update_inode(trans, root, inode);
3785 if (err)
3786 goto out_fail;
3787
3788 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3789 inode, dentry->d_name.name,
3790 dentry->d_name.len, 0, index);
3791 if (err)
3792 goto out_fail;
3793
3794 d_instantiate(dentry, inode);
3795 drop_on_err = 0;
3796 dir->i_sb->s_dirt = 1;
3797 btrfs_update_inode_block_group(trans, inode);
3798 btrfs_update_inode_block_group(trans, dir);
3799
3800out_fail:
3801 nr = trans->blocks_used;
3802 btrfs_end_transaction_throttle(trans, root);
3803
3804out_unlock:
3805 if (drop_on_err)
3806 iput(inode);
3807 btrfs_btree_balance_dirty(root, nr);
3808 return err;
3809}
3810
/* helper for btrfs_get_extent. Given an existing extent in the tree,
 * and an extent that you want to insert, deal with overlap and insert
 * the new extent into the tree.  e.g. if the new extent spans
 * [0, 8K) but only [4K, 8K) is still uncovered, it is clipped to
 * [4K, 8K) and its block_start is advanced by the same 4K before
 * being added to the tree.
 */
3815static int merge_extent_mapping(struct extent_map_tree *em_tree,
3816 struct extent_map *existing,
3817 struct extent_map *em,
3818 u64 map_start, u64 map_len)
3819{
3820 u64 start_diff;
3821
3822 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3823 start_diff = map_start - em->start;
3824 em->start = map_start;
3825 em->len = map_len;
3826 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3827 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3828 em->block_start += start_diff;
3829 em->block_len -= start_diff;
3830 }
3831 return add_extent_mapping(em_tree, em);
3832}
3833
3834static noinline int uncompress_inline(struct btrfs_path *path,
3835 struct inode *inode, struct page *page,
3836 size_t pg_offset, u64 extent_offset,
3837 struct btrfs_file_extent_item *item)
3838{
3839 int ret;
3840 struct extent_buffer *leaf = path->nodes[0];
3841 char *tmp;
3842 size_t max_size;
3843 unsigned long inline_size;
3844 unsigned long ptr;
3845
3846 WARN_ON(pg_offset != 0);
3847 max_size = btrfs_file_extent_ram_bytes(leaf, item);
3848 inline_size = btrfs_file_extent_inline_item_len(leaf,
3849 btrfs_item_nr(leaf, path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);
3852
3853 read_extent_buffer(leaf, tmp, ptr, inline_size);
3854
3855 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
3856 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
3857 inline_size, max_size);
3858 if (ret) {
3859 char *kaddr = kmap_atomic(page, KM_USER0);
3860 unsigned long copy_size = min_t(u64,
3861 PAGE_CACHE_SIZE - pg_offset,
3862 max_size - extent_offset);
3863 memset(kaddr + pg_offset, 0, copy_size);
3864 kunmap_atomic(kaddr, KM_USER0);
3865 }
3866 kfree(tmp);
3867 return 0;
3868}
3869
3870/*
3871 * a bit scary, this does extent mapping from logical file offset to the disk.
3872 * the ugly parts come from merging extents from the disk with the in-ram
3873 * representation. This gets more complex because of the data=ordered code,
3874 * where the in-ram extents might be locked pending data=ordered completion.
3875 *
3876 * This also copies inline extents directly into the page.
3877 */
3878
3879struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3880 size_t pg_offset, u64 start, u64 len,
3881 int create)
3882{
3883 int ret;
3884 int err = 0;
3885 u64 bytenr;
3886 u64 extent_start = 0;
3887 u64 extent_end = 0;
3888 u64 objectid = inode->i_ino;
3889 u32 found_type;
3890 struct btrfs_path *path = NULL;
3891 struct btrfs_root *root = BTRFS_I(inode)->root;
3892 struct btrfs_file_extent_item *item;
3893 struct extent_buffer *leaf;
3894 struct btrfs_key found_key;
3895 struct extent_map *em = NULL;
3896 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3897 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3898 struct btrfs_trans_handle *trans = NULL;
3899 int compressed;
3900
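	/*
	 * fast path: the mapping may already be cached in the in-memory
	 * extent map tree; only fall back to a btree lookup on a miss
	 */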
3901again:
3902 spin_lock(&em_tree->lock);
3903 em = lookup_extent_mapping(em_tree, start, len);
3904 if (em)
3905 em->bdev = root->fs_info->fs_devices->latest_bdev;
3906 spin_unlock(&em_tree->lock);
3907
3908 if (em) {
3909 if (em->start > start || em->start + em->len <= start)
3910 free_extent_map(em);
3911 else if (em->block_start == EXTENT_MAP_INLINE && page)
3912 free_extent_map(em);
3913 else
3914 goto out;
3915 }
3916 em = alloc_extent_map(GFP_NOFS);
3917 if (!em) {
3918 err = -ENOMEM;
3919 goto out;
3920 }
3921 em->bdev = root->fs_info->fs_devices->latest_bdev;
3922 em->start = EXTENT_MAP_HOLE;
3923 em->orig_start = EXTENT_MAP_HOLE;
3924 em->len = (u64)-1;
3925 em->block_len = (u64)-1;
3926
3927 if (!path) {
3928 path = btrfs_alloc_path();
3929 BUG_ON(!path);
3930 }
3931
3932 ret = btrfs_lookup_file_extent(trans, root, path,
3933 objectid, start, trans != NULL);
3934 if (ret < 0) {
3935 err = ret;
3936 goto out;
3937 }
3938
3939 if (ret != 0) {
3940 if (path->slots[0] == 0)
3941 goto not_found;
3942 path->slots[0]--;
3943 }
3944
3945 leaf = path->nodes[0];
3946 item = btrfs_item_ptr(leaf, path->slots[0],
3947 struct btrfs_file_extent_item);
3948 /* are we inside the extent that was found? */
3949 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3950 found_type = btrfs_key_type(&found_key);
3951 if (found_key.objectid != objectid ||
3952 found_type != BTRFS_EXTENT_DATA_KEY) {
3953 goto not_found;
3954 }
3955
3956 found_type = btrfs_file_extent_type(leaf, item);
3957 extent_start = found_key.offset;
3958 compressed = btrfs_file_extent_compression(leaf, item);
3959 if (found_type == BTRFS_FILE_EXTENT_REG ||
3960 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
3961 extent_end = extent_start +
3962 btrfs_file_extent_num_bytes(leaf, item);
3963 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
3964 size_t size;
3965 size = btrfs_file_extent_inline_len(leaf, item);
3966 extent_end = (extent_start + size + root->sectorsize - 1) &
3967 ~((u64)root->sectorsize - 1);
3968 }
3969
3970 if (start >= extent_end) {
3971 path->slots[0]++;
3972 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3973 ret = btrfs_next_leaf(root, path);
3974 if (ret < 0) {
3975 err = ret;
3976 goto out;
3977 }
3978 if (ret > 0)
3979 goto not_found;
3980 leaf = path->nodes[0];
3981 }
3982 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3983 if (found_key.objectid != objectid ||
3984 found_key.type != BTRFS_EXTENT_DATA_KEY)
3985 goto not_found;
3986 if (start + len <= found_key.offset)
3987 goto not_found;
3988 em->start = start;
3989 em->len = found_key.offset - start;
3990 goto not_found_em;
3991 }
3992
3993 if (found_type == BTRFS_FILE_EXTENT_REG ||
3994 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
3995 em->start = extent_start;
3996 em->len = extent_end - extent_start;
3997 em->orig_start = extent_start -
3998 btrfs_file_extent_offset(leaf, item);
3999 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4000 if (bytenr == 0) {
4001 em->block_start = EXTENT_MAP_HOLE;
4002 goto insert;
4003 }
4004 if (compressed) {
4005 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4006 em->block_start = bytenr;
4007 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4008 item);
4009 } else {
4010 bytenr += btrfs_file_extent_offset(leaf, item);
4011 em->block_start = bytenr;
4012 em->block_len = em->len;
4013 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4014 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4015 }
4016 goto insert;
4017 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4018 unsigned long ptr;
4019 char *map;
4020 size_t size;
4021 size_t extent_offset;
4022 size_t copy_size;
4023
4024 em->block_start = EXTENT_MAP_INLINE;
4025 if (!page || create) {
4026 em->start = extent_start;
4027 em->len = extent_end - extent_start;
4028 goto out;
4029 }
4030
4031 size = btrfs_file_extent_inline_len(leaf, item);
4032 extent_offset = page_offset(page) + pg_offset - extent_start;
4033 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4034 size - extent_offset);
4035 em->start = extent_start + extent_offset;
4036 em->len = (copy_size + root->sectorsize - 1) &
4037 ~((u64)root->sectorsize - 1);
4038 em->orig_start = EXTENT_MAP_INLINE;
4039 if (compressed)
4040 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4041 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4042 if (create == 0 && !PageUptodate(page)) {
4043 if (btrfs_file_extent_compression(leaf, item) ==
4044 BTRFS_COMPRESS_ZLIB) {
4045 ret = uncompress_inline(path, inode, page,
4046 pg_offset,
4047 extent_offset, item);
4048 BUG_ON(ret);
4049 } else {
4050 map = kmap(page);
4051 read_extent_buffer(leaf, map + pg_offset, ptr,
4052 copy_size);
4053 kunmap(page);
4054 }
4055 flush_dcache_page(page);
4056 } else if (create && PageUptodate(page)) {
4057 if (!trans) {
4059 free_extent_map(em);
4060 em = NULL;
4061 btrfs_release_path(root, path);
4062 trans = btrfs_join_transaction(root, 1);
4063 goto again;
4064 }
4065 map = kmap(page);
4066 write_extent_buffer(leaf, map + pg_offset, ptr,
4067 copy_size);
4068 kunmap(page);
4069 btrfs_mark_buffer_dirty(leaf);
4070 }
4071 set_extent_uptodate(io_tree, em->start,
4072 extent_map_end(em) - 1, GFP_NOFS);
4073 goto insert;
4074 } else {
4075 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4076 WARN_ON(1);
4077 }
4078not_found:
4079 em->start = start;
4080 em->len = len;
4081not_found_em:
4082 em->block_start = EXTENT_MAP_HOLE;
4083 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4084insert:
4085 btrfs_release_path(root, path);
4086 if (em->start > start || extent_map_end(em) <= start) {
4087 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4088 "[%llu %llu]\n", (unsigned long long)em->start,
4089 (unsigned long long)em->len,
4090 (unsigned long long)start,
4091 (unsigned long long)len);
4092 err = -EIO;
4093 goto out;
4094 }
4095
4096 err = 0;
4097 spin_lock(&em_tree->lock);
4098 ret = add_extent_mapping(em_tree, em);
4099 /* it is possible that someone inserted the extent into the tree
4100 * while we had the lock dropped. It is also possible that
4101 * an overlapping map exists in the tree
4102 */
4103 if (ret == -EEXIST) {
4104 struct extent_map *existing;
4105
4106 ret = 0;
4107
4108 existing = lookup_extent_mapping(em_tree, start, len);
4109 if (existing && (existing->start > start ||
4110 existing->start + existing->len <= start)) {
4111 free_extent_map(existing);
4112 existing = NULL;
4113 }
4114 if (!existing) {
4115 existing = lookup_extent_mapping(em_tree, em->start,
4116 em->len);
4117 if (existing) {
4118 err = merge_extent_mapping(em_tree, existing,
4119 em, start,
4120 root->sectorsize);
4121 free_extent_map(existing);
4122 if (err) {
4123 free_extent_map(em);
4124 em = NULL;
4125 }
4126 } else {
4127 err = -EIO;
4128 free_extent_map(em);
4129 em = NULL;
4130 }
4131 } else {
4132 free_extent_map(em);
4133 em = existing;
4134 err = 0;
4135 }
4136 }
4137 spin_unlock(&em_tree->lock);
4138out:
4139 if (path)
4140 btrfs_free_path(path);
4141 if (trans) {
4142 ret = btrfs_end_transaction(trans, root);
4143 if (!err)
4144 err = ret;
4145 }
4146 if (err) {
4147 free_extent_map(em);
4148 WARN_ON(1);
4149 return ERR_PTR(err);
4150 }
4151 return em;
4152}
4153
4154static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4155 const struct iovec *iov, loff_t offset,
4156 unsigned long nr_segs)
4157{
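	/* O_DIRECT is not supported yet, so reject it outright */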
4158 return -EINVAL;
4159}
4160
4161static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
4162{
4163 return extent_bmap(mapping, iblock, btrfs_get_extent);
4164}
4165
4166int btrfs_readpage(struct file *file, struct page *page)
4167{
4168 struct extent_io_tree *tree;
4169 tree = &BTRFS_I(page->mapping->host)->io_tree;
4170 return extent_read_full_page(tree, page, btrfs_get_extent);
4171}
4172
4173static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4174{
	struct extent_io_tree *tree;

4178 if (current->flags & PF_MEMALLOC) {
4179 redirty_page_for_writepage(wbc, page);
4180 unlock_page(page);
4181 return 0;
4182 }
4183 tree = &BTRFS_I(page->mapping->host)->io_tree;
4184 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4185}
4186
4187int btrfs_writepages(struct address_space *mapping,
4188 struct writeback_control *wbc)
4189{
4190 struct extent_io_tree *tree;
4191
4192 tree = &BTRFS_I(mapping->host)->io_tree;
4193 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4194}
4195
4196static int
4197btrfs_readpages(struct file *file, struct address_space *mapping,
4198 struct list_head *pages, unsigned nr_pages)
4199{
4200 struct extent_io_tree *tree;
4201 tree = &BTRFS_I(mapping->host)->io_tree;
4202 return extent_readpages(tree, mapping, pages, nr_pages,
4203 btrfs_get_extent);
}

static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4206{
4207 struct extent_io_tree *tree;
4208 struct extent_map_tree *map;
4209 int ret;
4210
4211 tree = &BTRFS_I(page->mapping->host)->io_tree;
4212 map = &BTRFS_I(page->mapping->host)->extent_tree;
4213 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4214 if (ret == 1) {
4215 ClearPagePrivate(page);
4216 set_page_private(page, 0);
4217 page_cache_release(page);
4218 }
4219 return ret;
4220}
4221
4222static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4223{
4224 if (PageWriteback(page) || PageDirty(page))
4225 return 0;
4226 return __btrfs_releasepage(page, gfp_flags);
4227}
4228
4229static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4230{
4231 struct extent_io_tree *tree;
4232 struct btrfs_ordered_extent *ordered;
4233 u64 page_start = page_offset(page);
4234 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4235
4236 wait_on_page_writeback(page);
4237 tree = &BTRFS_I(page->mapping->host)->io_tree;
4238 if (offset) {
4239 btrfs_releasepage(page, GFP_NOFS);
4240 return;
4241 }
4242
4243 lock_extent(tree, page_start, page_end, GFP_NOFS);
4244 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4245 page_offset(page));
4246 if (ordered) {
4247 /*
4248 * IO on this page will never be started, so we need
4249 * to account for any ordered extents now
4250 */
4251 clear_extent_bit(tree, page_start, page_end,
4252 EXTENT_DIRTY | EXTENT_DELALLOC |
4253 EXTENT_LOCKED, 1, 0, GFP_NOFS);
4254 btrfs_finish_ordered_io(page->mapping->host,
4255 page_start, page_end);
4256 btrfs_put_ordered_extent(ordered);
4257 lock_extent(tree, page_start, page_end, GFP_NOFS);
4258 }
4259 clear_extent_bit(tree, page_start, page_end,
4260 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4261 EXTENT_ORDERED,
4262 1, 1, GFP_NOFS);
4263 __btrfs_releasepage(page, GFP_NOFS);
4264
4265 ClearPageChecked(page);
4266 if (PagePrivate(page)) {
4267 ClearPagePrivate(page);
4268 set_page_private(page, 0);
4269 page_cache_release(page);
4270 }
4271}
4272
4273/*
4274 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
4275 * called from a page fault handler when a page is first dirtied. Hence we must
4276 * be careful to check for EOF conditions here. We set the page up correctly
4277 * for a written page which means we get ENOSPC checking when writing into
4278 * holes and correct delalloc and unwritten extent mapping on filesystems that
4279 * support these features.
4280 *
4281 * We are not allowed to take the i_mutex here so we have to play games to
4282 * protect against truncate races as the page could now be beyond EOF. Because
4283 * vmtruncate() writes the inode size before removing pages, once we have the
4284 * page lock we can determine safely if the page is beyond EOF. If it is not
4285 * beyond EOF, then the page is guaranteed safe against truncation until we
4286 * unlock the page.
4287 */
4288int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
4289{
4290 struct inode *inode = fdentry(vma->vm_file)->d_inode;
4291 struct btrfs_root *root = BTRFS_I(inode)->root;
4292 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4293 struct btrfs_ordered_extent *ordered;
4294 char *kaddr;
4295 unsigned long zero_start;
4296 loff_t size;
4297 int ret;
4298 u64 page_start;
4299 u64 page_end;
4300
4301 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
4302 if (ret)
4303 goto out;
4304
4305 ret = -EINVAL;
4306again:
4307 lock_page(page);
4308 size = i_size_read(inode);
4309 page_start = page_offset(page);
4310 page_end = page_start + PAGE_CACHE_SIZE - 1;
4311
4312 if ((page->mapping != inode->i_mapping) ||
4313 (page_start >= size)) {
4314 /* page got truncated out from underneath us */
4315 goto out_unlock;
4316 }
4317 wait_on_page_writeback(page);
4318
4319 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4320 set_page_extent_mapped(page);
4321
4322 /*
4323 * we can't set the delalloc bits if there are pending ordered
4324 * extents. Drop our locks and wait for them to finish
4325 */
4326 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4327 if (ordered) {
4328 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4329 unlock_page(page);
4330 btrfs_start_ordered_extent(inode, ordered, 1);
4331 btrfs_put_ordered_extent(ordered);
4332 goto again;
4333 }
4334
4335 btrfs_set_extent_delalloc(inode, page_start, page_end);
4336 ret = 0;
4337
4338 /* page is wholly or partially inside EOF */
4339 if (page_start + PAGE_CACHE_SIZE > size)
4340 zero_start = size & ~PAGE_CACHE_MASK;
4341 else
4342 zero_start = PAGE_CACHE_SIZE;
4343
4344 if (zero_start != PAGE_CACHE_SIZE) {
4345 kaddr = kmap(page);
4346 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
4347 flush_dcache_page(page);
4348 kunmap(page);
4349 }
4350 ClearPageChecked(page);
4351 set_page_dirty(page);
4352 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4353
4354out_unlock:
4355 unlock_page(page);
4356out:
4357 return ret;
4358}
4359
4360static void btrfs_truncate(struct inode *inode)
4361{
4362 struct btrfs_root *root = BTRFS_I(inode)->root;
4363 int ret;
4364 struct btrfs_trans_handle *trans;
4365 unsigned long nr;
4366 u64 mask = root->sectorsize - 1;
4367
4368 if (!S_ISREG(inode->i_mode))
4369 return;
4370 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4371 return;
4372
4373 btrfs_truncate_page(inode->i_mapping, inode->i_size);
4374 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4375
4376 trans = btrfs_start_transaction(root, 1);
4377 btrfs_set_trans_block_group(trans, inode);
4378 btrfs_i_size_write(inode, inode->i_size);
4379
4380 ret = btrfs_orphan_add(trans, inode);
4381 if (ret)
4382 goto out;
4383 /* FIXME, add redo link to tree so we don't leak on crash */
4384 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
4385 BTRFS_EXTENT_DATA_KEY);
4386 btrfs_update_inode(trans, root, inode);
4387
4388 ret = btrfs_orphan_del(trans, inode);
4389 BUG_ON(ret);
4390
4391out:
4392 nr = trans->blocks_used;
4393 ret = btrfs_end_transaction_throttle(trans, root);
4394 BUG_ON(ret);
4395 btrfs_btree_balance_dirty(root, nr);
4396}
4397
4398/*
4399 * create a new subvolume directory/inode (helper for the ioctl).
4400 */
4401int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
4402 struct btrfs_root *new_root, struct dentry *dentry,
4403 u64 new_dirid, u64 alloc_hint)
4404{
4405 struct inode *inode;
4406 int error;
4407 u64 index = 0;
4408
4409 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
4410 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
4411 if (IS_ERR(inode))
4412 return PTR_ERR(inode);
4413 inode->i_op = &btrfs_dir_inode_operations;
4414 inode->i_fop = &btrfs_dir_file_operations;
4415
4416 inode->i_nlink = 1;
4417 btrfs_i_size_write(inode, 0);
4418
4419 error = btrfs_update_inode(trans, new_root, inode);
4420 if (error)
4421 return error;
4422
4423 d_instantiate(dentry, inode);
4424 return 0;
4425}
4426
4427/* helper function for file defrag and space balancing. This
4428 * forces readahead on a given range of bytes in an inode
4429 */
4430unsigned long btrfs_force_ra(struct address_space *mapping,
4431 struct file_ra_state *ra, struct file *file,
4432 pgoff_t offset, pgoff_t last_index)
4433{
4434 pgoff_t req_size = last_index - offset + 1;
4435
4436 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
4437 return offset + req_size;
4438}
4439
4440struct inode *btrfs_alloc_inode(struct super_block *sb)
4441{
4442 struct btrfs_inode *ei;
4443
4444 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
4445 if (!ei)
4446 return NULL;
4447 ei->last_trans = 0;
4448 ei->logged_trans = 0;
4449 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4450 ei->i_acl = BTRFS_ACL_NOT_CACHED;
4451 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
4452 INIT_LIST_HEAD(&ei->i_orphan);
4453 return &ei->vfs_inode;
4454}
4455
4456void btrfs_destroy_inode(struct inode *inode)
4457{
4458 struct btrfs_ordered_extent *ordered;
4459 WARN_ON(!list_empty(&inode->i_dentry));
4460 WARN_ON(inode->i_data.nrpages);
4461
4462 if (BTRFS_I(inode)->i_acl &&
4463 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
4464 posix_acl_release(BTRFS_I(inode)->i_acl);
4465 if (BTRFS_I(inode)->i_default_acl &&
4466 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
4467 posix_acl_release(BTRFS_I(inode)->i_default_acl);
4468
4469 spin_lock(&BTRFS_I(inode)->root->list_lock);
4470 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
4471 printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
4472 " list\n", inode->i_ino);
4473 dump_stack();
4474 }
4475 spin_unlock(&BTRFS_I(inode)->root->list_lock);
4476
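	/*
	 * any ordered extent still queued here was leaked by earlier IO;
	 * complain, remove it, and drop both the lookup reference and the
	 * reference the ordered extent has held since it was allocated
	 */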
4477 while (1) {
4478 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		printk(KERN_ERR "btrfs found ordered "
		       "extent %llu %llu on inode cleanup\n",
		       (unsigned long long)ordered->file_offset,
		       (unsigned long long)ordered->len);
		btrfs_remove_ordered_extent(inode, ordered);
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
4490 }
4491 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4492 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4493}
4494
4495static void init_once(void *foo)
4496{
4497 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4498
4499 inode_init_once(&ei->vfs_inode);
4500}
4501
4502void btrfs_destroy_cachep(void)
4503{
4504 if (btrfs_inode_cachep)
4505 kmem_cache_destroy(btrfs_inode_cachep);
4506 if (btrfs_trans_handle_cachep)
4507 kmem_cache_destroy(btrfs_trans_handle_cachep);
4508 if (btrfs_transaction_cachep)
4509 kmem_cache_destroy(btrfs_transaction_cachep);
4510 if (btrfs_bit_radix_cachep)
4511 kmem_cache_destroy(btrfs_bit_radix_cachep);
4512 if (btrfs_path_cachep)
4513 kmem_cache_destroy(btrfs_path_cachep);
4514}
4515
4516struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
4517 unsigned long extra_flags,
4518 void (*ctor)(void *))
4519{
4520 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
4521 SLAB_MEM_SPREAD | extra_flags), ctor);
4522}
4523
4524int btrfs_init_cachep(void)
4525{
4526 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
4527 sizeof(struct btrfs_inode),
4528 0, init_once);
4529 if (!btrfs_inode_cachep)
4530 goto fail;
4531 btrfs_trans_handle_cachep =
4532 btrfs_cache_create("btrfs_trans_handle_cache",
4533 sizeof(struct btrfs_trans_handle),
4534 0, NULL);
4535 if (!btrfs_trans_handle_cachep)
4536 goto fail;
4537 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
4538 sizeof(struct btrfs_transaction),
4539 0, NULL);
4540 if (!btrfs_transaction_cachep)
4541 goto fail;
4542 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
4543 sizeof(struct btrfs_path),
4544 0, NULL);
4545 if (!btrfs_path_cachep)
4546 goto fail;
4547 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
4548 SLAB_DESTROY_BY_RCU, NULL);
4549 if (!btrfs_bit_radix_cachep)
4550 goto fail;
4551 return 0;
4552fail:
4553 btrfs_destroy_cachep();
4554 return -ENOMEM;
4555}
4556
4557static int btrfs_getattr(struct vfsmount *mnt,
4558 struct dentry *dentry, struct kstat *stat)
4559{
4560 struct inode *inode = dentry->d_inode;
4561 generic_fillattr(inode, stat);
4562 stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
4563 stat->blksize = PAGE_CACHE_SIZE;
4564 stat->blocks = (inode_get_bytes(inode) +
4565 BTRFS_I(inode)->delalloc_bytes) >> 9;
4566 return 0;
4567}
4568
4569static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4570 struct inode *new_dir, struct dentry *new_dentry)
4571{
4572 struct btrfs_trans_handle *trans;
4573 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4574 struct inode *new_inode = new_dentry->d_inode;
4575 struct inode *old_inode = old_dentry->d_inode;
4576 struct timespec ctime = CURRENT_TIME;
4577 u64 index = 0;
4578 int ret;
4579
4580 /* we're not allowed to rename between subvolumes */
4581 if (BTRFS_I(old_inode)->root->root_key.objectid !=
4582 BTRFS_I(new_dir)->root->root_key.objectid)
4583 return -EXDEV;
4584
4585 if (S_ISDIR(old_inode->i_mode) && new_inode &&
4586 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
4587 return -ENOTEMPTY;
4588 }
4589
4590 /* to rename a snapshot or subvolume, we need to juggle the
4591 * backrefs. This isn't coded yet
4592 */
4593 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
4594 return -EXDEV;
4595
4596 ret = btrfs_check_free_space(root, 1, 0);
4597 if (ret)
4598 goto out_unlock;
4599
4600 trans = btrfs_start_transaction(root, 1);
4601
4602 btrfs_set_trans_block_group(trans, new_dir);
4603
4604 btrfs_inc_nlink(old_dentry->d_inode);
4605 old_dir->i_ctime = old_dir->i_mtime = ctime;
4606 new_dir->i_ctime = new_dir->i_mtime = ctime;
4607 old_inode->i_ctime = ctime;
4608
4609 ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4610 old_dentry->d_name.name,
4611 old_dentry->d_name.len);
4612 if (ret)
4613 goto out_fail;
4614
4615 if (new_inode) {
4616 new_inode->i_ctime = CURRENT_TIME;
4617 ret = btrfs_unlink_inode(trans, root, new_dir,
4618 new_dentry->d_inode,
4619 new_dentry->d_name.name,
4620 new_dentry->d_name.len);
4621 if (ret)
4622 goto out_fail;
4623 if (new_inode->i_nlink == 0) {
4624 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4625 if (ret)
4626 goto out_fail;
4627 }
4628
4629 }
4630 ret = btrfs_set_inode_index(new_dir, &index);
4631 if (ret)
4632 goto out_fail;
4633
4634 ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4635 old_inode, new_dentry->d_name.name,
4636 new_dentry->d_name.len, 1, index);
4637 if (ret)
4638 goto out_fail;
4639
4640out_fail:
4641 btrfs_end_transaction_throttle(trans, root);
4642out_unlock:
4643 return ret;
4644}
4645
4646/*
4647 * some fairly slow code that needs optimization. This walks the list
4648 * of all the inodes with pending delalloc and forces them to disk.
4649 */
4650int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4651{
4652 struct list_head *head = &root->fs_info->delalloc_inodes;
4653 struct btrfs_inode *binode;
4654 struct inode *inode;
4655
4656 if (root->fs_info->sb->s_flags & MS_RDONLY)
4657 return -EROFS;
4658
4659 spin_lock(&root->fs_info->delalloc_lock);
4660 while (!list_empty(head)) {
4661 binode = list_entry(head->next, struct btrfs_inode,
4662 delalloc_inodes);
4663 inode = igrab(&binode->vfs_inode);
4664 if (!inode)
4665 list_del_init(&binode->delalloc_inodes);
4666 spin_unlock(&root->fs_info->delalloc_lock);
4667 if (inode) {
4668 filemap_flush(inode->i_mapping);
4669 iput(inode);
4670 }
4671 cond_resched();
4672 spin_lock(&root->fs_info->delalloc_lock);
4673 }
4674 spin_unlock(&root->fs_info->delalloc_lock);
4675
4676 /* the filemap_flush will queue IO into the worker threads, but
4677 * we have to make sure the IO is actually started and that
4678 * ordered extents get created before we return
4679 */
4680 atomic_inc(&root->fs_info->async_submit_draining);
4681 while (atomic_read(&root->fs_info->nr_async_submits) ||
4682 atomic_read(&root->fs_info->async_delalloc_pages)) {
4683 wait_event(root->fs_info->async_submit_wait,
4684 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
4685 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
4686 }
4687 atomic_dec(&root->fs_info->async_submit_draining);
4688 return 0;
4689}
4690
4691static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4692 const char *symname)
4693{
4694 struct btrfs_trans_handle *trans;
4695 struct btrfs_root *root = BTRFS_I(dir)->root;
4696 struct btrfs_path *path;
4697 struct btrfs_key key;
4698 struct inode *inode = NULL;
4699 int err;
4700 int drop_inode = 0;
4701 u64 objectid;
4702 u64 index = 0;
4703 int name_len;
4704 int datasize;
4705 unsigned long ptr;
4706 struct btrfs_file_extent_item *ei;
4707 struct extent_buffer *leaf;
4708 unsigned long nr = 0;
4709
4710 name_len = strlen(symname) + 1;
4711 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
4712 return -ENAMETOOLONG;
4713
4714 err = btrfs_check_free_space(root, 1, 0);
4715 if (err)
4716 goto out_fail;
4717
4718 trans = btrfs_start_transaction(root, 1);
4719 btrfs_set_trans_block_group(trans, dir);
4720
4721 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4722 if (err) {
4723 err = -ENOSPC;
4724 goto out_unlock;
4725 }
4726
4727 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4728 dentry->d_name.len,
4729 dentry->d_parent->d_inode->i_ino, objectid,
4730 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
4731 &index);
4732 err = PTR_ERR(inode);
4733 if (IS_ERR(inode))
4734 goto out_unlock;
4735
4736 err = btrfs_init_acl(inode, dir);
4737 if (err) {
4738 drop_inode = 1;
4739 goto out_unlock;
4740 }
4741
4742 btrfs_set_trans_block_group(trans, inode);
4743 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4744 if (err)
4745 drop_inode = 1;
4746 else {
4747 inode->i_mapping->a_ops = &btrfs_aops;
4748 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4749 inode->i_fop = &btrfs_file_operations;
4750 inode->i_op = &btrfs_file_inode_operations;
4751 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4752 }
4753 dir->i_sb->s_dirt = 1;
4754 btrfs_update_inode_block_group(trans, inode);
4755 btrfs_update_inode_block_group(trans, dir);
4756 if (drop_inode)
4757 goto out_unlock;
4758
4759 path = btrfs_alloc_path();
4760 BUG_ON(!path);
4761 key.objectid = inode->i_ino;
4762 key.offset = 0;
4763 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
4764 datasize = btrfs_file_extent_calc_inline_size(name_len);
4765 err = btrfs_insert_empty_item(trans, root, path, &key,
4766 datasize);
4767 if (err) {
4768 drop_inode = 1;
4769 goto out_unlock;
4770 }
4771 leaf = path->nodes[0];
4772 ei = btrfs_item_ptr(leaf, path->slots[0],
4773 struct btrfs_file_extent_item);
4774 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
4775 btrfs_set_file_extent_type(leaf, ei,
4776 BTRFS_FILE_EXTENT_INLINE);
4777 btrfs_set_file_extent_encryption(leaf, ei, 0);
4778 btrfs_set_file_extent_compression(leaf, ei, 0);
4779 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
4780 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
4781
4782 ptr = btrfs_file_extent_inline_start(ei);
4783 write_extent_buffer(leaf, symname, ptr, name_len);
4784 btrfs_mark_buffer_dirty(leaf);
4785 btrfs_free_path(path);
4786
4787 inode->i_op = &btrfs_symlink_inode_operations;
4788 inode->i_mapping->a_ops = &btrfs_symlink_aops;
4789 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4790 inode_set_bytes(inode, name_len);
4791 btrfs_i_size_write(inode, name_len - 1);
4792 err = btrfs_update_inode(trans, root, inode);
4793 if (err)
4794 drop_inode = 1;
4795
4796out_unlock:
4797 nr = trans->blocks_used;
4798 btrfs_end_transaction_throttle(trans, root);
4799out_fail:
4800 if (drop_inode) {
4801 inode_dec_link_count(inode);
4802 iput(inode);
4803 }
4804 btrfs_btree_balance_dirty(root, nr);
4805 return err;
4806}
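For reference (not part of the commit): the symlink target is stored as an
inline file extent, which is why the bookkeeping above distinguishes on-disk
bytes from i_size. With the names used in the function, and "target" as an
illustrative link body:

	name_len = strlen("target") + 1;	 /* 7 bytes written, NUL included */
	inode_set_bytes(inode, name_len);	 /* 7 bytes of inline data */
	btrfs_i_size_write(inode, name_len - 1); /* i_size = 6, what readlink(2) sees */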
4807
4808static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
4809 u64 alloc_hint, int mode)
4810{
4811 struct btrfs_trans_handle *trans;
4812 struct btrfs_root *root = BTRFS_I(inode)->root;
4813 struct btrfs_key ins;
4814 u64 alloc_size;
4815 u64 cur_offset = start;
4816 u64 num_bytes = end - start;
4817 int ret = 0;
4818
4819 trans = btrfs_join_transaction(root, 1);
4820 BUG_ON(!trans);
4821 btrfs_set_trans_block_group(trans, inode);
4822
4823 while (num_bytes > 0) {
4824 alloc_size = min(num_bytes, root->fs_info->max_extent);
4825 ret = btrfs_reserve_extent(trans, root, alloc_size,
4826 root->sectorsize, 0, alloc_hint,
4827 (u64)-1, &ins, 1);
4828 if (ret) {
4829 WARN_ON(1);
4830 goto out;
4831 }
4832 ret = insert_reserved_file_extent(trans, inode,
4833 cur_offset, ins.objectid,
4834 ins.offset, ins.offset,
4835 ins.offset, 0, 0, 0,
4836 BTRFS_FILE_EXTENT_PREALLOC);
4837 BUG_ON(ret);
4838 num_bytes -= ins.offset;
4839 cur_offset += ins.offset;
4840 alloc_hint = ins.objectid + ins.offset;
4841 }
4842out:
4843 if (cur_offset > start) {
4844 inode->i_ctime = CURRENT_TIME;
4845 btrfs_set_flag(inode, PREALLOC);
4846 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4847 cur_offset > i_size_read(inode))
4848 btrfs_i_size_write(inode, cur_offset);
4849 ret = btrfs_update_inode(trans, root, inode);
4850 BUG_ON(ret);
4851 }
4852
4853 btrfs_end_transaction(trans, root);
4854 return ret;
4855}
4856
4857static long btrfs_fallocate(struct inode *inode, int mode,
4858 loff_t offset, loff_t len)
4859{
4860 u64 cur_offset;
4861 u64 last_byte;
4862 u64 alloc_start;
4863 u64 alloc_end;
4864 u64 alloc_hint = 0;
4865 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
4866 struct extent_map *em;
4867 int ret;
4868
4869 alloc_start = offset & ~mask;
4870 alloc_end = (offset + len + mask) & ~mask;
4871
4872 mutex_lock(&inode->i_mutex);
4873 if (alloc_start > inode->i_size) {
4874 ret = btrfs_cont_expand(inode, alloc_start);
4875 if (ret)
4876 goto out;
4877 }
4878
4879 while (1) {
4880 struct btrfs_ordered_extent *ordered;
4881 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start,
4882 alloc_end - 1, GFP_NOFS);
4883 ordered = btrfs_lookup_first_ordered_extent(inode,
4884 alloc_end - 1);
4885 if (ordered &&
4886 ordered->file_offset + ordered->len > alloc_start &&
4887 ordered->file_offset < alloc_end) {
4888 btrfs_put_ordered_extent(ordered);
4889 unlock_extent(&BTRFS_I(inode)->io_tree,
4890 alloc_start, alloc_end - 1, GFP_NOFS);
4891 btrfs_wait_ordered_range(inode, alloc_start,
4892 alloc_end - alloc_start);
4893 } else {
4894 if (ordered)
4895 btrfs_put_ordered_extent(ordered);
4896 break;
4897 }
4898 }
4899
4900 cur_offset = alloc_start;
4901 while (1) {
4902 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4903 alloc_end - cur_offset, 0);
4904 BUG_ON(IS_ERR(em) || !em);
4905 last_byte = min(extent_map_end(em), alloc_end);
4906 last_byte = (last_byte + mask) & ~mask;
4907 if (em->block_start == EXTENT_MAP_HOLE) {
4908 ret = prealloc_file_range(inode, cur_offset,
4909 last_byte, alloc_hint, mode);
4910 if (ret < 0) {
4911 free_extent_map(em);
4912 break;
4913 }
4914 }
4915 if (em->block_start <= EXTENT_MAP_LAST_BYTE)
4916 alloc_hint = em->block_start;
4917 free_extent_map(em);
4918
4919 cur_offset = last_byte;
4920 if (cur_offset >= alloc_end) {
4921 ret = 0;
4922 break;
4923 }
4924 }
4925 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1,
4926 GFP_NOFS);
4927out:
4928 mutex_unlock(&inode->i_mutex);
4929 return ret;
4930}
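A minimal userspace sketch (not part of the commit; the path is an
assumption) of how this entry point is reached:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/mnt/btrfs/file", O_RDWR | O_CREAT, 0644);

		/* reaches btrfs_fallocate() above with mode == 0, so i_size
		 * is extended over the preallocated range */
		posix_fallocate(fd, 0, 16 * 1024 * 1024);
		close(fd);
		return 0;
	}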
4931
4932static int btrfs_set_page_dirty(struct page *page)
4933{
4934 return __set_page_dirty_nobuffers(page);
4935}
4936
4937static int btrfs_permission(struct inode *inode, int mask)
4938{
4939 if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
4940 return -EACCES;
4941 return generic_permission(inode, mask, btrfs_check_acl);
4942}
4943
4944static struct inode_operations btrfs_dir_inode_operations = {
4945 .getattr = btrfs_getattr,
4946 .lookup = btrfs_lookup,
4947 .create = btrfs_create,
4948 .unlink = btrfs_unlink,
4949 .link = btrfs_link,
4950 .mkdir = btrfs_mkdir,
4951 .rmdir = btrfs_rmdir,
4952 .rename = btrfs_rename,
4953 .symlink = btrfs_symlink,
4954 .setattr = btrfs_setattr,
4955 .mknod = btrfs_mknod,
4956 .setxattr = btrfs_setxattr,
4957 .getxattr = btrfs_getxattr,
4958 .listxattr = btrfs_listxattr,
4959 .removexattr = btrfs_removexattr,
4960 .permission = btrfs_permission,
4961};
4962static struct inode_operations btrfs_dir_ro_inode_operations = {
4963 .lookup = btrfs_lookup,
4964 .permission = btrfs_permission,
4965};
4966static struct file_operations btrfs_dir_file_operations = {
4967 .llseek = generic_file_llseek,
4968 .read = generic_read_dir,
4969 .readdir = btrfs_real_readdir,
4970 .unlocked_ioctl = btrfs_ioctl,
4971#ifdef CONFIG_COMPAT
4972 .compat_ioctl = btrfs_ioctl,
4973#endif
4974 .release = btrfs_release_file,
4975 .fsync = btrfs_sync_file,
4976};
4977
4978static struct extent_io_ops btrfs_extent_io_ops = {
4979 .fill_delalloc = run_delalloc_range,
4980 .submit_bio_hook = btrfs_submit_bio_hook,
4981 .merge_bio_hook = btrfs_merge_bio_hook,
4982 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
4983 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
4984 .writepage_start_hook = btrfs_writepage_start_hook,
4985 .readpage_io_failed_hook = btrfs_io_failed_hook,
4986 .set_bit_hook = btrfs_set_bit_hook,
4987 .clear_bit_hook = btrfs_clear_bit_hook,
4988};
4989
4990static struct address_space_operations btrfs_aops = {
4991 .readpage = btrfs_readpage,
4992 .writepage = btrfs_writepage,
4993 .writepages = btrfs_writepages,
4994 .readpages = btrfs_readpages,
4995 .sync_page = block_sync_page,
4996 .bmap = btrfs_bmap,
4997 .direct_IO = btrfs_direct_IO,
4998 .invalidatepage = btrfs_invalidatepage,
4999 .releasepage = btrfs_releasepage,
5000 .set_page_dirty = btrfs_set_page_dirty,
5001};
5002
5003static struct address_space_operations btrfs_symlink_aops = {
5004 .readpage = btrfs_readpage,
5005 .writepage = btrfs_writepage,
5006 .invalidatepage = btrfs_invalidatepage,
5007 .releasepage = btrfs_releasepage,
5008};
5009
5010static struct inode_operations btrfs_file_inode_operations = {
5011 .truncate = btrfs_truncate,
5012 .getattr = btrfs_getattr,
5013 .setattr = btrfs_setattr,
5014 .setxattr = btrfs_setxattr,
5015 .getxattr = btrfs_getxattr,
5016 .listxattr = btrfs_listxattr,
5017 .removexattr = btrfs_removexattr,
5018 .permission = btrfs_permission,
5019 .fallocate = btrfs_fallocate,
5020};
5021static struct inode_operations btrfs_special_inode_operations = {
5022 .getattr = btrfs_getattr,
5023 .setattr = btrfs_setattr,
5024 .permission = btrfs_permission,
5025 .setxattr = btrfs_setxattr,
5026 .getxattr = btrfs_getxattr,
5027 .listxattr = btrfs_listxattr,
5028 .removexattr = btrfs_removexattr,
5029};
5030static struct inode_operations btrfs_symlink_inode_operations = {
5031 .readlink = generic_readlink,
5032 .follow_link = page_follow_link_light,
5033 .put_link = page_put_link,
5034 .permission = btrfs_permission,
5035};
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
new file mode 100644
index 000000000000..c2aa33e3feb5
--- /dev/null
+++ b/fs/btrfs/ioctl.c
@@ -0,0 +1,1132 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/kernel.h>
20#include <linux/bio.h>
21#include <linux/buffer_head.h>
22#include <linux/file.h>
23#include <linux/fs.h>
24#include <linux/fsnotify.h>
25#include <linux/pagemap.h>
26#include <linux/highmem.h>
27#include <linux/time.h>
28#include <linux/init.h>
29#include <linux/string.h>
30#include <linux/smp_lock.h>
31#include <linux/backing-dev.h>
32#include <linux/mount.h>
33#include <linux/mpage.h>
34#include <linux/namei.h>
35#include <linux/swap.h>
36#include <linux/writeback.h>
37#include <linux/statfs.h>
38#include <linux/compat.h>
39#include <linux/bit_spinlock.h>
40#include <linux/security.h>
41#include <linux/version.h>
42#include <linux/xattr.h>
43#include <linux/vmalloc.h>
44#include "compat.h"
45#include "ctree.h"
46#include "disk-io.h"
47#include "transaction.h"
48#include "btrfs_inode.h"
49#include "ioctl.h"
50#include "print-tree.h"
51#include "volumes.h"
52#include "locking.h"
53
54
55
56static noinline int create_subvol(struct btrfs_root *root,
57 struct dentry *dentry,
58 char *name, int namelen)
59{
60 struct btrfs_trans_handle *trans;
61 struct btrfs_key key;
62 struct btrfs_root_item root_item;
63 struct btrfs_inode_item *inode_item;
64 struct extent_buffer *leaf;
65 struct btrfs_root *new_root = root;
66 struct inode *dir;
67 int ret;
68 int err;
69 u64 objectid;
70 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
71 u64 index = 0;
72 unsigned long nr = 1;
73
74 ret = btrfs_check_free_space(root, 1, 0);
75 if (ret)
76 goto fail_commit;
77
78 trans = btrfs_start_transaction(root, 1);
79 BUG_ON(!trans);
80
81 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
82 0, &objectid);
83 if (ret)
84 goto fail;
85
86 leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
87 objectid, trans->transid, 0, 0, 0);
88 if (IS_ERR(leaf)) {
89 ret = PTR_ERR(leaf);
90 goto fail;
91 }
92
93 btrfs_set_header_nritems(leaf, 0);
94 btrfs_set_header_level(leaf, 0);
95 btrfs_set_header_bytenr(leaf, leaf->start);
96 btrfs_set_header_generation(leaf, trans->transid);
97 btrfs_set_header_owner(leaf, objectid);
98
99 write_extent_buffer(leaf, root->fs_info->fsid,
100 (unsigned long)btrfs_header_fsid(leaf),
101 BTRFS_FSID_SIZE);
102 btrfs_mark_buffer_dirty(leaf);
103
104 inode_item = &root_item.inode;
105 memset(inode_item, 0, sizeof(*inode_item));
106 inode_item->generation = cpu_to_le64(1);
107 inode_item->size = cpu_to_le64(3);
108 inode_item->nlink = cpu_to_le32(1);
109 inode_item->nbytes = cpu_to_le64(root->leafsize);
110 inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
111
112 btrfs_set_root_bytenr(&root_item, leaf->start);
113 btrfs_set_root_generation(&root_item, trans->transid);
114 btrfs_set_root_level(&root_item, 0);
115 btrfs_set_root_refs(&root_item, 1);
116 btrfs_set_root_used(&root_item, 0);
117 btrfs_set_root_last_snapshot(&root_item, 0);
118
119 memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
120 root_item.drop_level = 0;
121
122 btrfs_tree_unlock(leaf);
123 free_extent_buffer(leaf);
124 leaf = NULL;
125
126 btrfs_set_root_dirid(&root_item, new_dirid);
127
128 key.objectid = objectid;
129 key.offset = 1;
130 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
131 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
132 &root_item);
133 if (ret)
134 goto fail;
135
136 /*
137 * insert the directory item
138 */
139 key.offset = (u64)-1;
140 dir = dentry->d_parent->d_inode;
141 ret = btrfs_set_inode_index(dir, &index);
142 BUG_ON(ret);
143
144 ret = btrfs_insert_dir_item(trans, root,
145 name, namelen, dir->i_ino, &key,
146 BTRFS_FT_DIR, index);
147 if (ret)
148 goto fail;
149
150 btrfs_i_size_write(dir, dir->i_size + namelen * 2);
151 ret = btrfs_update_inode(trans, root, dir);
152 BUG_ON(ret);
153
154 /* add the backref first */
155 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
156 objectid, BTRFS_ROOT_BACKREF_KEY,
157 root->root_key.objectid,
158 dir->i_ino, index, name, namelen);
159
160 BUG_ON(ret);
161
162 /* now add the forward ref */
163 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
164 root->root_key.objectid, BTRFS_ROOT_REF_KEY,
165 objectid,
166 dir->i_ino, index, name, namelen);
167
168 BUG_ON(ret);
169
170 ret = btrfs_commit_transaction(trans, root);
171 if (ret)
172 goto fail_commit;
173
174 new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
175 BUG_ON(!new_root);
176
177 trans = btrfs_start_transaction(new_root, 1);
178 BUG_ON(!trans);
179
180 ret = btrfs_create_subvol_root(trans, new_root, dentry, new_dirid,
181 BTRFS_I(dir)->block_group);
182 if (ret)
183 goto fail;
184
185fail:
186 nr = trans->blocks_used;
187 err = btrfs_commit_transaction(trans, new_root);
188 if (err && !ret)
189 ret = err;
190fail_commit:
191 btrfs_btree_balance_dirty(root, nr);
192 return ret;
193}
194
195static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
196 char *name, int namelen)
197{
198 struct btrfs_pending_snapshot *pending_snapshot;
199 struct btrfs_trans_handle *trans;
200 int ret = 0;
201 int err;
202 unsigned long nr = 0;
203
204 if (!root->ref_cows)
205 return -EINVAL;
206
207 ret = btrfs_check_free_space(root, 1, 0);
208 if (ret)
209 goto fail_unlock;
210
211 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
212 if (!pending_snapshot) {
213 ret = -ENOMEM;
214 goto fail_unlock;
215 }
216 pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
217 if (!pending_snapshot->name) {
218 ret = -ENOMEM;
219 kfree(pending_snapshot);
220 goto fail_unlock;
221 }
222 memcpy(pending_snapshot->name, name, namelen);
223 pending_snapshot->name[namelen] = '\0';
224 pending_snapshot->dentry = dentry;
225 trans = btrfs_start_transaction(root, 1);
226 BUG_ON(!trans);
227 pending_snapshot->root = root;
228 list_add(&pending_snapshot->list,
229 &trans->transaction->pending_snapshots);
230 err = btrfs_commit_transaction(trans, root);
231
232fail_unlock:
233 btrfs_btree_balance_dirty(root, nr);
234 return ret;
235}
236
237/* copy of may_create() in fs/namei.c */
238static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
239{
240 if (child->d_inode)
241 return -EEXIST;
242 if (IS_DEADDIR(dir))
243 return -ENOENT;
244 return inode_permission(dir, MAY_WRITE | MAY_EXEC);
245}
246
247/*
248 * Create a new subvolume below @parent. This is largely modeled after
249 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
250 * inside this filesystem so it's quite a bit simpler.
251 */
252static noinline int btrfs_mksubvol(struct path *parent, char *name,
253 int mode, int namelen,
254 struct btrfs_root *snap_src)
255{
256 struct dentry *dentry;
257 int error;
258
259 mutex_lock_nested(&parent->dentry->d_inode->i_mutex, I_MUTEX_PARENT);
260
261 dentry = lookup_one_len(name, parent->dentry, namelen);
262 error = PTR_ERR(dentry);
263 if (IS_ERR(dentry))
264 goto out_unlock;
265
266 error = -EEXIST;
267 if (dentry->d_inode)
268 goto out_dput;
269
270 if (!IS_POSIXACL(parent->dentry->d_inode))
271 mode &= ~current->fs->umask;
272
273 error = mnt_want_write(parent->mnt);
274 if (error)
275 goto out_dput;
276
277 error = btrfs_may_create(parent->dentry->d_inode, dentry);
278 if (error)
279 goto out_drop_write;
280
281 /*
282 * Actually perform the low-level subvolume creation after all
283 * this VFS fuss.
284 *
285 * Eventually we want to pass in an inode under which we create this
286 * subvolume, but for now all are under the filesystem root.
287 *
288 * Also we should pass on the mode eventually to allow creating new
289 * subvolume with specific mode bits.
290 */
291 if (snap_src) {
292 struct dentry *dir = dentry->d_parent;
293 struct dentry *test = dir->d_parent;
294 struct btrfs_path *path = btrfs_alloc_path();
295 int ret;
296 u64 test_oid;
297 u64 parent_oid = BTRFS_I(dir->d_inode)->root->root_key.objectid;
298
299 test_oid = snap_src->root_key.objectid;
300
301 ret = btrfs_find_root_ref(snap_src->fs_info->tree_root,
302 path, parent_oid, test_oid);
303 if (ret == 0)
304 goto create;
305 btrfs_release_path(snap_src->fs_info->tree_root, path);
306
307 /* we need to make sure we aren't creating a directory loop
308 * by taking a snapshot of something that has our current
309 * subvol in its directory tree. So, this loops through
310 * the dentries and checks the forward refs for each subvolume
311 * to see if it references the subvolume where we are
312 * placing this new snapshot.
313 */
314 while (1) {
315 if (!test ||
316 dir == snap_src->fs_info->sb->s_root ||
317 test == snap_src->fs_info->sb->s_root ||
318 test->d_inode->i_sb != snap_src->fs_info->sb) {
319 break;
320 }
321 if (S_ISLNK(test->d_inode->i_mode)) {
322 printk(KERN_INFO "Btrfs symlink in snapshot "
323 "path, failed\n");
324 error = -EMLINK;
325 btrfs_free_path(path);
326 goto out_drop_write;
327 }
328 test_oid =
329 BTRFS_I(test->d_inode)->root->root_key.objectid;
330 ret = btrfs_find_root_ref(snap_src->fs_info->tree_root,
331 path, test_oid, parent_oid);
332 if (ret == 0) {
333 printk(KERN_INFO "Btrfs snapshot creation "
334 "failed, looping\n");
335 error = -EMLINK;
336 btrfs_free_path(path);
337 goto out_drop_write;
338 }
339 btrfs_release_path(snap_src->fs_info->tree_root, path);
340 test = test->d_parent;
341 }
342create:
343 btrfs_free_path(path);
344 error = create_snapshot(snap_src, dentry, name, namelen);
345 } else {
346 error = create_subvol(BTRFS_I(parent->dentry->d_inode)->root,
347 dentry, name, namelen);
348 }
349 if (error)
350 goto out_drop_write;
351
352 fsnotify_mkdir(parent->dentry->d_inode, dentry);
353out_drop_write:
354 mnt_drop_write(parent->mnt);
355out_dput:
356 dput(dentry);
357out_unlock:
358 mutex_unlock(&parent->dentry->d_inode->i_mutex);
359 return error;
360}
361
362
363static int btrfs_defrag_file(struct file *file)
364{
365 struct inode *inode = fdentry(file)->d_inode;
366 struct btrfs_root *root = BTRFS_I(inode)->root;
367 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
368 struct btrfs_ordered_extent *ordered;
369 struct page *page;
370 unsigned long last_index;
371 unsigned long ra_pages = root->fs_info->bdi.ra_pages;
372 unsigned long total_read = 0;
373 u64 page_start;
374 u64 page_end;
375 unsigned long i;
376 int ret;
377
378 ret = btrfs_check_free_space(root, inode->i_size, 0);
379 if (ret)
380 return -ENOSPC;
381
382 mutex_lock(&inode->i_mutex);
383 last_index = inode->i_size >> PAGE_CACHE_SHIFT;
384 for (i = 0; i <= last_index; i++) {
385 if (total_read % ra_pages == 0) {
386 btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i,
387 min(last_index, i + ra_pages - 1));
388 }
389 total_read++;
390again:
391 page = grab_cache_page(inode->i_mapping, i);
392 if (!page)
393 goto out_unlock;
394 if (!PageUptodate(page)) {
395 btrfs_readpage(NULL, page);
396 lock_page(page);
397 if (!PageUptodate(page)) {
398 unlock_page(page);
399 page_cache_release(page);
400 goto out_unlock;
401 }
402 }
403
404 wait_on_page_writeback(page);
405
406 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
407 page_end = page_start + PAGE_CACHE_SIZE - 1;
408 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
409
410 ordered = btrfs_lookup_ordered_extent(inode, page_start);
411 if (ordered) {
412 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
413 unlock_page(page);
414 page_cache_release(page);
415 btrfs_start_ordered_extent(inode, ordered, 1);
416 btrfs_put_ordered_extent(ordered);
417 goto again;
418 }
419 set_page_extent_mapped(page);
420
421 /*
422 * this makes sure page_mkwrite is called on the
423 * page if it is dirtied again later
424 */
425 clear_page_dirty_for_io(page);
426
427 btrfs_set_extent_delalloc(inode, page_start, page_end);
428
429 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
430 set_page_dirty(page);
431 unlock_page(page);
432 page_cache_release(page);
433 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
434 }
435
436out_unlock:
437 mutex_unlock(&inode->i_mutex);
438 return 0;
439}
440
441/*
442 * Called inside transaction, so use GFP_NOFS
443 */
444
445static int btrfs_ioctl_resize(struct btrfs_root *root, void __user *arg)
446{
447 u64 new_size;
448 u64 old_size;
449 u64 devid = 1;
450 struct btrfs_ioctl_vol_args *vol_args;
451 struct btrfs_trans_handle *trans;
452 struct btrfs_device *device = NULL;
453 char *sizestr;
454 char *devstr = NULL;
455 int ret = 0;
456 int namelen;
457 int mod = 0;
458
459 if (root->fs_info->sb->s_flags & MS_RDONLY)
460 return -EROFS;
461
462 if (!capable(CAP_SYS_ADMIN))
463 return -EPERM;
464
465 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
466
467 if (!vol_args)
468 return -ENOMEM;
469
470 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
471 ret = -EFAULT;
472 goto out;
473 }
474
475 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
476 namelen = strlen(vol_args->name);
477
478 mutex_lock(&root->fs_info->volume_mutex);
479 sizestr = vol_args->name;
480 devstr = strchr(sizestr, ':');
481 if (devstr) {
482 char *end;
483 sizestr = devstr + 1;
484 *devstr = '\0';
485 devstr = vol_args->name;
486 devid = simple_strtoull(devstr, &end, 10);
487 printk(KERN_INFO "resizing devid %llu\n", devid);
488 }
489 device = btrfs_find_device(root, devid, NULL, NULL);
490 if (!device) {
491 printk(KERN_INFO "resizer unable to find device %llu\n", devid);
492 ret = -EINVAL;
493 goto out_unlock;
494 }
495 if (!strcmp(sizestr, "max"))
496 new_size = device->bdev->bd_inode->i_size;
497 else {
498 if (sizestr[0] == '-') {
499 mod = -1;
500 sizestr++;
501 } else if (sizestr[0] == '+') {
502 mod = 1;
503 sizestr++;
504 }
505 new_size = btrfs_parse_size(sizestr);
506 if (new_size == 0) {
507 ret = -EINVAL;
508 goto out_unlock;
509 }
510 }
511
512 old_size = device->total_bytes;
513
514 if (mod < 0) {
515 if (new_size > old_size) {
516 ret = -EINVAL;
517 goto out_unlock;
518 }
519 new_size = old_size - new_size;
520 } else if (mod > 0) {
521 new_size = old_size + new_size;
522 }
523
524 if (new_size < 256 * 1024 * 1024) {
525 ret = -EINVAL;
526 goto out_unlock;
527 }
528 if (new_size > device->bdev->bd_inode->i_size) {
529 ret = -EFBIG;
530 goto out_unlock;
531 }
532
533 do_div(new_size, root->sectorsize);
534 new_size *= root->sectorsize;
535
536 printk(KERN_INFO "new size for %s is %llu\n",
537 device->name, (unsigned long long)new_size);
538
539 if (new_size > old_size) {
540 trans = btrfs_start_transaction(root, 1);
541 ret = btrfs_grow_device(trans, device, new_size);
542 btrfs_commit_transaction(trans, root);
543 } else {
544 ret = btrfs_shrink_device(device, new_size);
545 }
546
547out_unlock:
548 mutex_unlock(&root->fs_info->volume_mutex);
549out:
550 kfree(vol_args);
551 return ret;
552}
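A hedged sketch of the strings the parser above accepts; fs_fd is assumed to
be any open fd on the mounted filesystem:

	struct btrfs_ioctl_vol_args args = { .fd = 0 };

	/* "max"    grow device 1 to the full size of its block device
	 * "+1g"    grow device 1 by 1GB; "-500m" shrinks by 500MB
	 * "3:max"  the same operations for the device with devid 3
	 */
	strcpy(args.name, "2:+1g");
	ioctl(fs_fd, BTRFS_IOC_RESIZE, &args);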
553
554static noinline int btrfs_ioctl_snap_create(struct file *file,
555 void __user *arg, int subvol)
556{
557 struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
558 struct btrfs_ioctl_vol_args *vol_args;
559 struct btrfs_dir_item *di;
560 struct btrfs_path *path;
561 struct file *src_file;
562 u64 root_dirid;
563 int namelen;
564 int ret = 0;
565
566 if (root->fs_info->sb->s_flags & MS_RDONLY)
567 return -EROFS;
568
569 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
570
571 if (!vol_args)
572 return -ENOMEM;
573
574 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
575 ret = -EFAULT;
576 goto out;
577 }
578
579 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
580 namelen = strlen(vol_args->name);
581 if (strchr(vol_args->name, '/')) {
582 ret = -EINVAL;
583 goto out;
584 }
585
586 path = btrfs_alloc_path();
587 if (!path) {
588 ret = -ENOMEM;
589 goto out;
590 }
591
592 root_dirid = root->fs_info->sb->s_root->d_inode->i_ino;
593 di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root,
594 path, root_dirid,
595 vol_args->name, namelen, 0);
596 btrfs_free_path(path);
597
598 if (di && !IS_ERR(di)) {
599 ret = -EEXIST;
600 goto out;
601 }
602
603 if (IS_ERR(di)) {
604 ret = PTR_ERR(di);
605 goto out;
606 }
607
608 if (subvol) {
609 ret = btrfs_mksubvol(&file->f_path, vol_args->name,
610 file->f_path.dentry->d_inode->i_mode,
611 namelen, NULL);
612 } else {
613 struct inode *src_inode;
614 src_file = fget(vol_args->fd);
615 if (!src_file) {
616 ret = -EINVAL;
617 goto out;
618 }
619
620 src_inode = src_file->f_path.dentry->d_inode;
621 if (src_inode->i_sb != file->f_path.dentry->d_inode->i_sb) {
622 printk(KERN_INFO "btrfs: Snapshot src from "
623 "another FS\n");
624 ret = -EINVAL;
625 fput(src_file);
626 goto out;
627 }
628 ret = btrfs_mksubvol(&file->f_path, vol_args->name,
629 file->f_path.dentry->d_inode->i_mode,
630 namelen, BTRFS_I(src_inode)->root);
631 fput(src_file);
632 }
633
634out:
635 kfree(vol_args);
636 return ret;
637}
638
639static int btrfs_ioctl_defrag(struct file *file)
640{
641 struct inode *inode = fdentry(file)->d_inode;
642 struct btrfs_root *root = BTRFS_I(inode)->root;
643 int ret;
644
645 ret = mnt_want_write(file->f_path.mnt);
646 if (ret)
647 return ret;
648
649 switch (inode->i_mode & S_IFMT) {
650 case S_IFDIR:
651 if (!capable(CAP_SYS_ADMIN)) {
652 ret = -EPERM;
653 goto out;
654 }
655 btrfs_defrag_root(root, 0);
656 btrfs_defrag_root(root->fs_info->extent_root, 0);
657 break;
658 case S_IFREG:
659 if (!(file->f_mode & FMODE_WRITE)) {
660 ret = -EINVAL;
661 goto out;
662 }
663 btrfs_defrag_file(file);
664 break;
665 }
666out:
667 mnt_drop_write(file->f_path.mnt);
668 return ret;
669}
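Userspace reaches this through BTRFS_IOC_DEFRAG; a sketch, assuming a
writable fd on the file to defragment (the argument is ignored by this
handler):

	int fd = open("/mnt/btrfs/fragmented-file", O_RDWR);

	ioctl(fd, BTRFS_IOC_DEFRAG, NULL);
	close(fd);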
670
671static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
672{
673 struct btrfs_ioctl_vol_args *vol_args;
674 int ret;
675
676 if (!capable(CAP_SYS_ADMIN))
677 return -EPERM;
678
679 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
680
681 if (!vol_args)
682 return -ENOMEM;
683
684 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
685 ret = -EFAULT;
686 goto out;
687 }
688 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
689 ret = btrfs_init_new_device(root, vol_args->name);
690
691out:
692 kfree(vol_args);
693 return ret;
694}
695
696static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
697{
698 struct btrfs_ioctl_vol_args *vol_args;
699 int ret;
700
701 if (!capable(CAP_SYS_ADMIN))
702 return -EPERM;
703
704 if (root->fs_info->sb->s_flags & MS_RDONLY)
705 return -EROFS;
706
707 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
708
709 if (!vol_args)
710 return -ENOMEM;
711
712 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
713 ret = -EFAULT;
714 goto out;
715 }
716 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
717 ret = btrfs_rm_device(root, vol_args->name);
718
719out:
720 kfree(vol_args);
721 return ret;
722}
723
724static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
725 u64 off, u64 olen, u64 destoff)
726{
727 struct inode *inode = fdentry(file)->d_inode;
728 struct btrfs_root *root = BTRFS_I(inode)->root;
729 struct file *src_file;
730 struct inode *src;
731 struct btrfs_trans_handle *trans;
732 struct btrfs_path *path;
733 struct extent_buffer *leaf;
734 char *buf;
735 struct btrfs_key key;
736 u32 nritems;
737 int slot;
738 int ret;
739 u64 len = olen;
740 u64 bs = root->fs_info->sb->s_blocksize;
741 u64 hint_byte;
742
743 /*
744 * TODO:
745 * - split compressed inline extents. annoying: we need to
746 * decompress into destination's address_space (the file offset
747 * may change, so source mapping won't do), then recompress (or
748 * otherwise reinsert) a subrange.
749 * - allow ranges within the same file to be cloned (provided
750 * they don't overlap)?
751 */
752
753 /* the destination must be opened for writing */
754 if (!(file->f_mode & FMODE_WRITE))
755 return -EINVAL;
756
757 ret = mnt_want_write(file->f_path.mnt);
758 if (ret)
759 return ret;
760
761 src_file = fget(srcfd);
762 if (!src_file) {
763 ret = -EBADF;
764 goto out_drop_write;
765 }
766 src = src_file->f_dentry->d_inode;
767
768 ret = -EINVAL;
769 if (src == inode)
770 goto out_fput;
771
772 ret = -EISDIR;
773 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
774 goto out_fput;
775
776 ret = -EXDEV;
777 if (src->i_sb != inode->i_sb || BTRFS_I(src)->root != root)
778 goto out_fput;
779
780 ret = -ENOMEM;
781 buf = vmalloc(btrfs_level_size(root, 0));
782 if (!buf)
783 goto out_fput;
784
785 path = btrfs_alloc_path();
786 if (!path) {
787 vfree(buf);
788 goto out_fput;
789 }
790 path->reada = 2;
791
792 if (inode < src) {
793 mutex_lock(&inode->i_mutex);
794 mutex_lock(&src->i_mutex);
795 } else {
796 mutex_lock(&src->i_mutex);
797 mutex_lock(&inode->i_mutex);
798 }
799
800 /* determine range to clone */
801 ret = -EINVAL;
802 if (off >= src->i_size || off + len > src->i_size)
803 goto out_unlock;
804 if (len == 0)
805 olen = len = src->i_size - off;
806 /* if we extend to eof, continue to block boundary */
807 if (off + len == src->i_size)
808 len = ((src->i_size + bs-1) & ~(bs-1))
809 - off;
810
811 /* verify the end result is block aligned */
812 if ((off & (bs-1)) ||
813 ((off + len) & (bs-1)))
814 goto out_unlock;
815
816 /* do any pending delalloc/csum calc on src, one way or
817 another, and lock file content */
818 while (1) {
819 struct btrfs_ordered_extent *ordered;
820 lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
821 ordered = btrfs_lookup_first_ordered_extent(inode, off+len);
822 if (BTRFS_I(src)->delalloc_bytes == 0 && !ordered)
823 break;
824 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
825 if (ordered)
826 btrfs_put_ordered_extent(ordered);
827 btrfs_wait_ordered_range(src, off, off+len);
828 }
829
830 trans = btrfs_start_transaction(root, 1);
831 BUG_ON(!trans);
832
833 /* punch hole in destination first */
834 btrfs_drop_extents(trans, root, inode, off, off+len, 0, &hint_byte);
835
836 /* clone data */
837 key.objectid = src->i_ino;
838 key.type = BTRFS_EXTENT_DATA_KEY;
839 key.offset = 0;
840
841 while (1) {
842 /*
843 * note the key will change type as we walk through the
844 * tree.
845 */
846 ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
847 if (ret < 0)
848 goto out;
849
850 nritems = btrfs_header_nritems(path->nodes[0]);
851 if (path->slots[0] >= nritems) {
852 ret = btrfs_next_leaf(root, path);
853 if (ret < 0)
854 goto out;
855 if (ret > 0)
856 break;
857 nritems = btrfs_header_nritems(path->nodes[0]);
858 }
859 leaf = path->nodes[0];
860 slot = path->slots[0];
861
862 btrfs_item_key_to_cpu(leaf, &key, slot);
863 if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
864 key.objectid != src->i_ino)
865 break;
866
867 if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
868 struct btrfs_file_extent_item *extent;
869 int type;
870 u32 size;
871 struct btrfs_key new_key;
872 u64 disko = 0, diskl = 0;
873 u64 datao = 0, datal = 0;
874 u8 comp;
875
876 size = btrfs_item_size_nr(leaf, slot);
877 read_extent_buffer(leaf, buf,
878 btrfs_item_ptr_offset(leaf, slot),
879 size);
880
881 extent = btrfs_item_ptr(leaf, slot,
882 struct btrfs_file_extent_item);
883 comp = btrfs_file_extent_compression(leaf, extent);
884 type = btrfs_file_extent_type(leaf, extent);
885 if (type == BTRFS_FILE_EXTENT_REG) {
886 disko = btrfs_file_extent_disk_bytenr(leaf,
887 extent);
888 diskl = btrfs_file_extent_disk_num_bytes(leaf,
889 extent);
890 datao = btrfs_file_extent_offset(leaf, extent);
891 datal = btrfs_file_extent_num_bytes(leaf,
892 extent);
893 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
894 /* take upper bound, may be compressed */
895 datal = btrfs_file_extent_ram_bytes(leaf,
896 extent);
897 }
898 btrfs_release_path(root, path);
899
900 if (key.offset + datal < off ||
901 key.offset >= off+len)
902 goto next;
903
904 memcpy(&new_key, &key, sizeof(new_key));
905 new_key.objectid = inode->i_ino;
906 new_key.offset = key.offset + destoff - off;
907
908 if (type == BTRFS_FILE_EXTENT_REG) {
909 ret = btrfs_insert_empty_item(trans, root, path,
910 &new_key, size);
911 if (ret)
912 goto out;
913
914 leaf = path->nodes[0];
915 slot = path->slots[0];
916 write_extent_buffer(leaf, buf,
917 btrfs_item_ptr_offset(leaf, slot),
918 size);
919
920 extent = btrfs_item_ptr(leaf, slot,
921 struct btrfs_file_extent_item);
922
923 if (off > key.offset) {
924 datao += off - key.offset;
925 datal -= off - key.offset;
926 }
927 if (key.offset + datao + datal + key.offset >
928 off + len)
929 datal = off + len - key.offset - datao;
930 /* disko == 0 means it's a hole */
931 if (!disko)
932 datao = 0;
933
934 btrfs_set_file_extent_offset(leaf, extent,
935 datao);
936 btrfs_set_file_extent_num_bytes(leaf, extent,
937 datal);
938 if (disko) {
939 inode_add_bytes(inode, datal);
940 ret = btrfs_inc_extent_ref(trans, root,
941 disko, diskl, leaf->start,
942 root->root_key.objectid,
943 trans->transid,
944 inode->i_ino);
945 BUG_ON(ret);
946 }
947 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
948 u64 skip = 0;
949 u64 trim = 0;
950 if (off > key.offset) {
951 skip = off - key.offset;
952 new_key.offset += skip;
953 }
954
955 if (key.offset + datal > off+len)
956 trim = key.offset + datal - (off+len);
957
958 if (comp && (skip || trim)) {
959 ret = -EINVAL;
960 goto out;
961 }
962 size -= skip + trim;
963 datal -= skip + trim;
964 ret = btrfs_insert_empty_item(trans, root, path,
965 &new_key, size);
966 if (ret)
967 goto out;
968
969 if (skip) {
970 u32 start =
971 btrfs_file_extent_calc_inline_size(0);
972 memmove(buf+start, buf+start+skip,
973 datal);
974 }
975
976 leaf = path->nodes[0];
977 slot = path->slots[0];
978 write_extent_buffer(leaf, buf,
979 btrfs_item_ptr_offset(leaf, slot),
980 size);
981 inode_add_bytes(inode, datal);
982 }
983
984 btrfs_mark_buffer_dirty(leaf);
985 }
986
987next:
988 btrfs_release_path(root, path);
989 key.offset++;
990 }
991 ret = 0;
992out:
993 btrfs_release_path(root, path);
994 if (ret == 0) {
995 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
996 if (destoff + olen > inode->i_size)
997 btrfs_i_size_write(inode, destoff + olen);
998 BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
999 ret = btrfs_update_inode(trans, root, inode);
1000 }
1001 btrfs_end_transaction(trans, root);
1002 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
1003 if (ret)
1004 vmtruncate(inode, 0);
1005out_unlock:
1006 mutex_unlock(&src->i_mutex);
1007 mutex_unlock(&inode->i_mutex);
1008 vfree(buf);
1009 btrfs_free_path(path);
1010out_fput:
1011 fput(src_file);
1012out_drop_write:
1013 mnt_drop_write(file->f_path.mnt);
1014 return ret;
1015}
1016
1017static long btrfs_ioctl_clone_range(struct file *file, void __user *argp)
1018{
1019 struct btrfs_ioctl_clone_range_args args;
1020
1021 if (copy_from_user(&args, argp, sizeof(args)))
1022 return -EFAULT;
1023 return btrfs_ioctl_clone(file, args.src_fd, args.src_offset,
1024 args.src_length, args.dest_offset);
1025}
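A sketch of filling in the args from userspace; src_fd and dst_fd are
assumptions (both files must live in the same root, and the offsets must be
block aligned as checked in btrfs_ioctl_clone()):

	struct btrfs_ioctl_clone_range_args args = {
		.src_fd      = src_fd,
		.src_offset  = 0,
		.src_length  = 1024 * 1024,
		.dest_offset = 0,
	};

	ioctl(dst_fd, BTRFS_IOC_CLONE_RANGE, &args);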
1026
1027/*
1028 * there are many ways the trans_start and trans_end ioctls can lead
1029 * to deadlocks. They should only be used by applications that
1030 * basically own the machine, and have a very in depth understanding
1031 * of all the possible deadlocks and enospc problems.
1032 */
1033static long btrfs_ioctl_trans_start(struct file *file)
1034{
1035 struct inode *inode = fdentry(file)->d_inode;
1036 struct btrfs_root *root = BTRFS_I(inode)->root;
1037 struct btrfs_trans_handle *trans;
1038 int ret = 0;
1039
1040 if (!capable(CAP_SYS_ADMIN))
1041 return -EPERM;
1042
1043 if (file->private_data) {
1044 ret = -EINPROGRESS;
1045 goto out;
1046 }
1047
1048 ret = mnt_want_write(file->f_path.mnt);
1049 if (ret)
1050 goto out;
1051
1052 mutex_lock(&root->fs_info->trans_mutex);
1053 root->fs_info->open_ioctl_trans++;
1054 mutex_unlock(&root->fs_info->trans_mutex);
1055
1056 trans = btrfs_start_ioctl_transaction(root, 0);
1057 if (trans)
1058 file->private_data = trans;
1059 else
1060 ret = -ENOMEM;
1061 /*printk(KERN_INFO "btrfs_ioctl_trans_start on %p\n", file);*/
1062out:
1063 return ret;
1064}
1065
1066/*
1067 * there are many ways the trans_start and trans_end ioctls can lead
1068 * to deadlocks. They should only be used by applications that
1069 * basically own the machine, and have a very in depth understanding
1070 * of all the possible deadlocks and enospc problems.
1071 */
1072long btrfs_ioctl_trans_end(struct file *file)
1073{
1074 struct inode *inode = fdentry(file)->d_inode;
1075 struct btrfs_root *root = BTRFS_I(inode)->root;
1076 struct btrfs_trans_handle *trans;
1077 int ret = 0;
1078
1079 trans = file->private_data;
1080 if (!trans) {
1081 ret = -EINVAL;
1082 goto out;
1083 }
1084 btrfs_end_transaction(trans, root);
1085 file->private_data = NULL;
1086
1087 mutex_lock(&root->fs_info->trans_mutex);
1088 root->fs_info->open_ioctl_trans--;
1089 mutex_unlock(&root->fs_info->trans_mutex);
1090
1091 mnt_drop_write(file->f_path.mnt);
1092
1093out:
1094 return ret;
1095}
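A paired-use sketch; as the comments above stress, this is only for
applications that understand the deadlock and ENOSPC implications:

	ioctl(fd, BTRFS_IOC_TRANS_START, 0);
	/* ... writes that must land in a single btrfs transaction ... */
	ioctl(fd, BTRFS_IOC_TRANS_END, 0);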
1096
1097long btrfs_ioctl(struct file *file, unsigned int
1098 cmd, unsigned long arg)
1099{
1100 struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
1101 void __user *argp = (void __user *)arg;
1102
1103 switch (cmd) {
1104 case BTRFS_IOC_SNAP_CREATE:
1105 return btrfs_ioctl_snap_create(file, argp, 0);
1106 case BTRFS_IOC_SUBVOL_CREATE:
1107 return btrfs_ioctl_snap_create(file, argp, 1);
1108 case BTRFS_IOC_DEFRAG:
1109 return btrfs_ioctl_defrag(file);
1110 case BTRFS_IOC_RESIZE:
1111 return btrfs_ioctl_resize(root, argp);
1112 case BTRFS_IOC_ADD_DEV:
1113 return btrfs_ioctl_add_dev(root, argp);
1114 case BTRFS_IOC_RM_DEV:
1115 return btrfs_ioctl_rm_dev(root, argp);
1116 case BTRFS_IOC_BALANCE:
1117 return btrfs_balance(root->fs_info->dev_root);
1118 case BTRFS_IOC_CLONE:
1119 return btrfs_ioctl_clone(file, arg, 0, 0, 0);
1120 case BTRFS_IOC_CLONE_RANGE:
1121 return btrfs_ioctl_clone_range(file, argp);
1122 case BTRFS_IOC_TRANS_START:
1123 return btrfs_ioctl_trans_start(file);
1124 case BTRFS_IOC_TRANS_END:
1125 return btrfs_ioctl_trans_end(file);
1126 case BTRFS_IOC_SYNC:
1127 btrfs_sync_fs(file->f_dentry->d_sb, 1);
1128 return 0;
1129 }
1130
1131 return -ENOTTY;
1132}
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
new file mode 100644
index 000000000000..78049ea208db
--- /dev/null
+++ b/fs/btrfs/ioctl.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#ifndef __IOCTL_
20#define __IOCTL_
21#include <linux/ioctl.h>
22
23#define BTRFS_IOCTL_MAGIC 0x94
24#define BTRFS_VOL_NAME_MAX 255
25#define BTRFS_PATH_NAME_MAX 3072
26
27struct btrfs_ioctl_vol_args {
28 __s64 fd;
29 char name[BTRFS_PATH_NAME_MAX + 1];
30};
31
32#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
33 struct btrfs_ioctl_vol_args)
34#define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
35 struct btrfs_ioctl_vol_args)
36#define BTRFS_IOC_RESIZE _IOW(BTRFS_IOCTL_MAGIC, 3, \
37 struct btrfs_ioctl_vol_args)
38#define BTRFS_IOC_SCAN_DEV _IOW(BTRFS_IOCTL_MAGIC, 4, \
39 struct btrfs_ioctl_vol_args)
40/* trans start and trans end are dangerous, and only for
41 * use by applications that know how to avoid the
42 * resulting deadlocks
43 */
44#define BTRFS_IOC_TRANS_START _IO(BTRFS_IOCTL_MAGIC, 6)
45#define BTRFS_IOC_TRANS_END _IO(BTRFS_IOCTL_MAGIC, 7)
46#define BTRFS_IOC_SYNC _IO(BTRFS_IOCTL_MAGIC, 8)
47
48#define BTRFS_IOC_CLONE _IOW(BTRFS_IOCTL_MAGIC, 9, int)
49#define BTRFS_IOC_ADD_DEV _IOW(BTRFS_IOCTL_MAGIC, 10, \
50 struct btrfs_ioctl_vol_args)
51#define BTRFS_IOC_RM_DEV _IOW(BTRFS_IOCTL_MAGIC, 11, \
52 struct btrfs_ioctl_vol_args)
53#define BTRFS_IOC_BALANCE _IOW(BTRFS_IOCTL_MAGIC, 12, \
54 struct btrfs_ioctl_vol_args)
55struct btrfs_ioctl_clone_range_args {
56 __s64 src_fd;
57 __u64 src_offset, src_length;
58 __u64 dest_offset;
59};
60
61#define BTRFS_IOC_CLONE_RANGE _IOW(BTRFS_IOCTL_MAGIC, 13, \
62 struct btrfs_ioctl_clone_range_args)
63
64#define BTRFS_IOC_SUBVOL_CREATE _IOW(BTRFS_IOCTL_MAGIC, 14, \
65 struct btrfs_ioctl_vol_args)
66
67#endif
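To show how the structures above are meant to be filled in, a hypothetical
userspace helper that snapshots the subvolume open at snap_fd into
dest_dir/name (error handling elided; the include path is an assumption):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "ioctl.h"

	int create_snapshot(const char *dest_dir, int snap_fd, const char *name)
	{
		struct btrfs_ioctl_vol_args args;
		int ret, dirfd = open(dest_dir, O_RDONLY);

		if (dirfd < 0)
			return -1;
		memset(&args, 0, sizeof(args));
		args.fd = snap_fd;			/* source subvolume */
		strncpy(args.name, name, BTRFS_PATH_NAME_MAX);
		ret = ioctl(dirfd, BTRFS_IOC_SNAP_CREATE, &args);
		close(dirfd);
		return ret;
	}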
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
new file mode 100644
index 000000000000..39bae7761db6
--- /dev/null
+++ b/fs/btrfs/locking.c
@@ -0,0 +1,88 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18#include <linux/sched.h>
19#include <linux/gfp.h>
20#include <linux/pagemap.h>
21#include <linux/spinlock.h>
22#include <linux/page-flags.h>
23#include <asm/bug.h>
24#include "ctree.h"
25#include "extent_io.h"
26#include "locking.h"
27
28/*
29 * locks the per-buffer mutex in an extent buffer. This uses adaptive locks
30 * and the spin is not tuned very extensively. The spinning does make a big
31 * difference in almost every workload, but spinning for the right amount of
32 * time needs some help.
33 *
34 * In general, we want to spin as long as the lock holder is doing btree
35 * searches, and we should give up if they are in more expensive code.
36 */
37
38int btrfs_tree_lock(struct extent_buffer *eb)
39{
40 int i;
41
42 if (mutex_trylock(&eb->mutex))
43 return 0;
44 for (i = 0; i < 512; i++) {
45 cpu_relax();
46 if (mutex_trylock(&eb->mutex))
47 return 0;
48 }
49 cpu_relax();
50 mutex_lock_nested(&eb->mutex, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
51 return 0;
52}
53
54int btrfs_try_tree_lock(struct extent_buffer *eb)
55{
56 return mutex_trylock(&eb->mutex);
57}
58
59int btrfs_tree_unlock(struct extent_buffer *eb)
60{
61 mutex_unlock(&eb->mutex);
62 return 0;
63}
64
65int btrfs_tree_locked(struct extent_buffer *eb)
66{
67 return mutex_is_locked(&eb->mutex);
68}
69
70/*
71 * btrfs_search_slot uses this to decide if it should drop its locks
72 * before doing something expensive like allocating free blocks for cow.
73 */
74int btrfs_path_lock_waiting(struct btrfs_path *path, int level)
75{
76 int i;
77 struct extent_buffer *eb;
78 for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) {
79 eb = path->nodes[i];
80 if (!eb)
81 break;
82 smp_mb();
83 if (!list_empty(&eb->mutex.wait_list))
84 return 1;
85 }
86 return 0;
87}
88
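A sketch of the caller pattern these helpers support (the surrounding logic
is an assumption based on the comments above):

	if (!btrfs_try_tree_lock(eb)) {
		/* contended: a search may first consult
		 * btrfs_path_lock_waiting() and drop its own locks
		 * before blocking here */
		btrfs_tree_lock(eb);
	}
	/* ... modify the extent buffer ... */
	btrfs_tree_unlock(eb);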
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
new file mode 100644
index 000000000000..bc1faef12519
--- /dev/null
+++ b/fs/btrfs/locking.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#ifndef __BTRFS_LOCKING_
20#define __BTRFS_LOCKING_
21
22int btrfs_tree_lock(struct extent_buffer *eb);
23int btrfs_tree_unlock(struct extent_buffer *eb);
24int btrfs_tree_locked(struct extent_buffer *eb);
25int btrfs_try_tree_lock(struct extent_buffer *eb);
26int btrfs_path_lock_waiting(struct btrfs_path *path, int level);
27#endif
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
new file mode 100644
index 000000000000..a20940170274
--- /dev/null
+++ b/fs/btrfs/ordered-data.c
@@ -0,0 +1,730 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/gfp.h>
20#include <linux/slab.h>
21#include <linux/blkdev.h>
22#include <linux/writeback.h>
23#include <linux/pagevec.h>
24#include "ctree.h"
25#include "transaction.h"
26#include "btrfs_inode.h"
27#include "extent_io.h"
28
29static u64 entry_end(struct btrfs_ordered_extent *entry)
30{
31 if (entry->file_offset + entry->len < entry->file_offset)
32 return (u64)-1;
33 return entry->file_offset + entry->len;
34}
35
36/* returns NULL if the insertion worked, or the node already in the tree
 37 * that the given offset collides with
38 */
39static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
40 struct rb_node *node)
41{
42 struct rb_node **p = &root->rb_node;
43 struct rb_node *parent = NULL;
44 struct btrfs_ordered_extent *entry;
45
46 while (*p) {
47 parent = *p;
48 entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
49
50 if (file_offset < entry->file_offset)
51 p = &(*p)->rb_left;
52 else if (file_offset >= entry_end(entry))
53 p = &(*p)->rb_right;
54 else
55 return parent;
56 }
57
58 rb_link_node(node, parent, p);
59 rb_insert_color(node, root);
60 return NULL;
61}
62
63/*
64 * look for a given offset in the tree, and if it can't be found return the
65 * first lesser offset
66 */
67static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
68 struct rb_node **prev_ret)
69{
70 struct rb_node *n = root->rb_node;
71 struct rb_node *prev = NULL;
72 struct rb_node *test;
73 struct btrfs_ordered_extent *entry;
74 struct btrfs_ordered_extent *prev_entry = NULL;
75
76 while (n) {
77 entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
78 prev = n;
79 prev_entry = entry;
80
81 if (file_offset < entry->file_offset)
82 n = n->rb_left;
83 else if (file_offset >= entry_end(entry))
84 n = n->rb_right;
85 else
86 return n;
87 }
88 if (!prev_ret)
89 return NULL;
90
91 while (prev && file_offset >= entry_end(prev_entry)) {
92 test = rb_next(prev);
93 if (!test)
94 break;
95 prev_entry = rb_entry(test, struct btrfs_ordered_extent,
96 rb_node);
97 if (file_offset < entry_end(prev_entry))
98 break;
99
100 prev = test;
101 }
102 if (prev)
103 prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
104 rb_node);
105 while (prev && file_offset < entry_end(prev_entry)) {
106 test = rb_prev(prev);
107 if (!test)
108 break;
109 prev_entry = rb_entry(test, struct btrfs_ordered_extent,
110 rb_node);
111 prev = test;
112 }
113 *prev_ret = prev;
114 return NULL;
115}
116
117/*
118 * helper to check if a given offset is inside a given entry
119 */
120static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
121{
122 if (file_offset < entry->file_offset ||
123 entry->file_offset + entry->len <= file_offset)
124 return 0;
125 return 1;
126}
127
128/*
129 * look find the first ordered struct that has this offset, otherwise
130 * the first one less than this offset
131 */
132static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
133 u64 file_offset)
134{
135 struct rb_root *root = &tree->tree;
136 struct rb_node *prev;
137 struct rb_node *ret;
138 struct btrfs_ordered_extent *entry;
139
140 if (tree->last) {
141 entry = rb_entry(tree->last, struct btrfs_ordered_extent,
142 rb_node);
143 if (offset_in_entry(entry, file_offset))
144 return tree->last;
145 }
146 ret = __tree_search(root, file_offset, &prev);
147 if (!ret)
148 ret = prev;
149 if (ret)
150 tree->last = ret;
151 return ret;
152}
153
154/* allocate and add a new ordered_extent into the per-inode tree.
155 * file_offset is the logical offset in the file
156 *
157 * start is the disk block number of an extent already reserved in the
158 * extent allocation tree
159 *
160 * len is the length of the extent
161 *
162 * This also sets the EXTENT_ORDERED bit on the range in the inode.
163 *
164 * The tree is given a single reference on the ordered extent that was
165 * inserted.
166 */
167int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
168 u64 start, u64 len, u64 disk_len, int type)
169{
170 struct btrfs_ordered_inode_tree *tree;
171 struct rb_node *node;
172 struct btrfs_ordered_extent *entry;
173
174 tree = &BTRFS_I(inode)->ordered_tree;
175 entry = kzalloc(sizeof(*entry), GFP_NOFS);
176 if (!entry)
177 return -ENOMEM;
178
179 mutex_lock(&tree->mutex);
180 entry->file_offset = file_offset;
181 entry->start = start;
182 entry->len = len;
183 entry->disk_len = disk_len;
184 entry->inode = inode;
185 if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
186 set_bit(type, &entry->flags);
187
188 /* one ref for the tree */
189 atomic_set(&entry->refs, 1);
190 init_waitqueue_head(&entry->wait);
191 INIT_LIST_HEAD(&entry->list);
192 INIT_LIST_HEAD(&entry->root_extent_list);
193
194 node = tree_insert(&tree->tree, file_offset,
195 &entry->rb_node);
196 BUG_ON(node);
197
198 set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
199 entry_end(entry) - 1, GFP_NOFS);
200
201 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
202 list_add_tail(&entry->root_extent_list,
203 &BTRFS_I(inode)->root->fs_info->ordered_extents);
204 spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
205
206 mutex_unlock(&tree->mutex);
207
208 return 0;
209}
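
/*
 * A minimal write-path sketch, assuming an extent has already been
 * reserved on disk at disk_bytenr (caller and variable names are
 * hypothetical).  Passing 0 for the type leaves all BTRFS_ORDERED_* flag
 * bits clear, i.e. a plain copy-on-write data extent.
 */
	ret = btrfs_add_ordered_extent(inode, file_pos, disk_bytenr,
				       num_bytes, num_bytes, 0);
	if (ret)
		goto out_free_reserved;	/* hypothetical error path */
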
210
211/*
212 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
213 * when an ordered extent is finished. If the list covers more than one
214 * ordered extent, it is split across multiple ordered extents.
215 */
216int btrfs_add_ordered_sum(struct inode *inode,
217 struct btrfs_ordered_extent *entry,
218 struct btrfs_ordered_sum *sum)
219{
220 struct btrfs_ordered_inode_tree *tree;
221
222 tree = &BTRFS_I(inode)->ordered_tree;
223 mutex_lock(&tree->mutex);
224 list_add_tail(&sum->list, &entry->list);
225 mutex_unlock(&tree->mutex);
226 return 0;
227}
228
229/*
230 * this is used to account for finished IO across a given range
231 * of the file. The IO should not span ordered extents. If
232 * a given ordered_extent is completely done, 1 is returned, otherwise
233 * 0.
234 *
235 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
236 * to make sure this function only returns 1 once for a given ordered extent.
237 */
238int btrfs_dec_test_ordered_pending(struct inode *inode,
239 u64 file_offset, u64 io_size)
240{
241 struct btrfs_ordered_inode_tree *tree;
242 struct rb_node *node;
243 struct btrfs_ordered_extent *entry;
244 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
245 int ret;
246
247 tree = &BTRFS_I(inode)->ordered_tree;
248 mutex_lock(&tree->mutex);
249 clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
250 GFP_NOFS);
251 node = tree_search(tree, file_offset);
252 if (!node) {
253 ret = 1;
254 goto out;
255 }
256
257 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
258 if (!offset_in_entry(entry, file_offset)) {
259 ret = 1;
260 goto out;
261 }
262
263 ret = test_range_bit(io_tree, entry->file_offset,
264 entry->file_offset + entry->len - 1,
265 EXTENT_ORDERED, 0);
266 if (ret == 0)
267 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
268out:
269 mutex_unlock(&tree->mutex);
270 return ret == 0;
271}
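
/*
 * A sketch of the IO-completion side (endio handler and helper names are
 * hypothetical): only the call that finishes the last outstanding range of
 * an extent sees a return value of 1 and performs the cleanup.
 */
	if (btrfs_dec_test_ordered_pending(inode, start, end - start + 1)) {
		struct btrfs_ordered_extent *ordered;

		ordered = btrfs_lookup_ordered_extent(inode, start);
		if (ordered) {
			insert_ordered_metadata(inode, ordered); /* hypothetical */
			btrfs_remove_ordered_extent(inode, ordered);
			/* once for our lookup ref, once for the tree's ref */
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
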
272
273/*
274 * used to drop a reference on an ordered extent. This will free
275 * the extent if the last reference is dropped
276 */
277int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
278{
279 struct list_head *cur;
280 struct btrfs_ordered_sum *sum;
281
282 if (atomic_dec_and_test(&entry->refs)) {
283 while (!list_empty(&entry->list)) {
284 cur = entry->list.next;
285 sum = list_entry(cur, struct btrfs_ordered_sum, list);
286 list_del(&sum->list);
287 kfree(sum);
288 }
289 kfree(entry);
290 }
291 return 0;
292}
293
294/*
295 * remove an ordered extent from the tree. No references are dropped
296 * but, anyone waiting on this extent is woken up.
297 */
298int btrfs_remove_ordered_extent(struct inode *inode,
299 struct btrfs_ordered_extent *entry)
300{
301 struct btrfs_ordered_inode_tree *tree;
302 struct rb_node *node;
303
304 tree = &BTRFS_I(inode)->ordered_tree;
305 mutex_lock(&tree->mutex);
306 node = &entry->rb_node;
307 rb_erase(node, &tree->tree);
308 tree->last = NULL;
309 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
310
311 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
312 list_del_init(&entry->root_extent_list);
313 spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
314
315 mutex_unlock(&tree->mutex);
316 wake_up(&entry->wait);
317 return 0;
318}
319
320/*
321 * wait for all the ordered extents in a root. This is done when balancing
322 * space between drives.
323 */
324int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only)
325{
326 struct list_head splice;
327 struct list_head *cur;
328 struct btrfs_ordered_extent *ordered;
329 struct inode *inode;
330
331 INIT_LIST_HEAD(&splice);
332
333 spin_lock(&root->fs_info->ordered_extent_lock);
334 list_splice_init(&root->fs_info->ordered_extents, &splice);
335 while (!list_empty(&splice)) {
336 cur = splice.next;
337 ordered = list_entry(cur, struct btrfs_ordered_extent,
338 root_extent_list);
339 if (nocow_only &&
340 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
341 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
342 list_move(&ordered->root_extent_list,
343 &root->fs_info->ordered_extents);
344 cond_resched_lock(&root->fs_info->ordered_extent_lock);
345 continue;
346 }
347
348 list_del_init(&ordered->root_extent_list);
349 atomic_inc(&ordered->refs);
350
351 /*
352 * the inode may be getting freed (in sys_unlink path).
353 */
354 inode = igrab(ordered->inode);
355
356 spin_unlock(&root->fs_info->ordered_extent_lock);
357
358 if (inode) {
359 btrfs_start_ordered_extent(inode, ordered, 1);
360 btrfs_put_ordered_extent(ordered);
361 iput(inode);
362 } else {
363 btrfs_put_ordered_extent(ordered);
364 }
365
366 spin_lock(&root->fs_info->ordered_extent_lock);
367 }
368 spin_unlock(&root->fs_info->ordered_extent_lock);
369 return 0;
370}
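
/*
 * The loop above uses a common kernel pattern: steal the whole list with
 * list_splice_init() so each entry can be processed with the lock dropped.
 * The same pattern in isolation (generic sketch, names hypothetical):
 */
	struct item_type *item;	/* hypothetical */
	LIST_HEAD(splice);

	spin_lock(&lock);
	list_splice_init(&pending, &splice);	/* steal the whole list */
	while (!list_empty(&splice)) {
		item = list_entry(splice.next, struct item_type, list);
		list_del_init(&item->list);
		spin_unlock(&lock);	/* item is private now */
		process(item);		/* may sleep */
		spin_lock(&lock);
	}
	spin_unlock(&lock);
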
371
372/*
373 * Used to start IO or wait for a given ordered extent to finish.
374 *
375 * If wait is one, this effectively waits on page writeback for all the pages
376 * in the extent, and it waits on the io completion code to insert
377 * metadata into the btree corresponding to the extent
378 */
379void btrfs_start_ordered_extent(struct inode *inode,
380 struct btrfs_ordered_extent *entry,
381 int wait)
382{
383 u64 start = entry->file_offset;
384 u64 end = start + entry->len - 1;
385
386 /*
387 * pages in the range can be dirty, clean or writeback. We
388 * start IO on any dirty ones so the wait doesn't stall waiting
389 * for pdflush to find them
390 */
391 btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL);
392 if (wait) {
393 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
394 &entry->flags));
395 }
396}
397
398/*
399 * Used to wait on ordered extents across a large range of bytes.
400 */
401int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
402{
403 u64 end;
404 u64 orig_end;
405 u64 wait_end;
406 struct btrfs_ordered_extent *ordered;
407
408 if (start + len < start) {
409 orig_end = INT_LIMIT(loff_t);
410 } else {
411 orig_end = start + len - 1;
412 if (orig_end > INT_LIMIT(loff_t))
413 orig_end = INT_LIMIT(loff_t);
414 }
415 wait_end = orig_end;
416again:
417 /* start IO across the range first to instantiate any delalloc
418 * extents
419 */
420 btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_NONE);
421
422 /* The compression code will leave pages locked but return from
423 * writepage without marking the pages writeback. Starting again
424 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
425 */
426 btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);
427
428 btrfs_wait_on_page_writeback_range(inode->i_mapping,
429 start >> PAGE_CACHE_SHIFT,
430 orig_end >> PAGE_CACHE_SHIFT);
431
432 end = orig_end;
433 while (1) {
434 ordered = btrfs_lookup_first_ordered_extent(inode, end);
435 if (!ordered)
436 break;
437 if (ordered->file_offset > orig_end) {
438 btrfs_put_ordered_extent(ordered);
439 break;
440 }
441 if (ordered->file_offset + ordered->len < start) {
442 btrfs_put_ordered_extent(ordered);
443 break;
444 }
445 btrfs_start_ordered_extent(inode, ordered, 1);
446 end = ordered->file_offset;
447 btrfs_put_ordered_extent(ordered);
448 if (end == 0 || end == start)
449 break;
450 end--;
451 }
452 if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
453 EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
454 schedule_timeout(1);
455 goto again;
456 }
457 return 0;
458}
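
/*
 * An fsync-style caller can flush and wait on an entire file in one call;
 * the code above clamps an overlong range to the maximum loff_t:
 */
	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
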
459
460/*
461 * find an ordered extent corresponding to file_offset. return NULL if
462 * nothing is found, otherwise take a reference on the extent and return it
463 */
464struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
465 u64 file_offset)
466{
467 struct btrfs_ordered_inode_tree *tree;
468 struct rb_node *node;
469 struct btrfs_ordered_extent *entry = NULL;
470
471 tree = &BTRFS_I(inode)->ordered_tree;
472 mutex_lock(&tree->mutex);
473 node = tree_search(tree, file_offset);
474 if (!node)
475 goto out;
476
477 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
478 if (!offset_in_entry(entry, file_offset))
479 entry = NULL;
480 if (entry)
481 atomic_inc(&entry->refs);
482out:
483 mutex_unlock(&tree->mutex);
484 return entry;
485}
486
487/*
488 * lookup and return any extent before 'file_offset'. NULL is returned
489 * if none is found
490 */
491struct btrfs_ordered_extent *
492btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
493{
494 struct btrfs_ordered_inode_tree *tree;
495 struct rb_node *node;
496 struct btrfs_ordered_extent *entry = NULL;
497
498 tree = &BTRFS_I(inode)->ordered_tree;
499 mutex_lock(&tree->mutex);
500 node = tree_search(tree, file_offset);
501 if (!node)
502 goto out;
503
504 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
505 atomic_inc(&entry->refs);
506out:
507 mutex_unlock(&tree->mutex);
508 return entry;
509}
510
511/*
512 * After an extent is done, call this to conditionally update the on disk
513 * i_size. i_size is updated to cover any fully written part of the file.
514 */
515int btrfs_ordered_update_i_size(struct inode *inode,
516 struct btrfs_ordered_extent *ordered)
517{
518 struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
519 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
520 u64 disk_i_size;
521 u64 new_i_size;
522 u64 i_size_test;
523 struct rb_node *node;
524 struct btrfs_ordered_extent *test;
525
526 mutex_lock(&tree->mutex);
527 disk_i_size = BTRFS_I(inode)->disk_i_size;
528
529 /*
530 * if the disk i_size is already at the inode->i_size, or
531 * this ordered extent is inside the disk i_size, we're done
532 */
533 if (disk_i_size >= inode->i_size ||
534 ordered->file_offset + ordered->len <= disk_i_size) {
535 goto out;
536 }
537
538 /*
539 * we can't update disk_i_size if there are delalloc bytes
540 * between disk_i_size and this ordered extent
541 */
542 if (test_range_bit(io_tree, disk_i_size,
543 ordered->file_offset + ordered->len - 1,
544 EXTENT_DELALLOC, 0)) {
545 goto out;
546 }
547 /*
548 * walk backward from this ordered extent to disk_i_size.
549 * if we find an ordered extent then we can't update disk i_size
550 * yet
551 */
552 node = &ordered->rb_node;
553 while (1) {
554 node = rb_prev(node);
555 if (!node)
556 break;
557 test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
558 if (test->file_offset + test->len <= disk_i_size)
559 break;
560 if (test->file_offset >= inode->i_size)
561 break;
562 if (test->file_offset >= disk_i_size)
563 goto out;
564 }
565 new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));
566
567 /*
568 * at this point, we know we can safely update i_size to at least
569 * the offset from this ordered extent. But, we need to
570 * walk forward and see if ios from higher up in the file have
571 * finished.
572 */
573 node = rb_next(&ordered->rb_node);
574 i_size_test = 0;
575 if (node) {
576 /*
577 * do we have an area where IO might have finished
578 * between our ordered extent and the next one?
579 */
580 test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
581 if (test->file_offset > entry_end(ordered))
582 i_size_test = test->file_offset;
583 } else {
584 i_size_test = i_size_read(inode);
585 }
586
587 /*
588 * i_size_test is the end of a region after this ordered
589 * extent where there are no ordered extents. As long as there
590 * are no delalloc bytes in this area, it is safe to update
591 * disk_i_size to the end of the region.
592 */
593 if (i_size_test > entry_end(ordered) &&
594 !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
595 EXTENT_DELALLOC, 0)) {
596 new_i_size = min_t(u64, i_size_test, i_size_read(inode));
597 }
598 BTRFS_I(inode)->disk_i_size = new_i_size;
599out:
600 mutex_unlock(&tree->mutex);
601 return 0;
602}
603
604/*
605 * search the ordered extents for one corresponding to 'offset' and
606 * try to find a checksum. This is used because we allow pages to
607 * be reclaimed before their checksum is actually put into the btree
608 */
609int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
610 u32 *sum)
611{
612 struct btrfs_ordered_sum *ordered_sum;
613 struct btrfs_sector_sum *sector_sums;
614 struct btrfs_ordered_extent *ordered;
615 struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
616 struct list_head *cur;
617 unsigned long num_sectors;
618 unsigned long i;
619 u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
620 int ret = 1;
621
622 ordered = btrfs_lookup_ordered_extent(inode, offset);
623 if (!ordered)
624 return 1;
625
626 mutex_lock(&tree->mutex);
627 list_for_each_prev(cur, &ordered->list) {
628 ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list);
629 if (disk_bytenr >= ordered_sum->bytenr) {
630 num_sectors = ordered_sum->len / sectorsize;
631 sector_sums = ordered_sum->sums;
632 for (i = 0; i < num_sectors; i++) {
633 if (sector_sums[i].bytenr == disk_bytenr) {
634 *sum = sector_sums[i].sum;
635 ret = 0;
636 goto out;
637 }
638 }
639 }
640 }
641out:
642 mutex_unlock(&tree->mutex);
643 btrfs_put_ordered_extent(ordered);
644 return ret;
645}
646
647
648/**
649 * taken from mm/filemap.c because it isn't exported
650 *
651 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
652 * @mapping: address space structure to write
653 * @start: offset in bytes where the range starts
654 * @end: offset in bytes where the range ends (inclusive)
655 * @sync_mode: enable synchronous operation
656 *
657 * Start writeback against all of a mapping's dirty pages that lie
658 * within the byte offsets <start, end> inclusive.
659 *
660 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
661 * opposed to a regular memory cleansing writeback. The difference between
662 * these two operations is that if a dirty page/buffer is encountered, it must
663 * be waited upon, and not just skipped over.
664 */
665int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
666 loff_t end, int sync_mode)
667{
668 struct writeback_control wbc = {
669 .sync_mode = sync_mode,
670 .nr_to_write = mapping->nrpages * 2,
671 .range_start = start,
672 .range_end = end,
673 .for_writepages = 1,
674 };
675 return btrfs_writepages(mapping, &wbc);
676}
677
678/**
679 * taken from mm/filemap.c because it isn't exported
680 *
681 * wait_on_page_writeback_range - wait for writeback to complete
682 * @mapping: target address_space
683 * @start: beginning page index
684 * @end: ending page index
685 *
686 * Wait for writeback to complete against pages indexed by start->end
687 * inclusive
688 */
689int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
690 pgoff_t start, pgoff_t end)
691{
692 struct pagevec pvec;
693 int nr_pages;
694 int ret = 0;
695 pgoff_t index;
696
697 if (end < start)
698 return 0;
699
700 pagevec_init(&pvec, 0);
701 index = start;
702 while ((index <= end) &&
703 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
704 PAGECACHE_TAG_WRITEBACK,
705 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
706 unsigned i;
707
708 for (i = 0; i < nr_pages; i++) {
709 struct page *page = pvec.pages[i];
710
711 /* until radix tree lookup accepts end_index */
712 if (page->index > end)
713 continue;
714
715 wait_on_page_writeback(page);
716 if (PageError(page))
717 ret = -EIO;
718 }
719 pagevec_release(&pvec);
720 cond_resched();
721 }
722
723 /* Check for outstanding write errors */
724 if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
725 ret = -ENOSPC;
726 if (test_and_clear_bit(AS_EIO, &mapping->flags))
727 ret = -EIO;
728
729 return ret;
730}
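
/*
 * Together the two helpers form a data-integrity flush for a byte range,
 * equivalent in spirit to the mm layer's write-and-wait helpers.  A sketch,
 * variables hypothetical:
 */
	err = btrfs_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
	if (!err)
		err = btrfs_wait_on_page_writeback_range(mapping,
						start >> PAGE_CACHE_SHIFT,
						end >> PAGE_CACHE_SHIFT);
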
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
new file mode 100644
index 000000000000..ab66d5e8d6d6
--- /dev/null
+++ b/fs/btrfs/ordered-data.h
@@ -0,0 +1,158 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#ifndef __BTRFS_ORDERED_DATA__
20#define __BTRFS_ORDERED_DATA__
21
22/* one of these per inode */
23struct btrfs_ordered_inode_tree {
24 struct mutex mutex;
25 struct rb_root tree;
26 struct rb_node *last;
27};
28
29/*
30 * these are used to collect checksums done just before bios submission.
31 * They are attached via a list into the ordered extent, and
32 * checksum items are inserted into the tree after all the blocks in
33 * the ordered extent are on disk
34 */
35struct btrfs_sector_sum {
36 /* bytenr on disk */
37 u64 bytenr;
38 u32 sum;
39};
40
41struct btrfs_ordered_sum {
42 /* bytenr is the start of this extent on disk */
43 u64 bytenr;
44
45 /*
46 * this is the length in bytes covered by the sums array below.
47 */
48 unsigned long len;
49 struct list_head list;
50 /* last field is a variable length array of btrfs_sector_sums */
51 struct btrfs_sector_sum sums[];
52};
53
54/*
55 * bits for the flags field:
56 *
57 * BTRFS_ORDERED_IO_DONE is set when all of the blocks are written.
58 * It is used to make sure metadata is inserted into the tree only once
59 * per extent.
60 *
61 * BTRFS_ORDERED_COMPLETE is set when the extent is removed from the
62 * rbtree, just before waking any waiters. It is used to indicate the
63 * IO is done and any metadata is inserted into the tree.
64 */
65#define BTRFS_ORDERED_IO_DONE 0 /* set when all the pages are written */
66
67#define BTRFS_ORDERED_COMPLETE 1 /* set when removed from the tree */
68
69#define BTRFS_ORDERED_NOCOW 2 /* set when we want to write in place */
70
71#define BTRFS_ORDERED_COMPRESSED 3 /* writing a compressed extent */
72
73#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */
74
75struct btrfs_ordered_extent {
76 /* logical offset in the file */
77 u64 file_offset;
78
79 /* disk byte number */
80 u64 start;
81
82 /* ram length of the extent in bytes */
83 u64 len;
84
85 /* extent length on disk */
86 u64 disk_len;
87
88 /* flags (described above) */
89 unsigned long flags;
90
91 /* reference count */
92 atomic_t refs;
93
94 /* the inode we belong to */
95 struct inode *inode;
96
97 /* list of checksums for insertion when the extent io is done */
98 struct list_head list;
99
100 /* used to wait for the BTRFS_ORDERED_COMPLETE bit */
101 wait_queue_head_t wait;
102
103 /* our friendly rbtree entry */
104 struct rb_node rb_node;
105
106 /* a per root list of all the pending ordered extents */
107 struct list_head root_extent_list;
108};
109
110
111/*
112 * calculates the total size you need to allocate for an ordered sum
113 * structure spanning 'bytes' in the file
114 */
115static inline int btrfs_ordered_sum_size(struct btrfs_root *root,
116 unsigned long bytes)
117{
118 unsigned long num_sectors = (bytes + root->sectorsize - 1) /
119 root->sectorsize;
120 num_sectors++;
121 return sizeof(struct btrfs_ordered_sum) +
122 num_sectors * sizeof(struct btrfs_sector_sum);
123}
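
/*
 * A worked example, assuming root->sectorsize == 4096: for bytes == 12288,
 * num_sectors = (12288 + 4095) / 4096 = 3, plus the one extra slot added
 * above = 4, so the allocation covers the header plus four sector sums.
 */
	struct btrfs_ordered_sum *sums;

	sums = kzalloc(btrfs_ordered_sum_size(root, 12288), GFP_NOFS);
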
124
125static inline void
126btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
127{
128 mutex_init(&t->mutex);
129 t->tree.rb_node = NULL;
130 t->last = NULL;
131}
132
133int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
134int btrfs_remove_ordered_extent(struct inode *inode,
135 struct btrfs_ordered_extent *entry);
136int btrfs_dec_test_ordered_pending(struct inode *inode,
137 u64 file_offset, u64 io_size);
138int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
139 u64 start, u64 len, u64 disk_len, int type);
140int btrfs_add_ordered_sum(struct inode *inode,
141 struct btrfs_ordered_extent *entry,
142 struct btrfs_ordered_sum *sum);
143struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
144 u64 file_offset);
145void btrfs_start_ordered_extent(struct inode *inode,
146 struct btrfs_ordered_extent *entry, int wait);
147int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
148struct btrfs_ordered_extent *
149btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
150int btrfs_ordered_update_i_size(struct inode *inode,
151 struct btrfs_ordered_extent *ordered);
152int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
153int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
154 pgoff_t start, pgoff_t end);
155int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
156 loff_t end, int sync_mode);
157int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);
158#endif
diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c
new file mode 100644
index 000000000000..3c0d52af4f80
--- /dev/null
+++ b/fs/btrfs/orphan.c
@@ -0,0 +1,67 @@
1/*
2 * Copyright (C) 2008 Red Hat. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include "ctree.h"
20#include "disk-io.h"
21
22int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
23 struct btrfs_root *root, u64 offset)
24{
25 struct btrfs_path *path;
26 struct btrfs_key key;
27 int ret = 0;
28
29 key.objectid = BTRFS_ORPHAN_OBJECTID;
30 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
31 key.offset = offset;
32
33 path = btrfs_alloc_path();
34 if (!path)
35 return -ENOMEM;
36
37 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
38
39 btrfs_free_path(path);
40 return ret;
41}
42
43int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
44 struct btrfs_root *root, u64 offset)
45{
46 struct btrfs_path *path;
47 struct btrfs_key key;
48 int ret = 0;
49
50 key.objectid = BTRFS_ORPHAN_OBJECTID;
51 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
52 key.offset = offset;
53
54 path = btrfs_alloc_path();
55 if (!path)
56 return -ENOMEM;
57
58 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
59 if (ret)
60 goto out;
61
62 ret = btrfs_del_item(trans, root, path);
63
64out:
65 btrfs_free_path(path);
66 return ret;
67}
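
/*
 * A sketch of the expected pairing during unlink (transaction handling
 * hypothetical): the marker is inserted while the inode still has state on
 * disk, and removed once the inode has been fully deleted.
 */
	ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
	if (ret)
		return ret;
	/* ... drop the last link, truncate the inode's items ... */
	ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
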
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
new file mode 100644
index 000000000000..5f8f218c1005
--- /dev/null
+++ b/fs/btrfs/print-tree.c
@@ -0,0 +1,216 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include "ctree.h"
20#include "disk-io.h"
21#include "print-tree.h"
22
23static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk)
24{
25 int num_stripes = btrfs_chunk_num_stripes(eb, chunk);
26 int i;
27 printk(KERN_INFO "\t\tchunk length %llu owner %llu type %llu "
28 "num_stripes %d\n",
29 (unsigned long long)btrfs_chunk_length(eb, chunk),
30 (unsigned long long)btrfs_chunk_owner(eb, chunk),
31 (unsigned long long)btrfs_chunk_type(eb, chunk),
32 num_stripes);
33 for (i = 0; i < num_stripes; i++) {
34 printk(KERN_INFO "\t\t\tstripe %d devid %llu offset %llu\n", i,
35 (unsigned long long)btrfs_stripe_devid_nr(eb, chunk, i),
36 (unsigned long long)btrfs_stripe_offset_nr(eb, chunk, i));
37 }
38}
39static void print_dev_item(struct extent_buffer *eb,
40 struct btrfs_dev_item *dev_item)
41{
42 printk(KERN_INFO "\t\tdev item devid %llu "
43 "total_bytes %llu bytes used %llu\n",
44 (unsigned long long)btrfs_device_id(eb, dev_item),
45 (unsigned long long)btrfs_device_total_bytes(eb, dev_item),
46 (unsigned long long)btrfs_device_bytes_used(eb, dev_item));
47}
48void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
49{
50 int i;
51 u32 nr = btrfs_header_nritems(l);
52 struct btrfs_item *item;
53 struct btrfs_extent_item *ei;
54 struct btrfs_root_item *ri;
55 struct btrfs_dir_item *di;
56 struct btrfs_inode_item *ii;
57 struct btrfs_block_group_item *bi;
58 struct btrfs_file_extent_item *fi;
59 struct btrfs_key key;
60 struct btrfs_key found_key;
61 struct btrfs_extent_ref *ref;
62 struct btrfs_dev_extent *dev_extent;
63 u32 type;
64
65 printk(KERN_INFO "leaf %llu total ptrs %d free space %d\n",
66 (unsigned long long)btrfs_header_bytenr(l), nr,
67 btrfs_leaf_free_space(root, l));
68 for (i = 0; i < nr; i++) {
69 item = btrfs_item_nr(l, i);
70 btrfs_item_key_to_cpu(l, &key, i);
71 type = btrfs_key_type(&key);
72 printk(KERN_INFO "\titem %d key (%llu %x %llu) itemoff %d "
73 "itemsize %d\n",
74 i,
75 (unsigned long long)key.objectid, type,
76 (unsigned long long)key.offset,
77 btrfs_item_offset(l, item), btrfs_item_size(l, item));
78 switch (type) {
79 case BTRFS_INODE_ITEM_KEY:
80 ii = btrfs_item_ptr(l, i, struct btrfs_inode_item);
81 printk(KERN_INFO "\t\tinode generation %llu size %llu "
82 "mode %o\n",
83 (unsigned long long)
84 btrfs_inode_generation(l, ii),
85 (unsigned long long)btrfs_inode_size(l, ii),
86 btrfs_inode_mode(l, ii));
87 break;
88 case BTRFS_DIR_ITEM_KEY:
89 di = btrfs_item_ptr(l, i, struct btrfs_dir_item);
90 btrfs_dir_item_key_to_cpu(l, di, &found_key);
91 printk(KERN_INFO "\t\tdir oid %llu type %u\n",
92 (unsigned long long)found_key.objectid,
93 btrfs_dir_type(l, di));
94 break;
95 case BTRFS_ROOT_ITEM_KEY:
96 ri = btrfs_item_ptr(l, i, struct btrfs_root_item);
97 printk(KERN_INFO "\t\troot data bytenr %llu refs %u\n",
98 (unsigned long long)
99 btrfs_disk_root_bytenr(l, ri),
100 btrfs_disk_root_refs(l, ri));
101 break;
102 case BTRFS_EXTENT_ITEM_KEY:
103 ei = btrfs_item_ptr(l, i, struct btrfs_extent_item);
104 printk(KERN_INFO "\t\textent data refs %u\n",
105 btrfs_extent_refs(l, ei));
106 break;
107 case BTRFS_EXTENT_REF_KEY:
108 ref = btrfs_item_ptr(l, i, struct btrfs_extent_ref);
109 printk(KERN_INFO "\t\textent back ref root %llu "
110 "gen %llu owner %llu num_refs %lu\n",
111 (unsigned long long)btrfs_ref_root(l, ref),
112 (unsigned long long)btrfs_ref_generation(l, ref),
113 (unsigned long long)btrfs_ref_objectid(l, ref),
114 (unsigned long)btrfs_ref_num_refs(l, ref));
115 break;
116
117 case BTRFS_EXTENT_DATA_KEY:
118 fi = btrfs_item_ptr(l, i,
119 struct btrfs_file_extent_item);
120 if (btrfs_file_extent_type(l, fi) ==
121 BTRFS_FILE_EXTENT_INLINE) {
122 printk(KERN_INFO "\t\tinline extent data "
123 "size %u\n",
124 btrfs_file_extent_inline_len(l, fi));
125 break;
126 }
127 printk(KERN_INFO "\t\textent data disk bytenr %llu "
128 "nr %llu\n",
129 (unsigned long long)
130 btrfs_file_extent_disk_bytenr(l, fi),
131 (unsigned long long)
132 btrfs_file_extent_disk_num_bytes(l, fi));
133 printk(KERN_INFO "\t\textent data offset %llu "
134 "nr %llu ram %llu\n",
135 (unsigned long long)
136 btrfs_file_extent_offset(l, fi),
137 (unsigned long long)
138 btrfs_file_extent_num_bytes(l, fi),
139 (unsigned long long)
140 btrfs_file_extent_ram_bytes(l, fi));
141 break;
142 case BTRFS_BLOCK_GROUP_ITEM_KEY:
143 bi = btrfs_item_ptr(l, i,
144 struct btrfs_block_group_item);
145 printk(KERN_INFO "\t\tblock group used %llu\n",
146 (unsigned long long)
147 btrfs_disk_block_group_used(l, bi));
148 break;
149 case BTRFS_CHUNK_ITEM_KEY:
150 print_chunk(l, btrfs_item_ptr(l, i,
151 struct btrfs_chunk));
152 break;
153 case BTRFS_DEV_ITEM_KEY:
154 print_dev_item(l, btrfs_item_ptr(l, i,
155 struct btrfs_dev_item));
156 break;
157 case BTRFS_DEV_EXTENT_KEY:
158 dev_extent = btrfs_item_ptr(l, i,
159 struct btrfs_dev_extent);
160 printk(KERN_INFO "\t\tdev extent chunk_tree %llu\n"
161 "\t\tchunk objectid %llu chunk offset %llu "
162 "length %llu\n",
163 (unsigned long long)
164 btrfs_dev_extent_chunk_tree(l, dev_extent),
165 (unsigned long long)
166 btrfs_dev_extent_chunk_objectid(l, dev_extent),
167 (unsigned long long)
168 btrfs_dev_extent_chunk_offset(l, dev_extent),
169 (unsigned long long)
170 btrfs_dev_extent_length(l, dev_extent));
171 }
172 }
173}
174
175void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
176{
177 int i; u32 nr;
178 struct btrfs_key key;
179 int level;
180
181 if (!c)
182 return;
183 nr = btrfs_header_nritems(c);
184 level = btrfs_header_level(c);
185 if (level == 0) {
186 btrfs_print_leaf(root, c);
187 return;
188 }
189 printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n",
190 (unsigned long long)btrfs_header_bytenr(c),
191 btrfs_header_level(c), nr,
192 (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
193 for (i = 0; i < nr; i++) {
194 btrfs_node_key_to_cpu(c, &key, i);
195 printk(KERN_INFO "\tkey %d (%llu %u %llu) block %llu\n",
196 i,
197 (unsigned long long)key.objectid,
198 key.type,
199 (unsigned long long)key.offset,
200 (unsigned long long)btrfs_node_blockptr(c, i));
201 }
202 for (i = 0; i < nr; i++) {
203 struct extent_buffer *next = read_tree_block(root,
204 btrfs_node_blockptr(c, i),
205 btrfs_level_size(root, level - 1),
206 btrfs_node_ptr_generation(c, i));
207 if (btrfs_is_leaf(next) &&
208 btrfs_header_level(c) != 1)
209 BUG();
210 if (btrfs_header_level(next) !=
211 btrfs_header_level(c) - 1)
212 BUG();
213 btrfs_print_tree(root, next);
214 free_extent_buffer(next);
215 }
216}
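
/*
 * A debugging sketch: dump an entire tree starting from its root node.
 * Expensive, since the recursion above reads every block in the tree.
 */
	btrfs_print_tree(root, root->node);
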
diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h
new file mode 100644
index 000000000000..da75efe534d5
--- /dev/null
+++ b/fs/btrfs/print-tree.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#ifndef __PRINT_TREE_
20#define __PRINT_TREE_
21void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l);
22void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *t);
23#endif
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
new file mode 100644
index 000000000000..6f0acc4c9eab
--- /dev/null
+++ b/fs/btrfs/ref-cache.c
@@ -0,0 +1,230 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include "ctree.h"
21#include "ref-cache.h"
22#include "transaction.h"
23
24/*
25 * leaf refs are used to cache the information about which extents
26 * a given leaf has references on. This allows us to process that leaf
27 * in btrfs_drop_snapshot without needing to read it back from disk.
28 */
29
30/*
31 * kmalloc a leaf reference struct and update the counters for the
32 * total ref cache size
33 */
34struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
35 int nr_extents)
36{
37 struct btrfs_leaf_ref *ref;
38 size_t size = btrfs_leaf_ref_size(nr_extents);
39
40 ref = kmalloc(size, GFP_NOFS);
41 if (ref) {
42 spin_lock(&root->fs_info->ref_cache_lock);
43 root->fs_info->total_ref_cache_size += size;
44 spin_unlock(&root->fs_info->ref_cache_lock);
45
46 memset(ref, 0, sizeof(*ref));
47 atomic_set(&ref->usage, 1);
48 INIT_LIST_HEAD(&ref->list);
49 }
50 return ref;
51}
52
53/*
54 * free a leaf reference struct and update the counters for the
55 * total ref cache size
56 */
57void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
58{
59 if (!ref)
60 return;
61 WARN_ON(atomic_read(&ref->usage) == 0);
62 if (atomic_dec_and_test(&ref->usage)) {
63 size_t size = btrfs_leaf_ref_size(ref->nritems);
64
65 BUG_ON(ref->in_tree);
66 kfree(ref);
67
68 spin_lock(&root->fs_info->ref_cache_lock);
69 root->fs_info->total_ref_cache_size -= size;
70 spin_unlock(&root->fs_info->ref_cache_lock);
71 }
72}
73
74static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
75 struct rb_node *node)
76{
77 struct rb_node **p = &root->rb_node;
78 struct rb_node *parent = NULL;
79 struct btrfs_leaf_ref *entry;
80
81 while (*p) {
82 parent = *p;
83 entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
84
85 if (bytenr < entry->bytenr)
86 p = &(*p)->rb_left;
87 else if (bytenr > entry->bytenr)
88 p = &(*p)->rb_right;
89 else
90 return parent;
91 }
92
93 entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
94 rb_link_node(node, parent, p);
95 rb_insert_color(node, root);
96 return NULL;
97}
98
99static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
100{
101 struct rb_node *n = root->rb_node;
102 struct btrfs_leaf_ref *entry;
103
104 while (n) {
105 entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
106 WARN_ON(!entry->in_tree);
107
108 if (bytenr < entry->bytenr)
109 n = n->rb_left;
110 else if (bytenr > entry->bytenr)
111 n = n->rb_right;
112 else
113 return n;
114 }
115 return NULL;
116}
117
118int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
119 int shared)
120{
121 struct btrfs_leaf_ref *ref = NULL;
122 struct btrfs_leaf_ref_tree *tree = root->ref_tree;
123
124 if (shared)
125 tree = &root->fs_info->shared_ref_tree;
126 if (!tree)
127 return 0;
128
129 spin_lock(&tree->lock);
130 while (!list_empty(&tree->list)) {
131 ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
132 BUG_ON(ref->tree != tree);
133 if (ref->root_gen > max_root_gen)
134 break;
135 if (!xchg(&ref->in_tree, 0)) {
136 cond_resched_lock(&tree->lock);
137 continue;
138 }
139
140 rb_erase(&ref->rb_node, &tree->root);
141 list_del_init(&ref->list);
142
143 spin_unlock(&tree->lock);
144 btrfs_free_leaf_ref(root, ref);
145 cond_resched();
146 spin_lock(&tree->lock);
147 }
148 spin_unlock(&tree->lock);
149 return 0;
150}
151
152/*
153 * find the leaf ref for a given extent. This returns the ref struct with
154 * a usage reference incremented
155 */
156struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
157 u64 bytenr)
158{
159 struct rb_node *rb;
160 struct btrfs_leaf_ref *ref = NULL;
161 struct btrfs_leaf_ref_tree *tree = root->ref_tree;
162again:
163 if (tree) {
164 spin_lock(&tree->lock);
165 rb = tree_search(&tree->root, bytenr);
166 if (rb)
167 ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
168 if (ref)
169 atomic_inc(&ref->usage);
170 spin_unlock(&tree->lock);
171 if (ref)
172 return ref;
173 }
174 if (tree != &root->fs_info->shared_ref_tree) {
175 tree = &root->fs_info->shared_ref_tree;
176 goto again;
177 }
178 return NULL;
179}
180
181/*
182 * add a fully filled in leaf ref struct
183 * on success the tree takes its own usage reference on the ref
184 */
185int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
186 int shared)
187{
188 int ret = 0;
189 struct rb_node *rb;
190 struct btrfs_leaf_ref_tree *tree = root->ref_tree;
191
192 if (shared)
193 tree = &root->fs_info->shared_ref_tree;
194
195 spin_lock(&tree->lock);
196 rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
197 if (rb) {
198 ret = -EEXIST;
199 } else {
200 atomic_inc(&ref->usage);
201 ref->tree = tree;
202 ref->in_tree = 1;
203 list_add_tail(&ref->list, &tree->list);
204 }
205 spin_unlock(&tree->lock);
206 return ret;
207}
208
209/*
210 * remove a single leaf ref from the tree. This drops the ref held by the tree
211 * only
212 */
213int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
214{
215 struct btrfs_leaf_ref_tree *tree;
216
217 if (!xchg(&ref->in_tree, 0))
218 return 0;
219
220 tree = ref->tree;
221 spin_lock(&tree->lock);
222
223 rb_erase(&ref->rb_node, &tree->root);
224 list_del_init(&ref->list);
225
226 spin_unlock(&tree->lock);
227
228 btrfs_free_leaf_ref(root, ref);
229 return 0;
230}
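
/*
 * The usage counter and in_tree flag give cached refs a simple lifecycle;
 * a sketch for a caller that just walked a leaf (names hypothetical):
 */
	ref = btrfs_alloc_leaf_ref(root, nr_extents);	/* usage == 1 */
	if (!ref)
		return -ENOMEM;
	ref->root_gen = trans->transid;	/* hypothetical transaction */
	ref->bytenr = leaf_bytenr;
	ref->nritems = nr_extents;
	/* ... fill ref->extents[] from the leaf ... */

	ret = btrfs_add_leaf_ref(root, ref, shared);
	/* on success the tree took its own usage ref; drop ours either way */
	btrfs_free_leaf_ref(root, ref);
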
diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h
new file mode 100644
index 000000000000..16f3183d7c59
--- /dev/null
+++ b/fs/btrfs/ref-cache.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18#ifndef __REFCACHE__
19#define __REFCACHE__
20
21struct btrfs_extent_info {
22 /* bytenr and num_bytes find the extent in the extent allocation tree */
23 u64 bytenr;
24 u64 num_bytes;
25
26 /* objectid and offset find the back reference for the file */
27 u64 objectid;
28 u64 offset;
29};
30
31struct btrfs_leaf_ref {
32 struct rb_node rb_node;
33 struct btrfs_leaf_ref_tree *tree;
34 int in_tree;
35 atomic_t usage;
36
37 u64 root_gen;
38 u64 bytenr;
39 u64 owner;
40 u64 generation;
41 int nritems;
42
43 struct list_head list;
44 struct btrfs_extent_info extents[];
45};
46
47static inline size_t btrfs_leaf_ref_size(int nr_extents)
48{
49 return sizeof(struct btrfs_leaf_ref) +
50 sizeof(struct btrfs_extent_info) * nr_extents;
51}
52
53static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree)
54{
55 tree->root.rb_node = NULL;
56 INIT_LIST_HEAD(&tree->list);
57 spin_lock_init(&tree->lock);
58}
59
60static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree)
61{
62 return RB_EMPTY_ROOT(&tree->root);
63}
64
65
66struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
67 int nr_extents);
68void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
69struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
70 u64 bytenr);
71int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
72 int shared);
73int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
74 int shared);
75int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
76
77#endif
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
new file mode 100644
index 000000000000..b48650de4472
--- /dev/null
+++ b/fs/btrfs/root-tree.c
@@ -0,0 +1,366 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include "ctree.h"
20#include "transaction.h"
21#include "disk-io.h"
22#include "print-tree.h"
23
24/*
25 * search forward for a root, starting with objectid 'search_start'
26 * if a root key is found, the objectid we find is filled into 'found_objectid'
27 * and 0 is returned. < 0 is returned on error, 1 if there is nothing
28 * left in the tree.
29 */
30int btrfs_search_root(struct btrfs_root *root, u64 search_start,
31 u64 *found_objectid)
32{
33 struct btrfs_path *path;
34 struct btrfs_key search_key;
35 int ret;
36
37 root = root->fs_info->tree_root;
38 search_key.objectid = search_start;
39 search_key.type = (u8)-1;
40 search_key.offset = (u64)-1;
41
42 path = btrfs_alloc_path();
43 BUG_ON(!path);
44again:
45 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
46 if (ret < 0)
47 goto out;
48 if (ret == 0) {
49 ret = 1;
50 goto out;
51 }
52 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
53 ret = btrfs_next_leaf(root, path);
54 if (ret)
55 goto out;
56 }
57 btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]);
58 if (search_key.type != BTRFS_ROOT_ITEM_KEY) {
59 search_key.offset++;
60 btrfs_release_path(root, path);
61 goto again;
62 }
63 ret = 0;
64 *found_objectid = search_key.objectid;
65
66out:
67 btrfs_free_path(path);
68 return ret;
69}
70
71/*
72 * lookup the root with the highest offset for a given objectid. The key we do
73 * find is copied into 'key'. If we find something return 0, otherwise 1, < 0
74 * on error.
75 */
76int btrfs_find_last_root(struct btrfs_root *root, u64 objectid,
77 struct btrfs_root_item *item, struct btrfs_key *key)
78{
79 struct btrfs_path *path;
80 struct btrfs_key search_key;
81 struct btrfs_key found_key;
82 struct extent_buffer *l;
83 int ret;
84 int slot;
85
86 search_key.objectid = objectid;
87 search_key.type = BTRFS_ROOT_ITEM_KEY;
88 search_key.offset = (u64)-1;
89
90 path = btrfs_alloc_path();
91 BUG_ON(!path);
92 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
93 if (ret < 0)
94 goto out;
95
96 BUG_ON(ret == 0);
97 l = path->nodes[0];
98 BUG_ON(path->slots[0] == 0);
99 slot = path->slots[0] - 1;
100 btrfs_item_key_to_cpu(l, &found_key, slot);
101 if (found_key.objectid != objectid) {
102 ret = 1;
103 goto out;
104 }
105 read_extent_buffer(l, item, btrfs_item_ptr_offset(l, slot),
106 sizeof(*item));
107 memcpy(key, &found_key, sizeof(found_key));
108 ret = 0;
109out:
110 btrfs_free_path(path);
111 return ret;
112}
113
114/*
115 * copy the data in 'item' into the btree
116 */
117int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
118 *root, struct btrfs_key *key, struct btrfs_root_item
119 *item)
120{
121 struct btrfs_path *path;
122 struct extent_buffer *l;
123 int ret;
124 int slot;
125 unsigned long ptr;
126
127 path = btrfs_alloc_path();
128 BUG_ON(!path);
129 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
130 if (ret < 0)
131 goto out;
132
133 if (ret != 0) {
134 btrfs_print_leaf(root, path->nodes[0]);
135 printk(KERN_CRIT "unable to update root key %llu %u %llu\n",
136 (unsigned long long)key->objectid, key->type,
137 (unsigned long long)key->offset);
138 BUG_ON(1);
139 }
140
141 l = path->nodes[0];
142 slot = path->slots[0];
143 ptr = btrfs_item_ptr_offset(l, slot);
144 write_extent_buffer(l, item, ptr, sizeof(*item));
145 btrfs_mark_buffer_dirty(path->nodes[0]);
146out:
147 btrfs_release_path(root, path);
148 btrfs_free_path(path);
149 return ret;
150}
151
152int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
153 *root, struct btrfs_key *key, struct btrfs_root_item
154 *item)
155{
156 int ret;
157 ret = btrfs_insert_item(trans, root, key, item, sizeof(*item));
158 return ret;
159}
160
161/*
162 * at mount time we want to find all the old transaction snapshots that were in
163 * the process of being deleted if we crashed. This is any root item with an
164 * offset lower than the latest root. They need to be queued for deletion to
165 * finish what was happening when we crashed.
166 */
167int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid,
168 struct btrfs_root *latest)
169{
170 struct btrfs_root *dead_root;
171 struct btrfs_item *item;
172 struct btrfs_root_item *ri;
173 struct btrfs_key key;
174 struct btrfs_key found_key;
175 struct btrfs_path *path;
176 int ret;
177 u32 nritems;
178 struct extent_buffer *leaf;
179 int slot;
180
181 key.objectid = objectid;
182 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
183 key.offset = 0;
184 path = btrfs_alloc_path();
185 if (!path)
186 return -ENOMEM;
187
188again:
189 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
190 if (ret < 0)
191 goto err;
192 while (1) {
193 leaf = path->nodes[0];
194 nritems = btrfs_header_nritems(leaf);
195 slot = path->slots[0];
196 if (slot >= nritems) {
197 ret = btrfs_next_leaf(root, path);
198 if (ret)
199 break;
200 leaf = path->nodes[0];
201 nritems = btrfs_header_nritems(leaf);
202 slot = path->slots[0];
203 }
204 item = btrfs_item_nr(leaf, slot);
205 btrfs_item_key_to_cpu(leaf, &key, slot);
206 if (btrfs_key_type(&key) != BTRFS_ROOT_ITEM_KEY)
207 goto next;
208
209 if (key.objectid < objectid)
210 goto next;
211
212 if (key.objectid > objectid)
213 break;
214
215 ri = btrfs_item_ptr(leaf, slot, struct btrfs_root_item);
216 if (btrfs_disk_root_refs(leaf, ri) != 0)
217 goto next;
218
219 memcpy(&found_key, &key, sizeof(key));
220 key.offset++;
221 btrfs_release_path(root, path);
222 dead_root =
223 btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
224 &found_key);
225 if (IS_ERR(dead_root)) {
226 ret = PTR_ERR(dead_root);
227 goto err;
228 }
229
230 if (objectid == BTRFS_TREE_RELOC_OBJECTID)
231 ret = btrfs_add_dead_reloc_root(dead_root);
232 else
233 ret = btrfs_add_dead_root(dead_root, latest);
234 if (ret)
235 goto err;
236 goto again;
237next:
238 slot++;
239 path->slots[0]++;
240 }
241 ret = 0;
242err:
243 btrfs_free_path(path);
244 return ret;
245}
246
247/* drop the root item for 'key' from 'root' */
248int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
249 struct btrfs_key *key)
250{
251 struct btrfs_path *path;
252 int ret;
253 u32 refs;
254 struct btrfs_root_item *ri;
255 struct extent_buffer *leaf;
256
257 path = btrfs_alloc_path();
258 BUG_ON(!path);
259 ret = btrfs_search_slot(trans, root, key, path, -1, 1);
260 if (ret < 0)
261 goto out;
262
263 BUG_ON(ret != 0);
264 leaf = path->nodes[0];
265 ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item);
266
267 refs = btrfs_disk_root_refs(leaf, ri);
268 BUG_ON(refs != 0);
269 ret = btrfs_del_item(trans, root, path);
270out:
271 btrfs_release_path(root, path);
272 btrfs_free_path(path);
273 return ret;
274}
275
276#if 0 /* this will get used when snapshot deletion is implemented */
277int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
278 struct btrfs_root *tree_root,
279 u64 root_id, u8 type, u64 ref_id)
280{
281 struct btrfs_key key;
282 int ret;
283 struct btrfs_path *path;
284
285 path = btrfs_alloc_path();
286
287 key.objectid = root_id;
288 key.type = type;
289 key.offset = ref_id;
290
291 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
292 BUG_ON(ret);
293
294 ret = btrfs_del_item(trans, tree_root, path);
295 BUG_ON(ret);
296
297 btrfs_free_path(path);
298 return ret;
299}
300#endif
301
302int btrfs_find_root_ref(struct btrfs_root *tree_root,
303 struct btrfs_path *path,
304 u64 root_id, u64 ref_id)
305{
306 struct btrfs_key key;
307 int ret;
308
309 key.objectid = root_id;
310 key.type = BTRFS_ROOT_REF_KEY;
311 key.offset = ref_id;
312
313 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
314 return ret;
315}
316
317
318/*
319 * add a btrfs_root_ref item. type is either BTRFS_ROOT_REF_KEY
320 * or BTRFS_ROOT_BACKREF_KEY.
321 *
322 * The dirid, sequence, name and name_len refer to the directory entry
323 * that is referencing the root.
324 *
325 * For a forward ref, the root_id is the id of the tree referencing
326 * the root and ref_id is the id of the subvol or snapshot.
327 *
328 * For a back ref the root_id is the id of the subvol or snapshot and
329 * ref_id is the id of the tree referencing it.
330 */
331int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
332 struct btrfs_root *tree_root,
333 u64 root_id, u8 type, u64 ref_id,
334 u64 dirid, u64 sequence,
335 const char *name, int name_len)
336{
337 struct btrfs_key key;
338 int ret;
339 struct btrfs_path *path;
340 struct btrfs_root_ref *ref;
341 struct extent_buffer *leaf;
342 unsigned long ptr;
343
344
345 path = btrfs_alloc_path();
346
347 key.objectid = root_id;
348 key.type = type;
349 key.offset = ref_id;
350
351 ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
352 sizeof(*ref) + name_len);
353 BUG_ON(ret);
354
355 leaf = path->nodes[0];
356 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
357 btrfs_set_root_ref_dirid(leaf, ref, dirid);
358 btrfs_set_root_ref_sequence(leaf, ref, sequence);
359 btrfs_set_root_ref_name_len(leaf, ref, name_len);
360 ptr = (unsigned long)(ref + 1);
361 write_extent_buffer(leaf, name, ptr, name_len);
362 btrfs_mark_buffer_dirty(leaf);
363
364 btrfs_free_path(path);
365 return ret;
366}
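
/*
 * A sketch of how the two directions pair up for a subvolume (ids and
 * names hypothetical): the forward ref is keyed by the referencing tree,
 * the back ref by the subvolume itself.
 */
	ret = btrfs_add_root_ref(trans, tree_root, parent_root_id,
				 BTRFS_ROOT_REF_KEY, subvol_id,
				 dirid, sequence, name, name_len);
	if (!ret)
		ret = btrfs_add_root_ref(trans, tree_root, subvol_id,
					 BTRFS_ROOT_BACKREF_KEY, parent_root_id,
					 dirid, sequence, name, name_len);
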
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
new file mode 100644
index 000000000000..c0f7ecaf1e79
--- /dev/null
+++ b/fs/btrfs/struct-funcs.c
@@ -0,0 +1,139 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/highmem.h>
20
21/* this is some deeply nasty code. ctree.h has a different
22 * definition for this BTRFS_SETGET_FUNCS macro, behind a #ifndef
23 *
24 * The end result is that anyone who #includes ctree.h gets a
25 * declaration for the btrfs_set_foo functions and btrfs_foo functions
26 *
27 * This file declares the macros and then #includes ctree.h, which results
28 * in cpp creating the functions here based on the template below.
29 *
30 * These setget functions do all the extent_buffer related mapping
31 * required to efficiently read and write specific fields in the extent
32 * buffers. Every pointer to metadata items in btrfs is really just
33 * an unsigned long offset into the extent buffer which has been
34 * cast to a specific type. This gives us all the gcc type checking.
35 *
36 * The extent buffer api is used to do all the kmapping and page
37 * spanning work required to get extent buffers in highmem and have
38 * a metadata blocksize different from the page size.
39 *
40 * The macro starts with a simple function prototype declaration so that
41 * sparse won't complain about it being static.
42 */
43
44#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
45u##bits btrfs_##name(struct extent_buffer *eb, type *s); \
46void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val); \
47u##bits btrfs_##name(struct extent_buffer *eb, \
48 type *s) \
49{ \
50 unsigned long part_offset = (unsigned long)s; \
51 unsigned long offset = part_offset + offsetof(type, member); \
52 type *p; \
53 /* ugly, but we want the fast path here */ \
54 if (eb->map_token && offset >= eb->map_start && \
55 offset + sizeof(((type *)0)->member) <= eb->map_start + \
56 eb->map_len) { \
57 p = (type *)(eb->kaddr + part_offset - eb->map_start); \
58 return le##bits##_to_cpu(p->member); \
59 } \
60 { \
61 int err; \
62 char *map_token; \
63 char *kaddr; \
64 int unmap_on_exit = (eb->map_token == NULL); \
65 unsigned long map_start; \
66 unsigned long map_len; \
67 u##bits res; \
68 err = map_extent_buffer(eb, offset, \
69 sizeof(((type *)0)->member), \
70 &map_token, &kaddr, \
71 &map_start, &map_len, KM_USER1); \
72 if (err) { \
73 __le##bits leres; \
74 read_eb_member(eb, s, type, member, &leres); \
75 return le##bits##_to_cpu(leres); \
76 } \
77 p = (type *)(kaddr + part_offset - map_start); \
78 res = le##bits##_to_cpu(p->member); \
79 if (unmap_on_exit) \
80 unmap_extent_buffer(eb, map_token, KM_USER1); \
81 return res; \
82 } \
83} \
84void btrfs_set_##name(struct extent_buffer *eb, \
85 type *s, u##bits val) \
86{ \
87 unsigned long part_offset = (unsigned long)s; \
88 unsigned long offset = part_offset + offsetof(type, member); \
89 type *p; \
90 /* ugly, but we want the fast path here */ \
91 if (eb->map_token && offset >= eb->map_start && \
92 offset + sizeof(((type *)0)->member) <= eb->map_start + \
93 eb->map_len) { \
94 p = (type *)(eb->kaddr + part_offset - eb->map_start); \
95 p->member = cpu_to_le##bits(val); \
96 return; \
97 } \
98 { \
99 int err; \
100 char *map_token; \
101 char *kaddr; \
102 int unmap_on_exit = (eb->map_token == NULL); \
103 unsigned long map_start; \
104 unsigned long map_len; \
105 err = map_extent_buffer(eb, offset, \
106 sizeof(((type *)0)->member), \
107 &map_token, &kaddr, \
108 &map_start, &map_len, KM_USER1); \
109 if (err) { \
110 __le##bits val2; \
111 val2 = cpu_to_le##bits(val); \
112 write_eb_member(eb, s, type, member, &val2); \
113 return; \
114 } \
115 p = (type *)(kaddr + part_offset - map_start); \
116 p->member = cpu_to_le##bits(val); \
117 if (unmap_on_exit) \
118 unmap_extent_buffer(eb, map_token, KM_USER1); \
119 } \
120}
121
122#include "ctree.h"
123
124void btrfs_node_key(struct extent_buffer *eb,
125 struct btrfs_disk_key *disk_key, int nr)
126{
127 unsigned long ptr = btrfs_node_key_ptr_offset(nr);
128 if (eb->map_token && ptr >= eb->map_start &&
129 ptr + sizeof(*disk_key) <= eb->map_start + eb->map_len) {
130 memcpy(disk_key, eb->kaddr + ptr - eb->map_start,
131 sizeof(*disk_key));
132 return;
133 } else if (eb->map_token) {
134 unmap_extent_buffer(eb, eb->map_token, KM_USER1);
135 eb->map_token = NULL;
136 }
137 read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
138 struct btrfs_key_ptr, key, disk_key);
139}
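
/*
 * For a single field the macro boils down to one getter/setter pair.  As
 * an illustration (simplified; the real functions include the mapping fast
 * path above), BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item,
 * size, 64) gives callers:
 */
	u64 isize = btrfs_inode_size(leaf, ii);	/* le64_to_cpu(ii->size) */
	btrfs_set_inode_size(leaf, ii, 4096);	/* ii->size = cpu_to_le64(4096) */
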
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
new file mode 100644
index 000000000000..b4c101d9322c
--- /dev/null
+++ b/fs/btrfs/super.c
@@ -0,0 +1,720 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/blkdev.h>
20#include <linux/module.h>
21#include <linux/buffer_head.h>
22#include <linux/fs.h>
23#include <linux/pagemap.h>
24#include <linux/highmem.h>
25#include <linux/time.h>
26#include <linux/init.h>
27#include <linux/string.h>
28#include <linux/smp_lock.h>
29#include <linux/backing-dev.h>
30#include <linux/mount.h>
31#include <linux/mpage.h>
32#include <linux/swap.h>
33#include <linux/writeback.h>
34#include <linux/statfs.h>
35#include <linux/compat.h>
36#include <linux/parser.h>
37#include <linux/ctype.h>
38#include <linux/namei.h>
39#include <linux/miscdevice.h>
40#include <linux/version.h>
41#include "compat.h"
42#include "ctree.h"
43#include "disk-io.h"
44#include "transaction.h"
45#include "btrfs_inode.h"
46#include "ioctl.h"
47#include "print-tree.h"
48#include "xattr.h"
49#include "volumes.h"
50#include "version.h"
51#include "export.h"
52#include "compression.h"
53
54#define BTRFS_SUPER_MAGIC 0x9123683E
55
56static struct super_operations btrfs_super_ops;
57
58static void btrfs_put_super(struct super_block *sb)
59{
60 struct btrfs_root *root = btrfs_sb(sb);
61 int ret;
62
63 ret = close_ctree(root);
64 sb->s_fs_info = NULL;
65}
66
67enum {
68 Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow,
69 Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier,
70 Opt_ssd, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_err,
71};
72
73static match_table_t tokens = {
74 {Opt_degraded, "degraded"},
75 {Opt_subvol, "subvol=%s"},
76 {Opt_device, "device=%s"},
77 {Opt_nodatasum, "nodatasum"},
78 {Opt_nodatacow, "nodatacow"},
79 {Opt_nobarrier, "nobarrier"},
80 {Opt_max_extent, "max_extent=%s"},
81 {Opt_max_inline, "max_inline=%s"},
82 {Opt_alloc_start, "alloc_start=%s"},
83 {Opt_thread_pool, "thread_pool=%d"},
84 {Opt_compress, "compress"},
85 {Opt_ssd, "ssd"},
86 {Opt_noacl, "noacl"},
87 {Opt_err, NULL},
88};
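/*
 * Editor's note (illustrative): the table above maps -o mount options
 * to tokens, so a typical (hypothetical) invocation could be:
 *
 *   mount -t btrfs -o degraded,subvol=snap1,compress,max_extent=8m \
 *         /dev/sdb /mnt
 */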
89
90u64 btrfs_parse_size(char *str)
91{
92 u64 res;
93 int mult = 1;
94 char *end;
95 char last;
96
97	res = simple_strtoull(str, &end, 10);
98
99	last = end[0];
100	if (isalpha(last)) {
101		last = tolower(last);
102		switch (last) {
103		case 'g':
104			mult *= 1024;	/* fall through */
105		case 'm':
106			mult *= 1024;	/* fall through */
107		case 'k':
108			mult *= 1024;
109		}
110 res = res * mult;
111 }
112 return res;
113}
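/*
 * Editor's note (illustrative): the switch above falls through on
 * purpose, multiplying once per suffix level, so:
 *
 *   btrfs_parse_size("8k") == 8ULL << 10
 *   btrfs_parse_size("8m") == 8ULL << 20
 *   btrfs_parse_size("8g") == 8ULL << 30
 *
 * and a bare number is returned unscaled.
 */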
114
115/*
116 * Regular mount options parser. Everything that is needed only when
117 * reading in a new superblock is parsed here.
118 */
119int btrfs_parse_options(struct btrfs_root *root, char *options)
120{
121 struct btrfs_fs_info *info = root->fs_info;
122 substring_t args[MAX_OPT_ARGS];
123 char *p, *num;
124 int intarg;
125
126 if (!options)
127 return 0;
128
129 /*
130 * strsep changes the string, duplicate it because parse_options
131 * gets called twice
132 */
133 options = kstrdup(options, GFP_NOFS);
134 if (!options)
135 return -ENOMEM;
136
137
138 while ((p = strsep(&options, ",")) != NULL) {
139 int token;
140 if (!*p)
141 continue;
142
143 token = match_token(p, tokens, args);
144 switch (token) {
145 case Opt_degraded:
146 printk(KERN_INFO "btrfs: allowing degraded mounts\n");
147 btrfs_set_opt(info->mount_opt, DEGRADED);
148 break;
149 case Opt_subvol:
150 case Opt_device:
151 /*
152 * These are parsed by btrfs_parse_early_options
153 * and can be happily ignored here.
154 */
155 break;
156 case Opt_nodatasum:
157			printk(KERN_INFO "btrfs: setting nodatasum\n");
158 btrfs_set_opt(info->mount_opt, NODATASUM);
159 break;
160 case Opt_nodatacow:
161 printk(KERN_INFO "btrfs: setting nodatacow\n");
162 btrfs_set_opt(info->mount_opt, NODATACOW);
163 btrfs_set_opt(info->mount_opt, NODATASUM);
164 break;
165 case Opt_compress:
166 printk(KERN_INFO "btrfs: use compression\n");
167 btrfs_set_opt(info->mount_opt, COMPRESS);
168 break;
169 case Opt_ssd:
170 printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
171 btrfs_set_opt(info->mount_opt, SSD);
172 break;
173 case Opt_nobarrier:
174 printk(KERN_INFO "btrfs: turning off barriers\n");
175 btrfs_set_opt(info->mount_opt, NOBARRIER);
176 break;
177 case Opt_thread_pool:
178 intarg = 0;
179 match_int(&args[0], &intarg);
180 if (intarg) {
181 info->thread_pool_size = intarg;
182 printk(KERN_INFO "btrfs: thread pool %d\n",
183 info->thread_pool_size);
184 }
185 break;
186 case Opt_max_extent:
187 num = match_strdup(&args[0]);
188 if (num) {
189 info->max_extent = btrfs_parse_size(num);
190 kfree(num);
191
192 info->max_extent = max_t(u64,
193 info->max_extent, root->sectorsize);
194 printk(KERN_INFO "btrfs: max_extent at %llu\n",
195 info->max_extent);
196 }
197 break;
198 case Opt_max_inline:
199 num = match_strdup(&args[0]);
200 if (num) {
201 info->max_inline = btrfs_parse_size(num);
202 kfree(num);
203
204 if (info->max_inline) {
205 info->max_inline = max_t(u64,
206 info->max_inline,
207 root->sectorsize);
208 }
209 printk(KERN_INFO "btrfs: max_inline at %llu\n",
210 info->max_inline);
211 }
212 break;
213 case Opt_alloc_start:
214 num = match_strdup(&args[0]);
215 if (num) {
216 info->alloc_start = btrfs_parse_size(num);
217 kfree(num);
218 printk(KERN_INFO
219 "btrfs: allocations start at %llu\n",
220 info->alloc_start);
221 }
222 break;
223 case Opt_noacl:
224 root->fs_info->sb->s_flags &= ~MS_POSIXACL;
225 break;
226 default:
227 break;
228 }
229 }
230 kfree(options);
231 return 0;
232}
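/*
 * Editor's sketch of a hypothetical caller (not part of the original
 * patch; open_ctree() is the real consumer and "example_apply_opts" is
 * made up).  btrfs_parse_options() duplicates the string internally,
 * so a stack buffer is fine.
 */
#if 0
static int example_apply_opts(struct btrfs_root *root)
{
	char example_opts[] = "degraded,compress,max_extent=8m";

	/* sets DEGRADED and COMPRESS in mount_opt, clamps max_extent */
	return btrfs_parse_options(root, example_opts);
}
#endif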
233
234/*
235 * Parse mount options that are required early in the mount process.
236 *
237 * All other options are parsed much later in the mount process, and
238 * only when we need to allocate a new super block.
239 */
240static int btrfs_parse_early_options(const char *options, fmode_t flags,
241 void *holder, char **subvol_name,
242 struct btrfs_fs_devices **fs_devices)
243{
244 substring_t args[MAX_OPT_ARGS];
245 char *opts, *p;
246 int error = 0;
247
248 if (!options)
249 goto out;
250
251 /*
252 * strsep changes the string, duplicate it because parse_options
253 * gets called twice
254 */
255 opts = kstrdup(options, GFP_KERNEL);
256 if (!opts)
257 return -ENOMEM;
258
259 while ((p = strsep(&opts, ",")) != NULL) {
260 int token;
261 if (!*p)
262 continue;
263
264 token = match_token(p, tokens, args);
265 switch (token) {
266 case Opt_subvol:
267 *subvol_name = match_strdup(&args[0]);
268 break;
269 case Opt_device:
270 error = btrfs_scan_one_device(match_strdup(&args[0]),
271 flags, holder, fs_devices);
272 if (error)
273 goto out_free_opts;
274 break;
275 default:
276 break;
277 }
278 }
279
280 out_free_opts:
281 kfree(opts);
282 out:
283 /*
284 * If no subvolume name is specified we use the default one. Allocate
285 * a copy of the string "." here so that code later in the
286 * mount path doesn't care if it's the default volume or another one.
287 */
288 if (!*subvol_name) {
289 *subvol_name = kstrdup(".", GFP_KERNEL);
290 if (!*subvol_name)
291 return -ENOMEM;
292 }
293 return error;
294}
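/*
 * Editor's note (illustrative): for a hypothetical option string such
 * as "subvol=snap1,device=/dev/sdc,compress", this early pass consumes
 * subvol= and device=, while btrfs_parse_options() later handles
 * compress once the superblock has been set up.
 */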
295
296static int btrfs_fill_super(struct super_block *sb,
297 struct btrfs_fs_devices *fs_devices,
298 void *data, int silent)
299{
300 struct inode *inode;
301 struct dentry *root_dentry;
302 struct btrfs_super_block *disk_super;
303 struct btrfs_root *tree_root;
304 struct btrfs_inode *bi;
305 int err;
306
307 sb->s_maxbytes = MAX_LFS_FILESIZE;
308 sb->s_magic = BTRFS_SUPER_MAGIC;
309 sb->s_op = &btrfs_super_ops;
310 sb->s_export_op = &btrfs_export_ops;
311 sb->s_xattr = btrfs_xattr_handlers;
312 sb->s_time_gran = 1;
313 sb->s_flags |= MS_POSIXACL;
314
315 tree_root = open_ctree(sb, fs_devices, (char *)data);
316
317 if (IS_ERR(tree_root)) {
318		printk(KERN_ERR "btrfs: open_ctree failed\n");
319 return PTR_ERR(tree_root);
320 }
321 sb->s_fs_info = tree_root;
322 disk_super = &tree_root->fs_info->super_copy;
323	inode = btrfs_iget_locked(sb, BTRFS_FIRST_FREE_OBJECTID,
324				  tree_root->fs_info->fs_root);
325	if (!inode) {
326		err = -ENOMEM;
327		goto fail_close;
328	}
329
330	bi = BTRFS_I(inode);
331	bi->location.objectid = inode->i_ino;
332	bi->location.offset = 0;
333	bi->root = tree_root->fs_info->fs_root;
334
335	btrfs_set_key_type(&bi->location, BTRFS_INODE_ITEM_KEY);
336 if (inode->i_state & I_NEW) {
337 btrfs_read_locked_inode(inode);
338 unlock_new_inode(inode);
339 }
340
341 root_dentry = d_alloc_root(inode);
342 if (!root_dentry) {
343 iput(inode);
344 err = -ENOMEM;
345 goto fail_close;
346 }
347#if 0
348 /* this does the super kobj at the same time */
349 err = btrfs_sysfs_add_super(tree_root->fs_info);
350 if (err)
351 goto fail_close;
352#endif
353
354 sb->s_root = root_dentry;
355
356 save_mount_options(sb, data);
357 return 0;
358
359fail_close:
360 close_ctree(tree_root);
361 return err;
362}
363
364int btrfs_sync_fs(struct super_block *sb, int wait)
365{
366 struct btrfs_trans_handle *trans;
367 struct btrfs_root *root;
368 int ret;
369 root = btrfs_sb(sb);
370
371 if (sb->s_flags & MS_RDONLY)
372 return 0;
373
374 sb->s_dirt = 0;
375 if (!wait) {
376 filemap_flush(root->fs_info->btree_inode->i_mapping);
377 return 0;
378 }
379
380 btrfs_start_delalloc_inodes(root);
381 btrfs_wait_ordered_extents(root, 0);
382
383 btrfs_clean_old_snapshots(root);
384 trans = btrfs_start_transaction(root, 1);
385 ret = btrfs_commit_transaction(trans, root);
386 sb->s_dirt = 0;
387 return ret;
388}
389
390static void btrfs_write_super(struct super_block *sb)
391{
392 sb->s_dirt = 0;
393}
394
395static int btrfs_test_super(struct super_block *s, void *data)
396{
397 struct btrfs_fs_devices *test_fs_devices = data;
398 struct btrfs_root *root = btrfs_sb(s);
399
400 return root->fs_info->fs_devices == test_fs_devices;
401}
402
403/*
404 * Find a superblock for the given device / mount point.
405 *
406 * Note: This is based on get_sb_bdev from fs/super.c with a few additions
407 * for multiple device setup. Make sure to keep it in sync.
408 */
409static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
410 const char *dev_name, void *data, struct vfsmount *mnt)
411{
412 char *subvol_name = NULL;
413 struct block_device *bdev = NULL;
414 struct super_block *s;
415 struct dentry *root;
416 struct btrfs_fs_devices *fs_devices = NULL;
417 fmode_t mode = FMODE_READ;
418 int error = 0;
419
420 if (!(flags & MS_RDONLY))
421 mode |= FMODE_WRITE;
422
423 error = btrfs_parse_early_options(data, mode, fs_type,
424 &subvol_name, &fs_devices);
425 if (error)
426 return error;
427
428 error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices);
429 if (error)
430 goto error_free_subvol_name;
431
432 error = btrfs_open_devices(fs_devices, mode, fs_type);
433 if (error)
434 goto error_free_subvol_name;
435
436 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
437 error = -EACCES;
438 goto error_close_devices;
439 }
440
441 bdev = fs_devices->latest_bdev;
442 s = sget(fs_type, btrfs_test_super, set_anon_super, fs_devices);
443 if (IS_ERR(s))
444 goto error_s;
445
446 if (s->s_root) {
447 if ((flags ^ s->s_flags) & MS_RDONLY) {
448 up_write(&s->s_umount);
449 deactivate_super(s);
450 error = -EBUSY;
451 goto error_close_devices;
452 }
453
454 btrfs_close_devices(fs_devices);
455 } else {
456 char b[BDEVNAME_SIZE];
457
458 s->s_flags = flags;
459 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
460 error = btrfs_fill_super(s, fs_devices, data,
461 flags & MS_SILENT ? 1 : 0);
462 if (error) {
463 up_write(&s->s_umount);
464 deactivate_super(s);
465 goto error_free_subvol_name;
466 }
467
468 btrfs_sb(s)->fs_info->bdev_holder = fs_type;
469 s->s_flags |= MS_ACTIVE;
470 }
471
472	if (!strcmp(subvol_name, ".")) {
473		root = dget(s->s_root);
474	} else {
475 mutex_lock(&s->s_root->d_inode->i_mutex);
476 root = lookup_one_len(subvol_name, s->s_root,
477 strlen(subvol_name));
478 mutex_unlock(&s->s_root->d_inode->i_mutex);
479
480 if (IS_ERR(root)) {
481 up_write(&s->s_umount);
482 deactivate_super(s);
483 error = PTR_ERR(root);
484 goto error_free_subvol_name;
485 }
486 if (!root->d_inode) {
487 dput(root);
488 up_write(&s->s_umount);
489 deactivate_super(s);
490 error = -ENXIO;
491 goto error_free_subvol_name;
492 }
493 }
494
495 mnt->mnt_sb = s;
496 mnt->mnt_root = root;
497
498 kfree(subvol_name);
499 return 0;
500
501error_s:
502 error = PTR_ERR(s);
503error_close_devices:
504 btrfs_close_devices(fs_devices);
505error_free_subvol_name:
506 kfree(subvol_name);
507 return error;
508}
509
510static int btrfs_remount(struct super_block *sb, int *flags, char *data)
511{
512 struct btrfs_root *root = btrfs_sb(sb);
513 int ret;
514
515 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
516 return 0;
517
518 if (*flags & MS_RDONLY) {
519 sb->s_flags |= MS_RDONLY;
520
521 ret = btrfs_commit_super(root);
522 WARN_ON(ret);
523 } else {
524 if (root->fs_info->fs_devices->rw_devices == 0)
525 return -EACCES;
526
527 if (btrfs_super_log_root(&root->fs_info->super_copy) != 0)
528 return -EINVAL;
529
530 ret = btrfs_cleanup_reloc_trees(root);
531 WARN_ON(ret);
532
533 ret = btrfs_cleanup_fs_roots(root->fs_info);
534 WARN_ON(ret);
535
536 sb->s_flags &= ~MS_RDONLY;
537 }
538
539 return 0;
540}
541
542static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
543{
544 struct btrfs_root *root = btrfs_sb(dentry->d_sb);
545 struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
546 int bits = dentry->d_sb->s_blocksize_bits;
547 __be32 *fsid = (__be32 *)root->fs_info->fsid;
548
549 buf->f_namelen = BTRFS_NAME_LEN;
550 buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits;
551 buf->f_bfree = buf->f_blocks -
552 (btrfs_super_bytes_used(disk_super) >> bits);
553 buf->f_bavail = buf->f_bfree;
554 buf->f_bsize = dentry->d_sb->s_blocksize;
555 buf->f_type = BTRFS_SUPER_MAGIC;
556
557 /* We treat it as constant endianness (it doesn't matter _which_)
558 because we want the fsid to come out the same whether mounted
559 on a big-endian or little-endian host */
560 buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
561 buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
562 /* Mask in the root object ID too, to disambiguate subvols */
563 buf->f_fsid.val[0] ^= BTRFS_I(dentry->d_inode)->root->objectid >> 32;
564 buf->f_fsid.val[1] ^= BTRFS_I(dentry->d_inode)->root->objectid;
565
566 return 0;
567}
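/*
 * Editor's note (illustrative): the 16-byte fsid is folded into the two
 * 32-bit f_fsid words by XOR.  With the fsid viewed as four big-endian
 * words w0..w3:
 *
 *   val[0] = w0 ^ w2 ^ (u32)(root objectid >> 32)
 *   val[1] = w1 ^ w3 ^ (u32)(root objectid)
 *
 * so two subvolumes of the same filesystem report distinct f_fsid
 * values while staying endian-independent.
 */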
568
569static struct file_system_type btrfs_fs_type = {
570 .owner = THIS_MODULE,
571 .name = "btrfs",
572 .get_sb = btrfs_get_sb,
573 .kill_sb = kill_anon_super,
574 .fs_flags = FS_REQUIRES_DEV,
575};
576
577/*
578 * used by btrfsctl to scan devices when no FS is mounted
579 */
580static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
581 unsigned long arg)
582{
583 struct btrfs_ioctl_vol_args *vol;
584 struct btrfs_fs_devices *fs_devices;
585 int ret = 0;
586 int len;
587
588 if (!capable(CAP_SYS_ADMIN))
589 return -EPERM;
590
591	vol = kmalloc(sizeof(*vol), GFP_KERNEL);
592	if (!vol || copy_from_user(vol, (void __user *)arg, sizeof(*vol))) {
593 ret = -EFAULT;
594 goto out;
595 }
596 len = strnlen(vol->name, BTRFS_PATH_NAME_MAX);
597 switch (cmd) {
598 case BTRFS_IOC_SCAN_DEV:
599 ret = btrfs_scan_one_device(vol->name, FMODE_READ,
600 &btrfs_fs_type, &fs_devices);
601 break;
602 }
603out:
604 kfree(vol);
605 return ret;
606}
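/*
 * Editor's sketch of how user space (e.g. btrfsctl) might drive the
 * ioctl above (assumption: not part of the original patch; the device
 * path is made up, BTRFS_IOC_SCAN_DEV comes from ioctl.h).
 */
#if 0
	struct btrfs_ioctl_vol_args args = { };
	int fd = open("/dev/btrfs-control", O_RDWR);

	strncpy(args.name, "/dev/sdb", BTRFS_PATH_NAME_MAX);
	ioctl(fd, BTRFS_IOC_SCAN_DEV, &args);
	close(fd);
#endif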
607
608static void btrfs_write_super_lockfs(struct super_block *sb)
609{
610 struct btrfs_root *root = btrfs_sb(sb);
611 mutex_lock(&root->fs_info->transaction_kthread_mutex);
612 mutex_lock(&root->fs_info->cleaner_mutex);
613}
614
615static void btrfs_unlockfs(struct super_block *sb)
616{
617 struct btrfs_root *root = btrfs_sb(sb);
618 mutex_unlock(&root->fs_info->cleaner_mutex);
619 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
620}
621
622static struct super_operations btrfs_super_ops = {
623 .delete_inode = btrfs_delete_inode,
624 .put_super = btrfs_put_super,
625 .write_super = btrfs_write_super,
626 .sync_fs = btrfs_sync_fs,
627 .show_options = generic_show_options,
628 .write_inode = btrfs_write_inode,
629 .dirty_inode = btrfs_dirty_inode,
630 .alloc_inode = btrfs_alloc_inode,
631 .destroy_inode = btrfs_destroy_inode,
632 .statfs = btrfs_statfs,
633 .remount_fs = btrfs_remount,
634 .write_super_lockfs = btrfs_write_super_lockfs,
635 .unlockfs = btrfs_unlockfs,
636};
637
638static const struct file_operations btrfs_ctl_fops = {
639 .unlocked_ioctl = btrfs_control_ioctl,
640 .compat_ioctl = btrfs_control_ioctl,
641 .owner = THIS_MODULE,
642};
643
644static struct miscdevice btrfs_misc = {
645 .minor = MISC_DYNAMIC_MINOR,
646 .name = "btrfs-control",
647 .fops = &btrfs_ctl_fops
648};
649
650static int btrfs_interface_init(void)
651{
652 return misc_register(&btrfs_misc);
653}
654
655static void btrfs_interface_exit(void)
656{
657 if (misc_deregister(&btrfs_misc) < 0)
658		printk(KERN_INFO "misc_deregister failed for control device\n");
659}
660
661static int __init init_btrfs_fs(void)
662{
663 int err;
664
665 err = btrfs_init_sysfs();
666 if (err)
667 return err;
668
669 err = btrfs_init_cachep();
670 if (err)
671 goto free_sysfs;
672
673 err = extent_io_init();
674 if (err)
675 goto free_cachep;
676
677 err = extent_map_init();
678 if (err)
679 goto free_extent_io;
680
681 err = btrfs_interface_init();
682 if (err)
683 goto free_extent_map;
684
685 err = register_filesystem(&btrfs_fs_type);
686 if (err)
687 goto unregister_ioctl;
688
689 printk(KERN_INFO "%s loaded\n", BTRFS_BUILD_VERSION);
690 return 0;
691
692unregister_ioctl:
693 btrfs_interface_exit();
694free_extent_map:
695 extent_map_exit();
696free_extent_io:
697 extent_io_exit();
698free_cachep:
699 btrfs_destroy_cachep();
700free_sysfs:
701 btrfs_exit_sysfs();
702 return err;
703}
704
705static void __exit exit_btrfs_fs(void)
706{
707 btrfs_destroy_cachep();
708 extent_map_exit();
709 extent_io_exit();
710 btrfs_interface_exit();
711 unregister_filesystem(&btrfs_fs_type);
712 btrfs_exit_sysfs();
713 btrfs_cleanup_fs_uuids();
714 btrfs_zlib_exit();
715}
716
717module_init(init_btrfs_fs)
718module_exit(exit_btrfs_fs)
719
720MODULE_LICENSE("GPL");
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
new file mode 100644
index 000000000000..a240b6fa81df
--- /dev/null
+++ b/fs/btrfs/sysfs.c
@@ -0,0 +1,269 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/spinlock.h>
22#include <linux/completion.h>
23#include <linux/buffer_head.h>
24#include <linux/module.h>
25#include <linux/kobject.h>
26
27#include "ctree.h"
28#include "disk-io.h"
29#include "transaction.h"
30
31static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf)
32{
33 return snprintf(buf, PAGE_SIZE, "%llu\n",
34 (unsigned long long)btrfs_root_used(&root->root_item));
35}
36
37static ssize_t root_block_limit_show(struct btrfs_root *root, char *buf)
38{
39 return snprintf(buf, PAGE_SIZE, "%llu\n",
40 (unsigned long long)btrfs_root_limit(&root->root_item));
41}
42
43static ssize_t super_blocks_used_show(struct btrfs_fs_info *fs, char *buf)
44{
45
46 return snprintf(buf, PAGE_SIZE, "%llu\n",
47 (unsigned long long)btrfs_super_bytes_used(&fs->super_copy));
48}
49
50static ssize_t super_total_blocks_show(struct btrfs_fs_info *fs, char *buf)
51{
52 return snprintf(buf, PAGE_SIZE, "%llu\n",
53 (unsigned long long)btrfs_super_total_bytes(&fs->super_copy));
54}
55
56static ssize_t super_blocksize_show(struct btrfs_fs_info *fs, char *buf)
57{
58 return snprintf(buf, PAGE_SIZE, "%llu\n",
59 (unsigned long long)btrfs_super_sectorsize(&fs->super_copy));
60}
61
62/* this is for root attrs (subvols/snapshots) */
63struct btrfs_root_attr {
64 struct attribute attr;
65 ssize_t (*show)(struct btrfs_root *, char *);
66 ssize_t (*store)(struct btrfs_root *, const char *, size_t);
67};
68
69#define ROOT_ATTR(name, mode, show, store) \
70static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \
71 show, store)
72
73ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL);
74ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL);
75
76static struct attribute *btrfs_root_attrs[] = {
77 &btrfs_root_attr_blocks_used.attr,
78 &btrfs_root_attr_block_limit.attr,
79 NULL,
80};
81
82/* this is for super attrs (actual full fs) */
83struct btrfs_super_attr {
84 struct attribute attr;
85 ssize_t (*show)(struct btrfs_fs_info *, char *);
86 ssize_t (*store)(struct btrfs_fs_info *, const char *, size_t);
87};
88
89#define SUPER_ATTR(name, mode, show, store) \
90static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \
91 show, store)
92
93SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL);
94SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL);
95SUPER_ATTR(blocksize, 0444, super_blocksize_show, NULL);
96
97static struct attribute *btrfs_super_attrs[] = {
98 &btrfs_super_attr_blocks_used.attr,
99 &btrfs_super_attr_total_blocks.attr,
100 &btrfs_super_attr_blocksize.attr,
101 NULL,
102};
103
104static ssize_t btrfs_super_attr_show(struct kobject *kobj,
105 struct attribute *attr, char *buf)
106{
107 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
108 super_kobj);
109 struct btrfs_super_attr *a = container_of(attr,
110 struct btrfs_super_attr,
111 attr);
112
113 return a->show ? a->show(fs, buf) : 0;
114}
115
116static ssize_t btrfs_super_attr_store(struct kobject *kobj,
117 struct attribute *attr,
118 const char *buf, size_t len)
119{
120 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
121 super_kobj);
122 struct btrfs_super_attr *a = container_of(attr,
123 struct btrfs_super_attr,
124 attr);
125
126 return a->store ? a->store(fs, buf, len) : 0;
127}
128
129static ssize_t btrfs_root_attr_show(struct kobject *kobj,
130 struct attribute *attr, char *buf)
131{
132 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
133 root_kobj);
134 struct btrfs_root_attr *a = container_of(attr,
135 struct btrfs_root_attr,
136 attr);
137
138 return a->show ? a->show(root, buf) : 0;
139}
140
141static ssize_t btrfs_root_attr_store(struct kobject *kobj,
142 struct attribute *attr,
143 const char *buf, size_t len)
144{
145 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
146 root_kobj);
147 struct btrfs_root_attr *a = container_of(attr,
148 struct btrfs_root_attr,
149 attr);
150 return a->store ? a->store(root, buf, len) : 0;
151}
152
153static void btrfs_super_release(struct kobject *kobj)
154{
155 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
156 super_kobj);
157 complete(&fs->kobj_unregister);
158}
159
160static void btrfs_root_release(struct kobject *kobj)
161{
162 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
163 root_kobj);
164 complete(&root->kobj_unregister);
165}
166
167static struct sysfs_ops btrfs_super_attr_ops = {
168 .show = btrfs_super_attr_show,
169 .store = btrfs_super_attr_store,
170};
171
172static struct sysfs_ops btrfs_root_attr_ops = {
173 .show = btrfs_root_attr_show,
174 .store = btrfs_root_attr_store,
175};
176
177static struct kobj_type btrfs_root_ktype = {
178 .default_attrs = btrfs_root_attrs,
179 .sysfs_ops = &btrfs_root_attr_ops,
180 .release = btrfs_root_release,
181};
182
183static struct kobj_type btrfs_super_ktype = {
184 .default_attrs = btrfs_super_attrs,
185 .sysfs_ops = &btrfs_super_attr_ops,
186 .release = btrfs_super_release,
187};
188
189/* /sys/fs/btrfs/ entry */
190static struct kset *btrfs_kset;
191
192int btrfs_sysfs_add_super(struct btrfs_fs_info *fs)
193{
194 int error;
195 char *name;
196 char c;
197 int len = strlen(fs->sb->s_id) + 1;
198 int i;
199
200 name = kmalloc(len, GFP_NOFS);
201 if (!name) {
202 error = -ENOMEM;
203 goto fail;
204 }
205
206 for (i = 0; i < len; i++) {
207 c = fs->sb->s_id[i];
208 if (c == '/' || c == '\\')
209 c = '!';
210 name[i] = c;
211 }
212	name[len - 1] = '\0';	/* len already counts the NUL */
213
214 fs->super_kobj.kset = btrfs_kset;
215 error = kobject_init_and_add(&fs->super_kobj, &btrfs_super_ktype,
216 NULL, "%s", name);
217 kfree(name);
218 if (error)
219 goto fail;
220
221 return 0;
222
223fail:
224 printk(KERN_ERR "btrfs: sysfs creation for super failed\n");
225 return error;
226}
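/*
 * Editor's note (illustrative): with btrfs_kset registered under
 * fs_kobj (see btrfs_init_sysfs() below), a filesystem whose s_id is
 * "sda1" (hypothetical) would surface as:
 *
 *   /sys/fs/btrfs/sda1/blocks_used
 *   /sys/fs/btrfs/sda1/total_blocks
 *   /sys/fs/btrfs/sda1/blocksize
 *
 * with '/' and '\\' in s_id rewritten to '!' above.
 */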
227
228int btrfs_sysfs_add_root(struct btrfs_root *root)
229{
230 int error;
231
232 error = kobject_init_and_add(&root->root_kobj, &btrfs_root_ktype,
233 &root->fs_info->super_kobj,
234 "%s", root->name);
235 if (error)
236 goto fail;
237
238 return 0;
239
240fail:
241 printk(KERN_ERR "btrfs: sysfs creation for root failed\n");
242 return error;
243}
244
245void btrfs_sysfs_del_root(struct btrfs_root *root)
246{
247 kobject_put(&root->root_kobj);
248 wait_for_completion(&root->kobj_unregister);
249}
250
251void btrfs_sysfs_del_super(struct btrfs_fs_info *fs)
252{
253 kobject_put(&fs->super_kobj);
254 wait_for_completion(&fs->kobj_unregister);
255}
256
257int btrfs_init_sysfs(void)
258{
259 btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj);
260 if (!btrfs_kset)
261 return -ENOMEM;
262 return 0;
263}
264
265void btrfs_exit_sysfs(void)
266{
267 kset_unregister(btrfs_kset);
268}
269
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
new file mode 100644
index 000000000000..8a08f9443340
--- /dev/null
+++ b/fs/btrfs/transaction.c
@@ -0,0 +1,1097 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/fs.h>
20#include <linux/sched.h>
21#include <linux/writeback.h>
22#include <linux/pagemap.h>
23#include <linux/blkdev.h>
24#include "ctree.h"
25#include "disk-io.h"
26#include "transaction.h"
27#include "locking.h"
28#include "ref-cache.h"
29#include "tree-log.h"
30
31#define BTRFS_ROOT_TRANS_TAG 0
32
33static noinline void put_transaction(struct btrfs_transaction *transaction)
34{
35 WARN_ON(transaction->use_count == 0);
36 transaction->use_count--;
37 if (transaction->use_count == 0) {
38 list_del_init(&transaction->list);
39 memset(transaction, 0, sizeof(*transaction));
40 kmem_cache_free(btrfs_transaction_cachep, transaction);
41 }
42}
43
44/*
45 * either allocate a new transaction or hop into the existing one
46 */
47static noinline int join_transaction(struct btrfs_root *root)
48{
49 struct btrfs_transaction *cur_trans;
50 cur_trans = root->fs_info->running_transaction;
51 if (!cur_trans) {
52 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
53 GFP_NOFS);
54 BUG_ON(!cur_trans);
55 root->fs_info->generation++;
56 root->fs_info->last_alloc = 0;
57 root->fs_info->last_data_alloc = 0;
58 cur_trans->num_writers = 1;
59 cur_trans->num_joined = 0;
60 cur_trans->transid = root->fs_info->generation;
61 init_waitqueue_head(&cur_trans->writer_wait);
62 init_waitqueue_head(&cur_trans->commit_wait);
63 cur_trans->in_commit = 0;
64 cur_trans->blocked = 0;
65 cur_trans->use_count = 1;
66 cur_trans->commit_done = 0;
67 cur_trans->start_time = get_seconds();
68 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
69 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
70 extent_io_tree_init(&cur_trans->dirty_pages,
71 root->fs_info->btree_inode->i_mapping,
72 GFP_NOFS);
73 spin_lock(&root->fs_info->new_trans_lock);
74 root->fs_info->running_transaction = cur_trans;
75 spin_unlock(&root->fs_info->new_trans_lock);
76 } else {
77 cur_trans->num_writers++;
78 cur_trans->num_joined++;
79 }
80
81 return 0;
82}
83
84/*
85 * this does all the record keeping required to make sure that a reference
86 * counted root is properly recorded in a given transaction. This is required
87 * to make sure the old root from before we joined the transaction is deleted
88 * when the transaction commits
89 */
90noinline int btrfs_record_root_in_trans(struct btrfs_root *root)
91{
92 struct btrfs_dirty_root *dirty;
93 u64 running_trans_id = root->fs_info->running_transaction->transid;
94 if (root->ref_cows && root->last_trans < running_trans_id) {
95 WARN_ON(root == root->fs_info->extent_root);
96 if (root->root_item.refs != 0) {
97 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
98 (unsigned long)root->root_key.objectid,
99 BTRFS_ROOT_TRANS_TAG);
100
101 dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
102 BUG_ON(!dirty);
103 dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
104 BUG_ON(!dirty->root);
105 dirty->latest_root = root;
106 INIT_LIST_HEAD(&dirty->list);
107
108 root->commit_root = btrfs_root_node(root);
109
110 memcpy(dirty->root, root, sizeof(*root));
111 spin_lock_init(&dirty->root->node_lock);
112 spin_lock_init(&dirty->root->list_lock);
113 mutex_init(&dirty->root->objectid_mutex);
114 mutex_init(&dirty->root->log_mutex);
115 INIT_LIST_HEAD(&dirty->root->dead_list);
116 dirty->root->node = root->commit_root;
117 dirty->root->commit_root = NULL;
118
119 spin_lock(&root->list_lock);
120 list_add(&dirty->root->dead_list, &root->dead_list);
121 spin_unlock(&root->list_lock);
122
123 root->dirty_root = dirty;
124 } else {
125 WARN_ON(1);
126 }
127 root->last_trans = running_trans_id;
128 }
129 return 0;
130}
131
132/* wait for commit against the current transaction to become unblocked.
133 * when this is done, it is safe to start a new transaction, but the current
134 * transaction might not be fully on disk.
135 */
136static void wait_current_trans(struct btrfs_root *root)
137{
138 struct btrfs_transaction *cur_trans;
139
140 cur_trans = root->fs_info->running_transaction;
141 if (cur_trans && cur_trans->blocked) {
142 DEFINE_WAIT(wait);
143 cur_trans->use_count++;
144 while (1) {
145 prepare_to_wait(&root->fs_info->transaction_wait, &wait,
146 TASK_UNINTERRUPTIBLE);
147 if (cur_trans->blocked) {
148 mutex_unlock(&root->fs_info->trans_mutex);
149 schedule();
150 mutex_lock(&root->fs_info->trans_mutex);
151 finish_wait(&root->fs_info->transaction_wait,
152 &wait);
153 } else {
154 finish_wait(&root->fs_info->transaction_wait,
155 &wait);
156 break;
157 }
158 }
159 put_transaction(cur_trans);
160 }
161}
162
163static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
164 int num_blocks, int wait)
165{
166 struct btrfs_trans_handle *h =
167 kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
168 int ret;
169
170 mutex_lock(&root->fs_info->trans_mutex);
171 if (!root->fs_info->log_root_recovering &&
172 ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2))
173 wait_current_trans(root);
174 ret = join_transaction(root);
175 BUG_ON(ret);
176
177 btrfs_record_root_in_trans(root);
178 h->transid = root->fs_info->running_transaction->transid;
179 h->transaction = root->fs_info->running_transaction;
180 h->blocks_reserved = num_blocks;
181 h->blocks_used = 0;
182 h->block_group = 0;
183 h->alloc_exclude_nr = 0;
184 h->alloc_exclude_start = 0;
185 root->fs_info->running_transaction->use_count++;
186 mutex_unlock(&root->fs_info->trans_mutex);
187 return h;
188}
189
190struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
191 int num_blocks)
192{
193 return start_transaction(root, num_blocks, 1);
194}
195struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
196 int num_blocks)
197{
198 return start_transaction(root, num_blocks, 0);
199}
200
201struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
202 int num_blocks)
203{
204 return start_transaction(r, num_blocks, 2);
205}
206
207/* wait for a transaction commit to be fully complete */
208static noinline int wait_for_commit(struct btrfs_root *root,
209 struct btrfs_transaction *commit)
210{
211 DEFINE_WAIT(wait);
212 mutex_lock(&root->fs_info->trans_mutex);
213 while (!commit->commit_done) {
214 prepare_to_wait(&commit->commit_wait, &wait,
215 TASK_UNINTERRUPTIBLE);
216 if (commit->commit_done)
217 break;
218 mutex_unlock(&root->fs_info->trans_mutex);
219 schedule();
220 mutex_lock(&root->fs_info->trans_mutex);
221 }
222 mutex_unlock(&root->fs_info->trans_mutex);
223 finish_wait(&commit->commit_wait, &wait);
224 return 0;
225}
226
227/*
228 * rate limit against the drop_snapshot code. This helps to slow down new
229 * operations if the drop_snapshot code isn't able to keep up.
230 */
231static void throttle_on_drops(struct btrfs_root *root)
232{
233 struct btrfs_fs_info *info = root->fs_info;
234 int harder_count = 0;
235
236harder:
237 if (atomic_read(&info->throttles)) {
238 DEFINE_WAIT(wait);
239 int thr;
240 thr = atomic_read(&info->throttle_gen);
241
242 do {
243 prepare_to_wait(&info->transaction_throttle,
244 &wait, TASK_UNINTERRUPTIBLE);
245 if (!atomic_read(&info->throttles)) {
246 finish_wait(&info->transaction_throttle, &wait);
247 break;
248 }
249 schedule();
250 finish_wait(&info->transaction_throttle, &wait);
251 } while (thr == atomic_read(&info->throttle_gen));
252 harder_count++;
253
254 if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
255 harder_count < 2)
256 goto harder;
257
258 if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
259 harder_count < 10)
260 goto harder;
261
262 if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
263 harder_count < 20)
264 goto harder;
265 }
266}
267
268void btrfs_throttle(struct btrfs_root *root)
269{
270 mutex_lock(&root->fs_info->trans_mutex);
271 if (!root->fs_info->open_ioctl_trans)
272 wait_current_trans(root);
273 mutex_unlock(&root->fs_info->trans_mutex);
274
275 throttle_on_drops(root);
276}
277
278static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
279 struct btrfs_root *root, int throttle)
280{
281 struct btrfs_transaction *cur_trans;
282 struct btrfs_fs_info *info = root->fs_info;
283
284 mutex_lock(&info->trans_mutex);
285 cur_trans = info->running_transaction;
286 WARN_ON(cur_trans != trans->transaction);
287 WARN_ON(cur_trans->num_writers < 1);
288 cur_trans->num_writers--;
289
290 if (waitqueue_active(&cur_trans->writer_wait))
291 wake_up(&cur_trans->writer_wait);
292 put_transaction(cur_trans);
293 mutex_unlock(&info->trans_mutex);
294 memset(trans, 0, sizeof(*trans));
295 kmem_cache_free(btrfs_trans_handle_cachep, trans);
296
297 if (throttle)
298 throttle_on_drops(root);
299
300 return 0;
301}
302
303int btrfs_end_transaction(struct btrfs_trans_handle *trans,
304 struct btrfs_root *root)
305{
306 return __btrfs_end_transaction(trans, root, 0);
307}
308
309int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
310 struct btrfs_root *root)
311{
312 return __btrfs_end_transaction(trans, root, 1);
313}
314
315/*
316 * when btree blocks are allocated, they have some corresponding bits set for
317 * them in one of two extent_io trees. This is used to make sure all of
318 * those extents are on disk for transaction or log commit
319 */
320int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
321 struct extent_io_tree *dirty_pages)
322{
323 int ret;
324 int err = 0;
325 int werr = 0;
326 struct page *page;
327 struct inode *btree_inode = root->fs_info->btree_inode;
328 u64 start = 0;
329 u64 end;
330 unsigned long index;
331
332 while (1) {
333 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
334 EXTENT_DIRTY);
335 if (ret)
336 break;
337 while (start <= end) {
338 cond_resched();
339
340 index = start >> PAGE_CACHE_SHIFT;
341 start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
342 page = find_get_page(btree_inode->i_mapping, index);
343 if (!page)
344 continue;
345
346 btree_lock_page_hook(page);
347 if (!page->mapping) {
348 unlock_page(page);
349 page_cache_release(page);
350 continue;
351 }
352
353 if (PageWriteback(page)) {
354 if (PageDirty(page))
355 wait_on_page_writeback(page);
356 else {
357 unlock_page(page);
358 page_cache_release(page);
359 continue;
360 }
361 }
362 err = write_one_page(page, 0);
363 if (err)
364 werr = err;
365 page_cache_release(page);
366 }
367 }
368 while (1) {
369 ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
370 EXTENT_DIRTY);
371 if (ret)
372 break;
373
374 clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
375 while (start <= end) {
376 index = start >> PAGE_CACHE_SHIFT;
377 start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
378 page = find_get_page(btree_inode->i_mapping, index);
379 if (!page)
380 continue;
381 if (PageDirty(page)) {
382 btree_lock_page_hook(page);
383 wait_on_page_writeback(page);
384 err = write_one_page(page, 0);
385 if (err)
386 werr = err;
387 }
388 wait_on_page_writeback(page);
389 page_cache_release(page);
390 cond_resched();
391 }
392 }
393 if (err)
394 werr = err;
395 return werr;
396}
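/*
 * Editor's note (illustrative): the first loop above starts writeback
 * on every page covered by an EXTENT_DIRTY range, and the second loop
 * clears the dirty bits and waits for the I/O (re-issuing the write if
 * a page was redirtied), so on return all marked extents are on disk
 * unless werr reports a failure.
 */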
397
398int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
399 struct btrfs_root *root)
400{
401 if (!trans || !trans->transaction) {
402 struct inode *btree_inode;
403 btree_inode = root->fs_info->btree_inode;
404 return filemap_write_and_wait(btree_inode->i_mapping);
405 }
406 return btrfs_write_and_wait_marked_extents(root,
407 &trans->transaction->dirty_pages);
408}
409
410/*
411 * this is used to update the root pointer in the tree of tree roots.
412 *
413 * But, in the case of the extent allocation tree, updating the root
414 * pointer may allocate blocks which may change the root of the extent
415 * allocation tree.
416 *
417 * So, this loops until the root pointer is stable, making sure the
418 * cowonly root didn't change while it was being updated in the metadata.
419 */
420static int update_cowonly_root(struct btrfs_trans_handle *trans,
421 struct btrfs_root *root)
422{
423 int ret;
424 u64 old_root_bytenr;
425 struct btrfs_root *tree_root = root->fs_info->tree_root;
426
427 btrfs_extent_post_op(trans, root);
428 btrfs_write_dirty_block_groups(trans, root);
429 btrfs_extent_post_op(trans, root);
430
431 while (1) {
432 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
433 if (old_root_bytenr == root->node->start)
434 break;
435 btrfs_set_root_bytenr(&root->root_item,
436 root->node->start);
437 btrfs_set_root_level(&root->root_item,
438 btrfs_header_level(root->node));
439 btrfs_set_root_generation(&root->root_item, trans->transid);
440
441 btrfs_extent_post_op(trans, root);
442
443 ret = btrfs_update_root(trans, tree_root,
444 &root->root_key,
445 &root->root_item);
446 BUG_ON(ret);
447 btrfs_write_dirty_block_groups(trans, root);
448 btrfs_extent_post_op(trans, root);
449 }
450 return 0;
451}
452
453/*
454 * update all the cowonly tree roots on disk
455 */
456int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
457 struct btrfs_root *root)
458{
459 struct btrfs_fs_info *fs_info = root->fs_info;
460 struct list_head *next;
461 struct extent_buffer *eb;
462
463 btrfs_extent_post_op(trans, fs_info->tree_root);
464
465 eb = btrfs_lock_root_node(fs_info->tree_root);
466 btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb, 0);
467 btrfs_tree_unlock(eb);
468 free_extent_buffer(eb);
469
470 btrfs_extent_post_op(trans, fs_info->tree_root);
471
472 while (!list_empty(&fs_info->dirty_cowonly_roots)) {
473 next = fs_info->dirty_cowonly_roots.next;
474 list_del_init(next);
475 root = list_entry(next, struct btrfs_root, dirty_list);
476
477 update_cowonly_root(trans, root);
478 }
479 return 0;
480}
481
482/*
483 * dead roots are old snapshots that need to be deleted. This allocates
484 * a dirty root struct and adds it to the list of dead roots awaiting
485 * deletion
486 */
487int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest)
488{
489 struct btrfs_dirty_root *dirty;
490
491 dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
492 if (!dirty)
493 return -ENOMEM;
494 dirty->root = root;
495 dirty->latest_root = latest;
496
497 mutex_lock(&root->fs_info->trans_mutex);
498 list_add(&dirty->list, &latest->fs_info->dead_roots);
499 mutex_unlock(&root->fs_info->trans_mutex);
500 return 0;
501}
502
503/*
504 * at transaction commit time we need to schedule the old roots for
505 * deletion via btrfs_drop_snapshot. This runs through all the
506 * reference counted roots that were modified in the current
507 * transaction and puts them into the drop list
508 */
509static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
510 struct radix_tree_root *radix,
511 struct list_head *list)
512{
513 struct btrfs_dirty_root *dirty;
514 struct btrfs_root *gang[8];
515 struct btrfs_root *root;
516 int i;
517 int ret;
518 int err = 0;
519 u32 refs;
520
521 while (1) {
522 ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
523 ARRAY_SIZE(gang),
524 BTRFS_ROOT_TRANS_TAG);
525 if (ret == 0)
526 break;
527 for (i = 0; i < ret; i++) {
528 root = gang[i];
529 radix_tree_tag_clear(radix,
530 (unsigned long)root->root_key.objectid,
531 BTRFS_ROOT_TRANS_TAG);
532
533 BUG_ON(!root->ref_tree);
534 dirty = root->dirty_root;
535
536 btrfs_free_log(trans, root);
537 btrfs_free_reloc_root(trans, root);
538
539 if (root->commit_root == root->node) {
540 WARN_ON(root->node->start !=
541 btrfs_root_bytenr(&root->root_item));
542
543 free_extent_buffer(root->commit_root);
544 root->commit_root = NULL;
545 root->dirty_root = NULL;
546
547 spin_lock(&root->list_lock);
548 list_del_init(&dirty->root->dead_list);
549 spin_unlock(&root->list_lock);
550
551 kfree(dirty->root);
552 kfree(dirty);
553
554 /* make sure to update the root on disk
555 * so we get any updates to the block used
556 * counts
557 */
558 err = btrfs_update_root(trans,
559 root->fs_info->tree_root,
560 &root->root_key,
561 &root->root_item);
562 continue;
563 }
564
565 memset(&root->root_item.drop_progress, 0,
566 sizeof(struct btrfs_disk_key));
567 root->root_item.drop_level = 0;
568 root->commit_root = NULL;
569 root->dirty_root = NULL;
570 root->root_key.offset = root->fs_info->generation;
571 btrfs_set_root_bytenr(&root->root_item,
572 root->node->start);
573 btrfs_set_root_level(&root->root_item,
574 btrfs_header_level(root->node));
575 btrfs_set_root_generation(&root->root_item,
576 root->root_key.offset);
577
578 err = btrfs_insert_root(trans, root->fs_info->tree_root,
579 &root->root_key,
580 &root->root_item);
581 if (err)
582 break;
583
584 refs = btrfs_root_refs(&dirty->root->root_item);
585 btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
586 err = btrfs_update_root(trans, root->fs_info->tree_root,
587 &dirty->root->root_key,
588 &dirty->root->root_item);
589
590 BUG_ON(err);
591 if (refs == 1) {
592 list_add(&dirty->list, list);
593 } else {
594 WARN_ON(1);
595 free_extent_buffer(dirty->root->node);
596 kfree(dirty->root);
597 kfree(dirty);
598 }
599 }
600 }
601 return err;
602}
603
604/*
605 * defrag a given btree. If cacheonly == 1, this won't read from the disk,
606 * otherwise every leaf in the btree is read and defragged.
607 */
608int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
609{
610 struct btrfs_fs_info *info = root->fs_info;
611 int ret;
612 struct btrfs_trans_handle *trans;
613 unsigned long nr;
614
615 smp_mb();
616 if (root->defrag_running)
617 return 0;
618 trans = btrfs_start_transaction(root, 1);
619 while (1) {
620 root->defrag_running = 1;
621 ret = btrfs_defrag_leaves(trans, root, cacheonly);
622 nr = trans->blocks_used;
623 btrfs_end_transaction(trans, root);
624 btrfs_btree_balance_dirty(info->tree_root, nr);
625 cond_resched();
626
627 trans = btrfs_start_transaction(root, 1);
628 if (root->fs_info->closing || ret != -EAGAIN)
629 break;
630 }
631 root->defrag_running = 0;
632 smp_mb();
633 btrfs_end_transaction(trans, root);
634 return 0;
635}
636
637/*
638 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
639 * all of them
640 */
641static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
642 struct list_head *list)
643{
644 struct btrfs_dirty_root *dirty;
645 struct btrfs_trans_handle *trans;
646 unsigned long nr;
647 u64 num_bytes;
648 u64 bytes_used;
649 u64 max_useless;
650 int ret = 0;
651 int err;
652
653 while (!list_empty(list)) {
654 struct btrfs_root *root;
655
656 dirty = list_entry(list->prev, struct btrfs_dirty_root, list);
657 list_del_init(&dirty->list);
658
659 num_bytes = btrfs_root_used(&dirty->root->root_item);
660 root = dirty->latest_root;
661 atomic_inc(&root->fs_info->throttles);
662
663 while (1) {
664 trans = btrfs_start_transaction(tree_root, 1);
665 mutex_lock(&root->fs_info->drop_mutex);
666 ret = btrfs_drop_snapshot(trans, dirty->root);
667 if (ret != -EAGAIN)
668 break;
669 mutex_unlock(&root->fs_info->drop_mutex);
670
671 err = btrfs_update_root(trans,
672 tree_root,
673 &dirty->root->root_key,
674 &dirty->root->root_item);
675 if (err)
676 ret = err;
677 nr = trans->blocks_used;
678 ret = btrfs_end_transaction(trans, tree_root);
679 BUG_ON(ret);
680
681 btrfs_btree_balance_dirty(tree_root, nr);
682 cond_resched();
683 }
684 BUG_ON(ret);
685 atomic_dec(&root->fs_info->throttles);
686 wake_up(&root->fs_info->transaction_throttle);
687
688 num_bytes -= btrfs_root_used(&dirty->root->root_item);
689 bytes_used = btrfs_root_used(&root->root_item);
690 if (num_bytes) {
691 btrfs_record_root_in_trans(root);
692 btrfs_set_root_used(&root->root_item,
693 bytes_used - num_bytes);
694 }
695
696 ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
697 if (ret) {
698 BUG();
699 break;
700 }
701 mutex_unlock(&root->fs_info->drop_mutex);
702
703 spin_lock(&root->list_lock);
704 list_del_init(&dirty->root->dead_list);
705 if (!list_empty(&root->dead_list)) {
706 struct btrfs_root *oldest;
707 oldest = list_entry(root->dead_list.prev,
708 struct btrfs_root, dead_list);
709 max_useless = oldest->root_key.offset - 1;
710 } else {
711 max_useless = root->root_key.offset - 1;
712 }
713 spin_unlock(&root->list_lock);
714
715 nr = trans->blocks_used;
716 ret = btrfs_end_transaction(trans, tree_root);
717 BUG_ON(ret);
718
719 ret = btrfs_remove_leaf_refs(root, max_useless, 0);
720 BUG_ON(ret);
721
722 free_extent_buffer(dirty->root->node);
723 kfree(dirty->root);
724 kfree(dirty);
725
726 btrfs_btree_balance_dirty(tree_root, nr);
727 cond_resched();
728 }
729 return ret;
730}
731
732/*
733 * new snapshots need to be created at a very specific time in the
734 * transaction commit. This does the actual creation
735 */
736static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
737 struct btrfs_fs_info *fs_info,
738 struct btrfs_pending_snapshot *pending)
739{
740 struct btrfs_key key;
741 struct btrfs_root_item *new_root_item;
742 struct btrfs_root *tree_root = fs_info->tree_root;
743 struct btrfs_root *root = pending->root;
744 struct extent_buffer *tmp;
745 struct extent_buffer *old;
746 int ret;
747 u64 objectid;
748
749 new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
750 if (!new_root_item) {
751 ret = -ENOMEM;
752 goto fail;
753 }
754 ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
755 if (ret)
756 goto fail;
757
758 btrfs_record_root_in_trans(root);
759 btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
760 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
761
762 key.objectid = objectid;
763 key.offset = trans->transid;
764 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
765
766 old = btrfs_lock_root_node(root);
767 btrfs_cow_block(trans, root, old, NULL, 0, &old, 0);
768
769 btrfs_copy_root(trans, root, old, &tmp, objectid);
770 btrfs_tree_unlock(old);
771 free_extent_buffer(old);
772
773 btrfs_set_root_bytenr(new_root_item, tmp->start);
774 btrfs_set_root_level(new_root_item, btrfs_header_level(tmp));
775 btrfs_set_root_generation(new_root_item, trans->transid);
776 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
777 new_root_item);
778 btrfs_tree_unlock(tmp);
779 free_extent_buffer(tmp);
780 if (ret)
781 goto fail;
782
783 key.offset = (u64)-1;
784 memcpy(&pending->root_key, &key, sizeof(key));
785fail:
786 kfree(new_root_item);
787 return ret;
788}
789
790static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
791 struct btrfs_pending_snapshot *pending)
792{
793 int ret;
794 int namelen;
795 u64 index = 0;
796 struct btrfs_trans_handle *trans;
797 struct inode *parent_inode;
798 struct inode *inode;
799 struct btrfs_root *parent_root;
800
801 parent_inode = pending->dentry->d_parent->d_inode;
802 parent_root = BTRFS_I(parent_inode)->root;
803 trans = btrfs_join_transaction(parent_root, 1);
804
805 /*
806 * insert the directory item
807 */
808 namelen = strlen(pending->name);
809 ret = btrfs_set_inode_index(parent_inode, &index);
810 ret = btrfs_insert_dir_item(trans, parent_root,
811 pending->name, namelen,
812 parent_inode->i_ino,
813 &pending->root_key, BTRFS_FT_DIR, index);
814
815 if (ret)
816 goto fail;
817
818 btrfs_i_size_write(parent_inode, parent_inode->i_size + namelen * 2);
819 ret = btrfs_update_inode(trans, parent_root, parent_inode);
820 BUG_ON(ret);
821
822 /* add the backref first */
823 ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
824 pending->root_key.objectid,
825 BTRFS_ROOT_BACKREF_KEY,
826 parent_root->root_key.objectid,
827 parent_inode->i_ino, index, pending->name,
828 namelen);
829
830 BUG_ON(ret);
831
832 /* now add the forward ref */
833 ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
834 parent_root->root_key.objectid,
835 BTRFS_ROOT_REF_KEY,
836 pending->root_key.objectid,
837 parent_inode->i_ino, index, pending->name,
838 namelen);
839
840 inode = btrfs_lookup_dentry(parent_inode, pending->dentry);
841 d_instantiate(pending->dentry, inode);
842fail:
843 btrfs_end_transaction(trans, fs_info->fs_root);
844 return ret;
845}
846
847/*
848 * create all the snapshots we've scheduled for creation
849 */
850static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
851 struct btrfs_fs_info *fs_info)
852{
853 struct btrfs_pending_snapshot *pending;
854 struct list_head *head = &trans->transaction->pending_snapshots;
855 struct list_head *cur;
856 int ret;
857
858 list_for_each(cur, head) {
859 pending = list_entry(cur, struct btrfs_pending_snapshot, list);
860 ret = create_pending_snapshot(trans, fs_info, pending);
861 BUG_ON(ret);
862 }
863 return 0;
864}
865
866static noinline int finish_pending_snapshots(struct btrfs_trans_handle *trans,
867 struct btrfs_fs_info *fs_info)
868{
869 struct btrfs_pending_snapshot *pending;
870 struct list_head *head = &trans->transaction->pending_snapshots;
871 int ret;
872
873 while (!list_empty(head)) {
874 pending = list_entry(head->next,
875 struct btrfs_pending_snapshot, list);
876 ret = finish_pending_snapshot(fs_info, pending);
877 BUG_ON(ret);
878 list_del(&pending->list);
879 kfree(pending->name);
880 kfree(pending);
881 }
882 return 0;
883}
884
885int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
886 struct btrfs_root *root)
887{
888 unsigned long joined = 0;
889 unsigned long timeout = 1;
890 struct btrfs_transaction *cur_trans;
891 struct btrfs_transaction *prev_trans = NULL;
892 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
893 struct list_head dirty_fs_roots;
894 struct extent_io_tree *pinned_copy;
895 DEFINE_WAIT(wait);
896 int ret;
897
898 INIT_LIST_HEAD(&dirty_fs_roots);
899 mutex_lock(&root->fs_info->trans_mutex);
900 if (trans->transaction->in_commit) {
901 cur_trans = trans->transaction;
902 trans->transaction->use_count++;
903 mutex_unlock(&root->fs_info->trans_mutex);
904 btrfs_end_transaction(trans, root);
905
906 ret = wait_for_commit(root, cur_trans);
907 BUG_ON(ret);
908
909 mutex_lock(&root->fs_info->trans_mutex);
910 put_transaction(cur_trans);
911 mutex_unlock(&root->fs_info->trans_mutex);
912
913 return 0;
914 }
915
916 pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
917 if (!pinned_copy)
918 return -ENOMEM;
919
920 extent_io_tree_init(pinned_copy,
921 root->fs_info->btree_inode->i_mapping, GFP_NOFS);
922
923 trans->transaction->in_commit = 1;
924 trans->transaction->blocked = 1;
925 cur_trans = trans->transaction;
926 if (cur_trans->list.prev != &root->fs_info->trans_list) {
927 prev_trans = list_entry(cur_trans->list.prev,
928 struct btrfs_transaction, list);
929 if (!prev_trans->commit_done) {
930 prev_trans->use_count++;
931 mutex_unlock(&root->fs_info->trans_mutex);
932
933 wait_for_commit(root, prev_trans);
934
935 mutex_lock(&root->fs_info->trans_mutex);
936 put_transaction(prev_trans);
937 }
938 }
939
940 do {
941 int snap_pending = 0;
942 joined = cur_trans->num_joined;
943 if (!list_empty(&trans->transaction->pending_snapshots))
944 snap_pending = 1;
945
946 WARN_ON(cur_trans != trans->transaction);
947 prepare_to_wait(&cur_trans->writer_wait, &wait,
948 TASK_UNINTERRUPTIBLE);
949
950 if (cur_trans->num_writers > 1)
951 timeout = MAX_SCHEDULE_TIMEOUT;
952 else
953 timeout = 1;
954
955 mutex_unlock(&root->fs_info->trans_mutex);
956
957 if (snap_pending) {
958 ret = btrfs_wait_ordered_extents(root, 1);
959 BUG_ON(ret);
960 }
961
962 schedule_timeout(timeout);
963
964 mutex_lock(&root->fs_info->trans_mutex);
965 finish_wait(&cur_trans->writer_wait, &wait);
966 } while (cur_trans->num_writers > 1 ||
967 (cur_trans->num_joined != joined));
968
969 ret = create_pending_snapshots(trans, root->fs_info);
970 BUG_ON(ret);
971
972 WARN_ON(cur_trans != trans->transaction);
973
974 /* btrfs_commit_tree_roots is responsible for getting the
975 * various roots consistent with each other. Every pointer
976 * in the tree of tree roots has to point to the most up to date
977 * root for every subvolume and other tree. So, we have to keep
978 * the tree logging code from jumping in and changing any
979 * of the trees.
980 *
981 * At this point in the commit, there can't be any tree-log
982 * writers, but a little lower down we drop the trans mutex
983 * and let new people in. By holding the tree_log_mutex
984 * from now until after the super is written, we avoid races
985 * with the tree-log code.
986 */
987 mutex_lock(&root->fs_info->tree_log_mutex);
988 /*
989 * keep tree reloc code from adding new reloc trees
990 */
991 mutex_lock(&root->fs_info->tree_reloc_mutex);
992
993
994 ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
995 &dirty_fs_roots);
996 BUG_ON(ret);
997
998	/* add_dirty_roots gets rid of all the tree log roots; it is now
999	 * safe to free the root of the tree of log roots
1000	 */
1001 btrfs_free_log_root_tree(trans, root->fs_info);
1002
1003 ret = btrfs_commit_tree_roots(trans, root);
1004 BUG_ON(ret);
1005
1006 cur_trans = root->fs_info->running_transaction;
1007 spin_lock(&root->fs_info->new_trans_lock);
1008 root->fs_info->running_transaction = NULL;
1009 spin_unlock(&root->fs_info->new_trans_lock);
1010 btrfs_set_super_generation(&root->fs_info->super_copy,
1011 cur_trans->transid);
1012 btrfs_set_super_root(&root->fs_info->super_copy,
1013 root->fs_info->tree_root->node->start);
1014 btrfs_set_super_root_level(&root->fs_info->super_copy,
1015 btrfs_header_level(root->fs_info->tree_root->node));
1016
1017 btrfs_set_super_chunk_root(&root->fs_info->super_copy,
1018 chunk_root->node->start);
1019 btrfs_set_super_chunk_root_level(&root->fs_info->super_copy,
1020 btrfs_header_level(chunk_root->node));
1021 btrfs_set_super_chunk_root_generation(&root->fs_info->super_copy,
1022 btrfs_header_generation(chunk_root->node));
1023
1024 if (!root->fs_info->log_root_recovering) {
1025 btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
1026 btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
1027 }
1028
1029 memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
1030 sizeof(root->fs_info->super_copy));
1031
1032 btrfs_copy_pinned(root, pinned_copy);
1033
1034 trans->transaction->blocked = 0;
1035 wake_up(&root->fs_info->transaction_throttle);
1036 wake_up(&root->fs_info->transaction_wait);
1037
1038 mutex_unlock(&root->fs_info->trans_mutex);
1039 ret = btrfs_write_and_wait_transaction(trans, root);
1040 BUG_ON(ret);
1041 write_ctree_super(trans, root, 0);
1042
1043 /*
1044 * the super is written, we can safely allow the tree-loggers
1045 * to go about their business
1046 */
1047 mutex_unlock(&root->fs_info->tree_log_mutex);
1048
1049 btrfs_finish_extent_commit(trans, root, pinned_copy);
1050 kfree(pinned_copy);
1051
1052 btrfs_drop_dead_reloc_roots(root);
1053 mutex_unlock(&root->fs_info->tree_reloc_mutex);
1054
1055 /* do the directory inserts of any pending snapshot creations */
1056 finish_pending_snapshots(trans, root->fs_info);
1057
1058 mutex_lock(&root->fs_info->trans_mutex);
1059
1060 cur_trans->commit_done = 1;
1061 root->fs_info->last_trans_committed = cur_trans->transid;
1062 wake_up(&cur_trans->commit_wait);
1063
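	/* two puts on purpose: one drops the reference this commit path
	 * took on cur_trans, and the other is presumably for the reference
	 * that was held through fs_info->running_transaction, which was
	 * cleared above
	 */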
1064 put_transaction(cur_trans);
1065 put_transaction(cur_trans);
1066
1067 list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
1068 if (root->fs_info->closing)
1069 list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
1070
1071 mutex_unlock(&root->fs_info->trans_mutex);
1072
1073 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1074
1075 if (root->fs_info->closing)
1076 drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
1077 return ret;
1078}
1079
1080/*
1081 * interface function to delete all the snapshots we have scheduled for deletion
1082 */
1083int btrfs_clean_old_snapshots(struct btrfs_root *root)
1084{
1085 struct list_head dirty_roots;
1086 INIT_LIST_HEAD(&dirty_roots);
1087again:
1088 mutex_lock(&root->fs_info->trans_mutex);
1089 list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
1090 mutex_unlock(&root->fs_info->trans_mutex);
1091
1092 if (!list_empty(&dirty_roots)) {
1093 drop_dirty_roots(root, &dirty_roots);
1094 goto again;
1095 }
1096 return 0;
1097}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
new file mode 100644
index 000000000000..ea292117f882
--- /dev/null
+++ b/fs/btrfs/transaction.h
@@ -0,0 +1,106 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __BTRFS_TRANSACTION__
20#define __BTRFS_TRANSACTION__
21#include "btrfs_inode.h"
22
23struct btrfs_transaction {
24 u64 transid;
25 unsigned long num_writers;
26 unsigned long num_joined;
27 int in_commit;
28 int use_count;
29 int commit_done;
30 int blocked;
31 struct list_head list;
32 struct extent_io_tree dirty_pages;
33 unsigned long start_time;
34 wait_queue_head_t writer_wait;
35 wait_queue_head_t commit_wait;
36 struct list_head pending_snapshots;
37};
38
39struct btrfs_trans_handle {
40 u64 transid;
41 unsigned long blocks_reserved;
42 unsigned long blocks_used;
43 struct btrfs_transaction *transaction;
44 u64 block_group;
45 u64 alloc_exclude_start;
46 u64 alloc_exclude_nr;
47};
48
49struct btrfs_pending_snapshot {
50 struct dentry *dentry;
51 struct btrfs_root *root;
52 char *name;
53 struct btrfs_key root_key;
54 struct list_head list;
55};
56
57struct btrfs_dirty_root {
58 struct list_head list;
59 struct btrfs_root *root;
60 struct btrfs_root *latest_root;
61};
62
63static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans,
64 struct inode *inode)
65{
66 trans->block_group = BTRFS_I(inode)->block_group;
67}
68
69static inline void btrfs_update_inode_block_group(
70 struct btrfs_trans_handle *trans,
71 struct inode *inode)
72{
73 BTRFS_I(inode)->block_group = trans->block_group;
74}
75
76static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
77 struct inode *inode)
78{
79 BTRFS_I(inode)->last_trans = trans->transaction->transid;
80}
81
82int btrfs_end_transaction(struct btrfs_trans_handle *trans,
83 struct btrfs_root *root);
84struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
85 int num_blocks);
86struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
87 int num_blocks);
88struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
89 int num_blocks);
90int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
91 struct btrfs_root *root);
92int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
93 struct btrfs_root *root);
94
95int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest);
96int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
97int btrfs_clean_old_snapshots(struct btrfs_root *root);
98int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
99 struct btrfs_root *root);
100int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
101 struct btrfs_root *root);
102void btrfs_throttle(struct btrfs_root *root);
103int btrfs_record_root_in_trans(struct btrfs_root *root);
104int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
105 struct extent_io_tree *dirty_pages);
106#endif
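/*
 * A minimal sketch of how the API above is typically driven -- an
 * illustration only, not part of the patch; real callers add error
 * handling and block accounting that is omitted here:
 */
static int example_update_and_commit(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	/* reserve one block and join (or start) the running transaction */
	trans = btrfs_start_transaction(root, 1);

	/* ... modify trees through trans here ... */

	/* force the whole transaction down to disk before returning */
	return btrfs_commit_transaction(trans, root);
}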
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
new file mode 100644
index 000000000000..3e8358c36165
--- /dev/null
+++ b/fs/btrfs/tree-defrag.c
@@ -0,0 +1,147 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include "ctree.h"
21#include "disk-io.h"
22#include "print-tree.h"
23#include "transaction.h"
24#include "locking.h"
25
26/* defrag all the leaves in a given btree. If cache_only == 1, don't read
27 * things from disk; otherwise read all the leaves and try to get key order to
28 * better reflect disk order
29 */
30
31int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
32 struct btrfs_root *root, int cache_only)
33{
34 struct btrfs_path *path = NULL;
35 struct btrfs_key key;
36 int ret = 0;
37 int wret;
38 int level;
39 int orig_level;
40 int is_extent = 0;
41 int next_key_ret = 0;
42 u64 last_ret = 0;
43 u64 min_trans = 0;
44
45 if (cache_only)
46 goto out;
47
48 if (root->fs_info->extent_root == root) {
49 /*
50 * there's recursion here right now in the tree locking,
51 * we can't defrag the extent root without deadlock
52 */
53 goto out;
54 }
55
56 if (root->ref_cows == 0 && !is_extent)
57 goto out;
58
59 if (btrfs_test_opt(root, SSD))
60 goto out;
61
62 path = btrfs_alloc_path();
63 if (!path)
64 return -ENOMEM;
65
66 level = btrfs_header_level(root->node);
67 orig_level = level;
68
69 if (level == 0)
70 goto out;
71
72 if (root->defrag_progress.objectid == 0) {
73 struct extent_buffer *root_node;
74 u32 nritems;
75
76 root_node = btrfs_lock_root_node(root);
77 nritems = btrfs_header_nritems(root_node);
78 root->defrag_max.objectid = 0;
79 /* from above we know this is not a leaf */
80 btrfs_node_key_to_cpu(root_node, &root->defrag_max,
81 nritems - 1);
82 btrfs_tree_unlock(root_node);
83 free_extent_buffer(root_node);
84 memset(&key, 0, sizeof(key));
85 } else {
86 memcpy(&key, &root->defrag_progress, sizeof(key));
87 }
88
89 path->keep_locks = 1;
90 if (cache_only)
91 min_trans = root->defrag_trans_start;
92
93 ret = btrfs_search_forward(root, &key, NULL, path,
94 cache_only, min_trans);
95 if (ret < 0)
96 goto out;
97 if (ret > 0) {
98 ret = 0;
99 goto out;
100 }
101 btrfs_release_path(root, path);
102 wret = btrfs_search_slot(trans, root, &key, path, 0, 1);
103
104 if (wret < 0) {
105 ret = wret;
106 goto out;
107 }
108 if (!path->nodes[1]) {
109 ret = 0;
110 goto out;
111 }
112 path->slots[1] = btrfs_header_nritems(path->nodes[1]);
113 next_key_ret = btrfs_find_next_key(root, path, &key, 1, cache_only,
114 min_trans);
115 ret = btrfs_realloc_node(trans, root,
116 path->nodes[1], 0,
117 cache_only, &last_ret,
118 &root->defrag_progress);
119 WARN_ON(ret && ret != -EAGAIN);
120 if (next_key_ret == 0) {
121 memcpy(&root->defrag_progress, &key, sizeof(key));
122 ret = -EAGAIN;
123 }
124
125 btrfs_release_path(root, path);
126 if (is_extent)
127 btrfs_extent_post_op(trans, root);
128out:
129 if (path)
130 btrfs_free_path(path);
131 if (ret == -EAGAIN) {
132 if (root->defrag_max.objectid > root->defrag_progress.objectid)
133 goto done;
134 if (root->defrag_max.type > root->defrag_progress.type)
135 goto done;
136 if (root->defrag_max.offset > root->defrag_progress.offset)
137 goto done;
138 ret = 0;
139 }
140done:
141 if (ret != -EAGAIN) {
142 memset(&root->defrag_progress, 0,
143 sizeof(root->defrag_progress));
144 root->defrag_trans_start = trans->transid;
145 }
146 return ret;
147}
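/*
 * btrfs_defrag_leaves() deliberately stops early with -EAGAIN, recording
 * its position in root->defrag_progress, so that the caller can restart
 * it in a fresh transaction. A rough sketch of the loop that
 * btrfs_defrag_root() (declared in transaction.h) is expected to run;
 * error handling is omitted:
 */
static int example_defrag_whole_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_trans_handle *trans;
	int ret;

	do {
		trans = btrfs_start_transaction(root, 1);
		ret = btrfs_defrag_leaves(trans, root, cacheonly);
		btrfs_end_transaction(trans, root);
		cond_resched();
	} while (ret == -EAGAIN);
	return ret;
}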
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
new file mode 100644
index 000000000000..d81cda2e077c
--- /dev/null
+++ b/fs/btrfs/tree-log.c
@@ -0,0 +1,2898 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include "ctree.h"
21#include "transaction.h"
22#include "disk-io.h"
23#include "locking.h"
24#include "print-tree.h"
25#include "compat.h"
26#include "tree-log.h"
27
28/* magic values for the inode_only field in btrfs_log_inode:
29 *
30 * LOG_INODE_ALL means to log everything
31 * LOG_INODE_EXISTS means to log just enough to recreate the inode
32 * during log replay
33 */
34#define LOG_INODE_ALL 0
35#define LOG_INODE_EXISTS 1
36
37/*
38 * stages for the tree walking. The first
39 * stage (0) is to only pin down the blocks we find; the
40 * second stage (1) is to make sure that all the inodes
41 * we find in the log are created in the subvolume.
42 *
43 * The last stage is to deal with directories and links and extents
44 * and all the other fun semantics
45 */
46#define LOG_WALK_PIN_ONLY 0
47#define LOG_WALK_REPLAY_INODES 1
48#define LOG_WALK_REPLAY_ALL 2
49
50static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
51 struct btrfs_root *root, struct inode *inode,
52 int inode_only);
53static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
54 struct btrfs_root *root,
55 struct btrfs_path *path, u64 objectid);
56
57/*
58 * tree logging is a special write ahead log used to make sure that
59 * fsyncs and O_SYNCs can happen without doing full tree commits.
60 *
61 * Full tree commits are expensive because they require commonly
62 * modified blocks to be recowed, creating many dirty pages in the
63 * extent tree and a 4x-6x higher write load than ext3.
64 *
65 * Instead of doing a tree commit on every fsync, we use the
66 * key ranges and transaction ids to find items for a given file or directory
67 * that have changed in this transaction. Those items are copied into
68 * a special tree (one per subvolume root), that tree is written to disk
69 * and then the fsync is considered complete.
70 *
71 * After a crash, items are copied out of the log-tree back into the
72 * subvolume tree. Any file data extents found are recorded in the extent
73 * allocation tree, and the log-tree freed.
74 *
75 * The log tree is read three times: once to pin down all the extents it is
76 * using in ram, once to create all the inodes logged in the tree,
77 * and once to do all the other items.
78 */
79
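/*
 * To make the flow concrete, a sketch of what an fsync-time caller of
 * this file does with the helpers below (the final step of writing the
 * log root to disk lives in the log-commit code and is only assumed
 * here):
 *
 *	start_log_trans(trans, root);		take a writer reference
 *	__btrfs_log_inode(trans, root, inode, LOG_INODE_ALL);
 *	end_log_trans(root);			drop it, wake any syncer
 *	... sync the dirty log root, then the fsync can return ...
 */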
80/*
81 * btrfs_add_log_tree adds a new per-subvolume log tree into the
82 * tree of log tree roots. This must be called with a tree log transaction
83 * running (see start_log_trans).
84 */
85static int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
86 struct btrfs_root *root)
87{
88 struct btrfs_key key;
89 struct btrfs_root_item root_item;
90 struct btrfs_inode_item *inode_item;
91 struct extent_buffer *leaf;
92 struct btrfs_root *new_root = root;
93 int ret;
94 u64 objectid = root->root_key.objectid;
95
96 leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
97 BTRFS_TREE_LOG_OBJECTID,
98 trans->transid, 0, 0, 0);
99 if (IS_ERR(leaf)) {
100 ret = PTR_ERR(leaf);
101 return ret;
102 }
103
104 btrfs_set_header_nritems(leaf, 0);
105 btrfs_set_header_level(leaf, 0);
106 btrfs_set_header_bytenr(leaf, leaf->start);
107 btrfs_set_header_generation(leaf, trans->transid);
108 btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
109
110 write_extent_buffer(leaf, root->fs_info->fsid,
111 (unsigned long)btrfs_header_fsid(leaf),
112 BTRFS_FSID_SIZE);
113 btrfs_mark_buffer_dirty(leaf);
114
115 inode_item = &root_item.inode;
116 memset(inode_item, 0, sizeof(*inode_item));
117 inode_item->generation = cpu_to_le64(1);
118 inode_item->size = cpu_to_le64(3);
119 inode_item->nlink = cpu_to_le32(1);
120 inode_item->nbytes = cpu_to_le64(root->leafsize);
121 inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
122
123 btrfs_set_root_bytenr(&root_item, leaf->start);
124 btrfs_set_root_generation(&root_item, trans->transid);
125 btrfs_set_root_level(&root_item, 0);
126 btrfs_set_root_refs(&root_item, 0);
127 btrfs_set_root_used(&root_item, 0);
128
129 memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
130 root_item.drop_level = 0;
131
132 btrfs_tree_unlock(leaf);
133 free_extent_buffer(leaf);
134 leaf = NULL;
135
136 btrfs_set_root_dirid(&root_item, 0);
137
138 key.objectid = BTRFS_TREE_LOG_OBJECTID;
139 key.offset = objectid;
140 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
141 ret = btrfs_insert_root(trans, root->fs_info->log_root_tree, &key,
142 &root_item);
143 if (ret)
144 goto fail;
145
146 new_root = btrfs_read_fs_root_no_radix(root->fs_info->log_root_tree,
147 &key);
148 BUG_ON(!new_root);
149
150 WARN_ON(root->log_root);
151 root->log_root = new_root;
152
153 /*
154 * log trees do not get reference counted because they go away
155 * before a real commit is actually done. They do store pointers
156 * to file data extents, and those reference counts still get
157 * updated (along with back refs to the log tree).
158 */
159 new_root->ref_cows = 0;
160 new_root->last_trans = trans->transid;
161
162 /*
163 * we need to make sure the root block for this new tree
164 * is marked as dirty in the dirty_log_pages tree. This
165 * is how it gets flushed down to disk at tree log commit time.
166 *
167 * the tree logging mutex keeps others from coming in and changing
168 * the new_root->node, so we can safely access it here
169 */
170 set_extent_dirty(&new_root->dirty_log_pages, new_root->node->start,
171 new_root->node->start + new_root->node->len - 1,
172 GFP_NOFS);
173
174fail:
175 return ret;
176}
177
178/*
179 * start a sub transaction and setup the log tree
180 * this increments the log tree writer count to make the people
181 * syncing the tree wait for us to finish
182 */
183static int start_log_trans(struct btrfs_trans_handle *trans,
184 struct btrfs_root *root)
185{
186 int ret;
187 mutex_lock(&root->fs_info->tree_log_mutex);
188 if (!root->fs_info->log_root_tree) {
189 ret = btrfs_init_log_root_tree(trans, root->fs_info);
190 BUG_ON(ret);
191 }
192 if (!root->log_root) {
193 ret = btrfs_add_log_tree(trans, root);
194 BUG_ON(ret);
195 }
196 atomic_inc(&root->fs_info->tree_log_writers);
197 root->fs_info->tree_log_batch++;
198 mutex_unlock(&root->fs_info->tree_log_mutex);
199 return 0;
200}
201
202/*
203 * returns 0 if there was a log transaction running and we were able
204 * to join, or returns -ENOENT if there is no transaction
205 * in progress
206 */
207static int join_running_log_trans(struct btrfs_root *root)
208{
209 int ret = -ENOENT;
210
211 smp_mb();
212 if (!root->log_root)
213 return -ENOENT;
214
215 mutex_lock(&root->fs_info->tree_log_mutex);
216 if (root->log_root) {
217 ret = 0;
218 atomic_inc(&root->fs_info->tree_log_writers);
219 root->fs_info->tree_log_batch++;
220 }
221 mutex_unlock(&root->fs_info->tree_log_mutex);
222 return ret;
223}
224
225/*
226 * indicate we're done making changes to the log tree
227 * and wake up anyone waiting to do a sync
228 */
229static int end_log_trans(struct btrfs_root *root)
230{
231 atomic_dec(&root->fs_info->tree_log_writers);
232 smp_mb();
233 if (waitqueue_active(&root->fs_info->tree_log_wait))
234 wake_up(&root->fs_info->tree_log_wait);
235 return 0;
236}
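/*
 * The matching sync side is assumed to drain the writer count before it
 * writes out the log root, along the lines of:
 *
 *	wait_event(root->fs_info->tree_log_wait,
 *		   atomic_read(&root->fs_info->tree_log_writers) == 0);
 */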
237
238
239/*
240 * the walk control struct is used to pass state down the chain when
241 * processing the log tree. The stage field tells us which part
242 * of the log tree processing we are currently doing. The others
243 * are state fields used for that specific part
244 */
245struct walk_control {
246 /* should we free the extent on disk when done? This is used
247 * at transaction commit time while freeing a log tree
248 */
249 int free;
250
251 /* should we write out the extent buffer? This is used
252 * while flushing the log tree to disk during a sync
253 */
254 int write;
255
256 /* should we wait for the extent buffer io to finish? Also used
257 * while flushing the log tree to disk for a sync
258 */
259 int wait;
260
261 /* pin only walk, we record which extents on disk belong to the
262 * log trees
263 */
264 int pin;
265
266 /* what stage of the replay code we're currently in */
267 int stage;
268
269 /* the root we are currently replaying */
270 struct btrfs_root *replay_dest;
271
272 /* the trans handle for the current replay */
273 struct btrfs_trans_handle *trans;
274
275 /* the function that gets used to process blocks we find in the
276 * tree. Note the extent_buffer might not be up to date when it is
277 * passed in, and it must be checked or read if you need the data
278 * inside it
279 */
280 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
281 struct walk_control *wc, u64 gen);
282};
283
284/*
285 * process_func used to pin down extents, write them or wait on them
286 */
287static int process_one_buffer(struct btrfs_root *log,
288 struct extent_buffer *eb,
289 struct walk_control *wc, u64 gen)
290{
291 if (wc->pin) {
292 mutex_lock(&log->fs_info->pinned_mutex);
293 btrfs_update_pinned_extents(log->fs_info->extent_root,
294 eb->start, eb->len, 1);
295 mutex_unlock(&log->fs_info->pinned_mutex);
296 }
297
298 if (btrfs_buffer_uptodate(eb, gen)) {
299 if (wc->write)
300 btrfs_write_tree_block(eb);
301 if (wc->wait)
302 btrfs_wait_tree_block_writeback(eb);
303 }
304 return 0;
305}
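/*
 * For example, the recovery code later in this file is expected to start
 * with a pin-only pass that runs process_one_buffer() on every block in
 * the log tree. A sketch of that setup (illustration only, not part of
 * the patch):
 */
static void example_setup_pin_walk(struct walk_control *wc)
{
	memset(wc, 0, sizeof(*wc));
	wc->pin = 1;			/* record extents, don't replay */
	wc->process_func = process_one_buffer;
	wc->stage = LOG_WALK_PIN_ONLY;
}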
306
307/*
308 * Item overwrite used by replay and tree logging. eb, slot and key all refer
309 * to the src data we are copying out.
310 *
311 * root is the tree we are copying into, and path is a scratch
312 * path for use in this function (it should be released on entry and
313 * will be released on exit).
314 *
315 * If the key is already in the destination tree the existing item is
316 * overwritten. If the existing item isn't big enough, it is extended.
317 * If it is too large, it is truncated.
318 *
319 * If the key isn't in the destination yet, a new item is inserted.
320 */
321static noinline int overwrite_item(struct btrfs_trans_handle *trans,
322 struct btrfs_root *root,
323 struct btrfs_path *path,
324 struct extent_buffer *eb, int slot,
325 struct btrfs_key *key)
326{
327 int ret;
328 u32 item_size;
329 u64 saved_i_size = 0;
330 int save_old_i_size = 0;
331 unsigned long src_ptr;
332 unsigned long dst_ptr;
333 int overwrite_root = 0;
334
335 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
336 overwrite_root = 1;
337
338 item_size = btrfs_item_size_nr(eb, slot);
339 src_ptr = btrfs_item_ptr_offset(eb, slot);
340
341 /* look for the key in the destination tree */
342 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
343 if (ret == 0) {
344 char *src_copy;
345 char *dst_copy;
346 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
347 path->slots[0]);
348 if (dst_size != item_size)
349 goto insert;
350
351 if (item_size == 0) {
352 btrfs_release_path(root, path);
353 return 0;
354 }
355 dst_copy = kmalloc(item_size, GFP_NOFS);
356 src_copy = kmalloc(item_size, GFP_NOFS);
357
358 read_extent_buffer(eb, src_copy, src_ptr, item_size);
359
360 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
361 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
362 item_size);
363 ret = memcmp(dst_copy, src_copy, item_size);
364
365 kfree(dst_copy);
366 kfree(src_copy);
367 /*
368 * they have the same contents, so just return; this saves
369 * us from cowing blocks in the destination tree and doing
370 * extra writes that may not have been done by a previous
371 * sync
372 */
373 if (ret == 0) {
374 btrfs_release_path(root, path);
375 return 0;
376 }
377
378 }
379insert:
380 btrfs_release_path(root, path);
381 /* try to insert the key into the destination tree */
382 ret = btrfs_insert_empty_item(trans, root, path,
383 key, item_size);
384
385 /* make sure any existing item is the correct size */
386 if (ret == -EEXIST) {
387 u32 found_size;
388 found_size = btrfs_item_size_nr(path->nodes[0],
389 path->slots[0]);
390 if (found_size > item_size) {
391 btrfs_truncate_item(trans, root, path, item_size, 1);
392 } else if (found_size < item_size) {
393 ret = btrfs_extend_item(trans, root, path,
394 item_size - found_size);
395 BUG_ON(ret);
396 }
397 } else if (ret) {
398 BUG();
399 }
400 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
401 path->slots[0]);
402
403 /* don't overwrite an existing inode if the generation number
404 * was logged as zero. This is done when the tree logging code
405 * is just logging an inode to make sure it exists after recovery.
406 *
407 * Also, don't overwrite i_size on directories during replay.
408 * log replay inserts and removes directory items based on the
409 * state of the tree found in the subvolume, and i_size is modified
410 * as it goes
411 */
412 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
413 struct btrfs_inode_item *src_item;
414 struct btrfs_inode_item *dst_item;
415
416 src_item = (struct btrfs_inode_item *)src_ptr;
417 dst_item = (struct btrfs_inode_item *)dst_ptr;
418
419 if (btrfs_inode_generation(eb, src_item) == 0)
420 goto no_copy;
421
422 if (overwrite_root &&
423 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
424 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
425 save_old_i_size = 1;
426 saved_i_size = btrfs_inode_size(path->nodes[0],
427 dst_item);
428 }
429 }
430
431 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
432 src_ptr, item_size);
433
434 if (save_old_i_size) {
435 struct btrfs_inode_item *dst_item;
436 dst_item = (struct btrfs_inode_item *)dst_ptr;
437 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
438 }
439
440 /* make sure the generation is filled in */
441 if (key->type == BTRFS_INODE_ITEM_KEY) {
442 struct btrfs_inode_item *dst_item;
443 dst_item = (struct btrfs_inode_item *)dst_ptr;
444 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
445 btrfs_set_inode_generation(path->nodes[0], dst_item,
446 trans->transid);
447 }
448 }
449no_copy:
450 btrfs_mark_buffer_dirty(path->nodes[0]);
451 btrfs_release_path(root, path);
452 return 0;
453}
454
455/*
456 * simple helper to read an inode off the disk from a given root
457 * This can only be called for subvolume roots and not for the log
458 */
459static noinline struct inode *read_one_inode(struct btrfs_root *root,
460 u64 objectid)
461{
462 struct inode *inode;
463 inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
464 if (inode->i_state & I_NEW) {
465 BTRFS_I(inode)->root = root;
466 BTRFS_I(inode)->location.objectid = objectid;
467 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
468 BTRFS_I(inode)->location.offset = 0;
469 btrfs_read_locked_inode(inode);
470 unlock_new_inode(inode);
471
472 }
473 if (is_bad_inode(inode)) {
474 iput(inode);
475 inode = NULL;
476 }
477 return inode;
478}
479
480/* replays a single extent in 'eb' at 'slot' with 'key' into the
481 * subvolume 'root'. path is released on entry and should be released
482 * on exit.
483 *
484 * extents in the log tree have not been allocated out of the extent
485 * tree yet. So, this completes the allocation, taking a reference
486 * as required if the extent already exists or creating a new extent
487 * if it isn't in the extent allocation tree yet.
488 *
489 * The extent is inserted into the file, dropping any existing extents
490 * from the file that overlap the new one.
491 */
492static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
493 struct btrfs_root *root,
494 struct btrfs_path *path,
495 struct extent_buffer *eb, int slot,
496 struct btrfs_key *key)
497{
498 int found_type;
499 u64 mask = root->sectorsize - 1;
500 u64 extent_end;
501 u64 alloc_hint;
502 u64 start = key->offset;
503 u64 saved_nbytes;
504 struct btrfs_file_extent_item *item;
505 struct inode *inode = NULL;
506 unsigned long size;
507 int ret = 0;
508
509 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
510 found_type = btrfs_file_extent_type(eb, item);
511
512 if (found_type == BTRFS_FILE_EXTENT_REG ||
513 found_type == BTRFS_FILE_EXTENT_PREALLOC)
514 extent_end = start + btrfs_file_extent_num_bytes(eb, item);
515 else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
516 size = btrfs_file_extent_inline_len(eb, item);
517 extent_end = (start + size + mask) & ~mask;
518 } else {
519 ret = 0;
520 goto out;
521 }
522
523 inode = read_one_inode(root, key->objectid);
524 if (!inode) {
525 ret = -EIO;
526 goto out;
527 }
528
529 /*
530 * first check to see if we already have this extent in the
531 * file. This must be done before btrfs_drop_extents runs,
532 * so we don't try to drop this extent.
533 */
534 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
535 start, 0);
536
537 if (ret == 0 &&
538 (found_type == BTRFS_FILE_EXTENT_REG ||
539 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
540 struct btrfs_file_extent_item cmp1;
541 struct btrfs_file_extent_item cmp2;
542 struct btrfs_file_extent_item *existing;
543 struct extent_buffer *leaf;
544
545 leaf = path->nodes[0];
546 existing = btrfs_item_ptr(leaf, path->slots[0],
547 struct btrfs_file_extent_item);
548
549 read_extent_buffer(eb, &cmp1, (unsigned long)item,
550 sizeof(cmp1));
551 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
552 sizeof(cmp2));
553
554 /*
555 * we already have a pointer to this exact extent,
556 * we don't have to do anything
557 */
558 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
559 btrfs_release_path(root, path);
560 goto out;
561 }
562 }
563 btrfs_release_path(root, path);
564
565 saved_nbytes = inode_get_bytes(inode);
566 /* drop any overlapping extents */
567 ret = btrfs_drop_extents(trans, root, inode,
568 start, extent_end, start, &alloc_hint);
569 BUG_ON(ret);
570
571 if (found_type == BTRFS_FILE_EXTENT_REG ||
572 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
573 unsigned long dest_offset;
574 struct btrfs_key ins;
575
576 ret = btrfs_insert_empty_item(trans, root, path, key,
577 sizeof(*item));
578 BUG_ON(ret);
579 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
580 path->slots[0]);
581 copy_extent_buffer(path->nodes[0], eb, dest_offset,
582 (unsigned long)item, sizeof(*item));
583
584 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
585 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
586 ins.type = BTRFS_EXTENT_ITEM_KEY;
587
588 if (ins.objectid > 0) {
589 u64 csum_start;
590 u64 csum_end;
591 LIST_HEAD(ordered_sums);
592 /*
593 * is this extent already allocated in the extent
594 * allocation tree? If so, just add a reference
595 */
596 ret = btrfs_lookup_extent(root, ins.objectid,
597 ins.offset);
598 if (ret == 0) {
599 ret = btrfs_inc_extent_ref(trans, root,
600 ins.objectid, ins.offset,
601 path->nodes[0]->start,
602 root->root_key.objectid,
603 trans->transid, key->objectid);
604 } else {
605 /*
606 * insert the extent pointer in the extent
607 * allocation tree
608 */
609 ret = btrfs_alloc_logged_extent(trans, root,
610 path->nodes[0]->start,
611 root->root_key.objectid,
612 trans->transid, key->objectid,
613 &ins);
614 BUG_ON(ret);
615 }
616 btrfs_release_path(root, path);
617
618 if (btrfs_file_extent_compression(eb, item)) {
619 csum_start = ins.objectid;
620 csum_end = csum_start + ins.offset;
621 } else {
622 csum_start = ins.objectid +
623 btrfs_file_extent_offset(eb, item);
624 csum_end = csum_start +
625 btrfs_file_extent_num_bytes(eb, item);
626 }
627
628 ret = btrfs_lookup_csums_range(root->log_root,
629 csum_start, csum_end - 1,
630 &ordered_sums);
631 BUG_ON(ret);
632 while (!list_empty(&ordered_sums)) {
633 struct btrfs_ordered_sum *sums;
634 sums = list_entry(ordered_sums.next,
635 struct btrfs_ordered_sum,
636 list);
637 ret = btrfs_csum_file_blocks(trans,
638 root->fs_info->csum_root,
639 sums);
640 BUG_ON(ret);
641 list_del(&sums->list);
642 kfree(sums);
643 }
644 } else {
645 btrfs_release_path(root, path);
646 }
647 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
648 /* inline extents are easy, we just overwrite them */
649 ret = overwrite_item(trans, root, path, eb, slot, key);
650 BUG_ON(ret);
651 }
652
653 inode_set_bytes(inode, saved_nbytes);
654 btrfs_update_inode(trans, root, inode);
655out:
656 if (inode)
657 iput(inode);
658 return ret;
659}
660
661/*
662 * when cleaning up conflicts between the directory names in the
663 * subvolume, directory names in the log and directory names in the
664 * inode back references, we may have to unlink inodes from directories.
665 *
666 * This is a helper function to do the unlink of a specific directory
667 * item
668 */
669static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
670 struct btrfs_root *root,
671 struct btrfs_path *path,
672 struct inode *dir,
673 struct btrfs_dir_item *di)
674{
675 struct inode *inode;
676 char *name;
677 int name_len;
678 struct extent_buffer *leaf;
679 struct btrfs_key location;
680 int ret;
681
682 leaf = path->nodes[0];
683
684 btrfs_dir_item_key_to_cpu(leaf, di, &location);
685 name_len = btrfs_dir_name_len(leaf, di);
686 name = kmalloc(name_len, GFP_NOFS);
687 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
688 btrfs_release_path(root, path);
689
690 inode = read_one_inode(root, location.objectid);
691 BUG_ON(!inode);
692
693 ret = link_to_fixup_dir(trans, root, path, location.objectid);
694 BUG_ON(ret);
695 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
696 BUG_ON(ret);
697 kfree(name);
698
699 iput(inode);
700 return ret;
701}
702
703/*
704 * helper function to see if a given name and sequence number found
705 * in an inode back reference are already in a directory and correctly
706 * point to this inode
707 */
708static noinline int inode_in_dir(struct btrfs_root *root,
709 struct btrfs_path *path,
710 u64 dirid, u64 objectid, u64 index,
711 const char *name, int name_len)
712{
713 struct btrfs_dir_item *di;
714 struct btrfs_key location;
715 int match = 0;
716
717 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
718 index, name, name_len, 0);
719 if (di && !IS_ERR(di)) {
720 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
721 if (location.objectid != objectid)
722 goto out;
723 } else
724 goto out;
725 btrfs_release_path(root, path);
726
727 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
728 if (di && !IS_ERR(di)) {
729 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
730 if (location.objectid != objectid)
731 goto out;
732 } else
733 goto out;
734 match = 1;
735out:
736 btrfs_release_path(root, path);
737 return match;
738}
739
740/*
741 * helper function to check a log tree for a named back reference in
742 * an inode. This is used to decide if a back reference that is
743 * found in the subvolume conflicts with what we find in the log.
744 *
745 * inode backreferences may have multiple refs in a single item;
746 * during replay we process one reference at a time, and we don't
747 * want to delete valid links to a file from the subvolume if that
748 * link is also in the log.
749 */
750static noinline int backref_in_log(struct btrfs_root *log,
751 struct btrfs_key *key,
752 char *name, int namelen)
753{
754 struct btrfs_path *path;
755 struct btrfs_inode_ref *ref;
756 unsigned long ptr;
757 unsigned long ptr_end;
758 unsigned long name_ptr;
759 int found_name_len;
760 int item_size;
761 int ret;
762 int match = 0;
763
764 path = btrfs_alloc_path();
765 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
766 if (ret != 0)
767 goto out;
768
769 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
770 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
771 ptr_end = ptr + item_size;
772 while (ptr < ptr_end) {
773 ref = (struct btrfs_inode_ref *)ptr;
774 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
775 if (found_name_len == namelen) {
776 name_ptr = (unsigned long)(ref + 1);
777 ret = memcmp_extent_buffer(path->nodes[0], name,
778 name_ptr, namelen);
779 if (ret == 0) {
780 match = 1;
781 goto out;
782 }
783 }
784 ptr = (unsigned long)(ref + 1) + found_name_len;
785 }
786out:
787 btrfs_free_path(path);
788 return match;
789}
790
791
792/*
793 * replay one inode back reference item found in the log tree.
794 * eb, slot and key refer to the buffer and key found in the log tree.
795 * root is the destination we are replaying into, and path is for temp
796 * use by this function. (it should be released on return).
797 */
798static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
799 struct btrfs_root *root,
800 struct btrfs_root *log,
801 struct btrfs_path *path,
802 struct extent_buffer *eb, int slot,
803 struct btrfs_key *key)
804{
805 struct inode *dir;
806 int ret;
807 struct btrfs_key location;
808 struct btrfs_inode_ref *ref;
809 struct btrfs_dir_item *di;
810 struct inode *inode;
811 char *name;
812 int namelen;
813 unsigned long ref_ptr;
814 unsigned long ref_end;
815
816 location.objectid = key->objectid;
817 location.type = BTRFS_INODE_ITEM_KEY;
818 location.offset = 0;
819
820 /*
821 * it is possible that we didn't log all the parent directories
822 * for a given inode. If we don't find the dir, just don't
823 * copy the back ref in. The link count fixup code will take
824 * care of the rest
825 */
826 dir = read_one_inode(root, key->offset);
827 if (!dir)
828 return -ENOENT;
829
830 inode = read_one_inode(root, key->objectid);
831 BUG_ON(!inode);
832
833 ref_ptr = btrfs_item_ptr_offset(eb, slot);
834 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
835
836again:
837 ref = (struct btrfs_inode_ref *)ref_ptr;
838
839 namelen = btrfs_inode_ref_name_len(eb, ref);
840 name = kmalloc(namelen, GFP_NOFS);
841 BUG_ON(!name);
842
843 read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);
844
845 /* if we already have a perfect match, we're done */
846 if (inode_in_dir(root, path, dir->i_ino, inode->i_ino,
847 btrfs_inode_ref_index(eb, ref),
848 name, namelen)) {
849 goto out;
850 }
851
852 /*
853 * look for a conflicting back reference in the metadata.
854 * if we find one we have to unlink that name of the file
855 * before we add our new link. Later on, we overwrite any
856 * existing back reference, and we don't want to create
857 * dangling pointers in the directory.
858 */
859conflict_again:
860 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
861 if (ret == 0) {
862 char *victim_name;
863 int victim_name_len;
864 struct btrfs_inode_ref *victim_ref;
865 unsigned long ptr;
866 unsigned long ptr_end;
867 struct extent_buffer *leaf = path->nodes[0];
868
869 /* are we trying to overwrite a back ref for the root directory?
870 * if so, just jump out, we're done
871 */
872 if (key->objectid == key->offset)
873 goto out_nowrite;
874
875 /* check all the names in this back reference to see
876 * if they are in the log. If so, we allow them to stay;
877 * otherwise they must be unlinked as a conflict
878 */
879 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
880 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
881 while (ptr < ptr_end) {
882 victim_ref = (struct btrfs_inode_ref *)ptr;
883 victim_name_len = btrfs_inode_ref_name_len(leaf,
884 victim_ref);
885 victim_name = kmalloc(victim_name_len, GFP_NOFS);
886 BUG_ON(!victim_name);
887
888 read_extent_buffer(leaf, victim_name,
889 (unsigned long)(victim_ref + 1),
890 victim_name_len);
891
892 if (!backref_in_log(log, key, victim_name,
893 victim_name_len)) {
894 btrfs_inc_nlink(inode);
895 btrfs_release_path(root, path);
896 ret = btrfs_unlink_inode(trans, root, dir,
897 inode, victim_name,
898 victim_name_len);
899 kfree(victim_name);
900 btrfs_release_path(root, path);
901 goto conflict_again;
902 }
903 kfree(victim_name);
904 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
905 }
906 BUG_ON(ret);
907 }
908 btrfs_release_path(root, path);
909
910 /* look for a conflicting sequence number */
911 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
912 btrfs_inode_ref_index(eb, ref),
913 name, namelen, 0);
914 if (di && !IS_ERR(di)) {
915 ret = drop_one_dir_item(trans, root, path, dir, di);
916 BUG_ON(ret);
917 }
918 btrfs_release_path(root, path);
919
920
921 /* look for a conflicting name */
922 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
923 name, namelen, 0);
924 if (di && !IS_ERR(di)) {
925 ret = drop_one_dir_item(trans, root, path, dir, di);
926 BUG_ON(ret);
927 }
928 btrfs_release_path(root, path);
929
930 /* insert our name */
931 ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
932 btrfs_inode_ref_index(eb, ref));
933 BUG_ON(ret);
934
935 btrfs_update_inode(trans, root, inode);
936
937out:
938 ref_ptr = (unsigned long)(ref + 1) + namelen;
939 kfree(name);
940 if (ref_ptr < ref_end)
941 goto again;
942
943 /* finally write the back reference in the inode */
944 ret = overwrite_item(trans, root, path, eb, slot, key);
945 BUG_ON(ret);
946
947out_nowrite:
948 btrfs_release_path(root, path);
949 iput(dir);
950 iput(inode);
951 return 0;
952}
953
954/*
955 * There are a few corners where the link count of the file can't
956 * be properly maintained during replay. So, instead of adding
957 * lots of complexity to the log code, we just scan the backrefs
958 * for any file that has been through replay.
959 *
960 * The scan will update the link count on the inode to reflect the
961 * number of back refs found. If it goes down to zero, the iput
962 * will free the inode.
963 */
964static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
965 struct btrfs_root *root,
966 struct inode *inode)
967{
968 struct btrfs_path *path;
969 int ret;
970 struct btrfs_key key;
971 u64 nlink = 0;
972 unsigned long ptr;
973 unsigned long ptr_end;
974 int name_len;
975
976 key.objectid = inode->i_ino;
977 key.type = BTRFS_INODE_REF_KEY;
978 key.offset = (u64)-1;
979
980 path = btrfs_alloc_path();
981
982 while (1) {
983 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
984 if (ret < 0)
985 break;
986 if (ret > 0) {
987 if (path->slots[0] == 0)
988 break;
989 path->slots[0]--;
990 }
991 btrfs_item_key_to_cpu(path->nodes[0], &key,
992 path->slots[0]);
993 if (key.objectid != inode->i_ino ||
994 key.type != BTRFS_INODE_REF_KEY)
995 break;
996 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
997 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
998 path->slots[0]);
999 while (ptr < ptr_end) {
1000 struct btrfs_inode_ref *ref;
1001
1002 ref = (struct btrfs_inode_ref *)ptr;
1003 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1004 ref);
1005 ptr = (unsigned long)(ref + 1) + name_len;
1006 nlink++;
1007 }
1008
1009 if (key.offset == 0)
1010 break;
1011 key.offset--;
1012 btrfs_release_path(root, path);
1013 }
1014 btrfs_free_path(path);
1015 if (nlink != inode->i_nlink) {
1016 inode->i_nlink = nlink;
1017 btrfs_update_inode(trans, root, inode);
1018 }
1019 BTRFS_I(inode)->index_cnt = (u64)-1;
1020
1021 return 0;
1022}
1023
1024static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1025 struct btrfs_root *root,
1026 struct btrfs_path *path)
1027{
1028 int ret;
1029 struct btrfs_key key;
1030 struct inode *inode;
1031
1032 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1033 key.type = BTRFS_ORPHAN_ITEM_KEY;
1034 key.offset = (u64)-1;
1035 while (1) {
1036 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1037 if (ret < 0)
1038 break;
1039
1040 if (ret == 1) {
1041 if (path->slots[0] == 0)
1042 break;
1043 path->slots[0]--;
1044 }
1045
1046 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1047 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1048 key.type != BTRFS_ORPHAN_ITEM_KEY)
1049 break;
1050
1051 ret = btrfs_del_item(trans, root, path);
1052 BUG_ON(ret);
1053
1054 btrfs_release_path(root, path);
1055 inode = read_one_inode(root, key.offset);
1056 BUG_ON(!inode);
1057
1058 ret = fixup_inode_link_count(trans, root, inode);
1059 BUG_ON(ret);
1060
1061 iput(inode);
1062
1063 if (key.offset == 0)
1064 break;
1065 key.offset--;
1066 }
1067 btrfs_release_path(root, path);
1068 return 0;
1069}
1070
1071
1072/*
1073 * record a given inode in the fixup dir so we can check its link
1074 * count when replay is done. The link count is incremented here
1075 * so the inode won't go away until we check it
1076 */
1077static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1078 struct btrfs_root *root,
1079 struct btrfs_path *path,
1080 u64 objectid)
1081{
1082 struct btrfs_key key;
1083 int ret = 0;
1084 struct inode *inode;
1085
1086 inode = read_one_inode(root, objectid);
1087 BUG_ON(!inode);
1088
1089 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1090 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1091 key.offset = objectid;
1092
1093 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1094
1095 btrfs_release_path(root, path);
1096 if (ret == 0) {
1097 btrfs_inc_nlink(inode);
1098 btrfs_update_inode(trans, root, inode);
1099 } else if (ret == -EEXIST) {
1100 ret = 0;
1101 } else {
1102 BUG();
1103 }
1104 iput(inode);
1105
1106 return ret;
1107}
1108
1109/*
1110 * when replaying the log for a directory, we only insert names
1111 * for inodes that actually exist. This means an fsync on a directory
1112 * does not implicitly fsync all the new files in it
1113 */
1114static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1115 struct btrfs_root *root,
1116 struct btrfs_path *path,
1117 u64 dirid, u64 index,
1118 char *name, int name_len, u8 type,
1119 struct btrfs_key *location)
1120{
1121 struct inode *inode;
1122 struct inode *dir;
1123 int ret;
1124
1125 inode = read_one_inode(root, location->objectid);
1126 if (!inode)
1127 return -ENOENT;
1128
1129 dir = read_one_inode(root, dirid);
1130 if (!dir) {
1131 iput(inode);
1132 return -EIO;
1133 }
1134 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1135
1136 /* FIXME, put inode into FIXUP list */
1137
1138 iput(inode);
1139 iput(dir);
1140 return ret;
1141}
1142
1143/*
1144 * take a single entry in a log directory item and replay it into
1145 * the subvolume.
1146 *
1147 * if a conflicting item exists in the subdirectory already,
1148 * the inode it points to is unlinked and put into the link count
1149 * fix up tree.
1150 *
1151 * If a name from the log points to a file or directory that does
1152 * not exist in the FS, it is skipped. fsyncs on directories
1153 * do not force down inodes inside that directory, just changes to the
1154 * names or unlinks in a directory.
1155 */
1156static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1157 struct btrfs_root *root,
1158 struct btrfs_path *path,
1159 struct extent_buffer *eb,
1160 struct btrfs_dir_item *di,
1161 struct btrfs_key *key)
1162{
1163 char *name;
1164 int name_len;
1165 struct btrfs_dir_item *dst_di;
1166 struct btrfs_key found_key;
1167 struct btrfs_key log_key;
1168 struct inode *dir;
1169 u8 log_type;
1170 int exists;
1171 int ret;
1172
1173 dir = read_one_inode(root, key->objectid);
1174 BUG_ON(!dir);
1175
1176 name_len = btrfs_dir_name_len(eb, di);
1177 name = kmalloc(name_len, GFP_NOFS);
1178 log_type = btrfs_dir_type(eb, di);
1179 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1180 name_len);
1181
1182 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1183 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1184 if (exists == 0)
1185 exists = 1;
1186 else
1187 exists = 0;
1188 btrfs_release_path(root, path);
1189
1190 if (key->type == BTRFS_DIR_ITEM_KEY) {
1191 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1192 name, name_len, 1);
1193 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1194 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1195 key->objectid,
1196 key->offset, name,
1197 name_len, 1);
1198 } else {
1199 BUG();
1200 }
1201 if (!dst_di || IS_ERR(dst_di)) {
1202 /* we need a sequence number to insert, so we only
1203 * do inserts for the BTRFS_DIR_INDEX_KEY types
1204 */
1205 if (key->type != BTRFS_DIR_INDEX_KEY)
1206 goto out;
1207 goto insert;
1208 }
1209
1210 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1211 /* the existing item matches the logged item */
1212 if (found_key.objectid == log_key.objectid &&
1213 found_key.type == log_key.type &&
1214 found_key.offset == log_key.offset &&
1215 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1216 goto out;
1217 }
1218
1219 /*
1220 * don't drop the conflicting directory entry if the inode
1221 * for the new entry doesn't exist
1222 */
1223 if (!exists)
1224 goto out;
1225
1226 ret = drop_one_dir_item(trans, root, path, dir, dst_di);
1227 BUG_ON(ret);
1228
1229 if (key->type == BTRFS_DIR_INDEX_KEY)
1230 goto insert;
1231out:
1232 btrfs_release_path(root, path);
1233 kfree(name);
1234 iput(dir);
1235 return 0;
1236
1237insert:
1238 btrfs_release_path(root, path);
1239 ret = insert_one_name(trans, root, path, key->objectid, key->offset,
1240 name, name_len, log_type, &log_key);
1241
1242 if (ret && ret != -ENOENT)
1243 BUG();
1244 goto out;
1245}
1246
1247/*
1248 * find all the names in a directory item and reconcile them into
1249 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
1250 * one name in a directory item, but the same code gets used for
1251 * both directory key types
1252 */
1253static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1254 struct btrfs_root *root,
1255 struct btrfs_path *path,
1256 struct extent_buffer *eb, int slot,
1257 struct btrfs_key *key)
1258{
1259 int ret;
1260 u32 item_size = btrfs_item_size_nr(eb, slot);
1261 struct btrfs_dir_item *di;
1262 int name_len;
1263 unsigned long ptr;
1264 unsigned long ptr_end;
1265
1266 ptr = btrfs_item_ptr_offset(eb, slot);
1267 ptr_end = ptr + item_size;
1268 while (ptr < ptr_end) {
1269 di = (struct btrfs_dir_item *)ptr;
1270 name_len = btrfs_dir_name_len(eb, di);
1271 ret = replay_one_name(trans, root, path, eb, di, key);
1272 BUG_ON(ret);
1273 ptr = (unsigned long)(di + 1);
1274 ptr += name_len;
1275 }
1276 return 0;
1277}
1278
1279/*
1280 * directory replay has two parts. There are the standard directory
1281 * items in the log copied from the subvolume, and range items
1282 * created in the log while the subvolume was logged.
1283 *
1284 * The range items tell us which parts of the key space the log
1285 * is authoritative for. During replay, if a key in the subvolume
1286 * directory is in a logged range item, but not actually in the log
1287 * that means it was deleted from the directory before the fsync
1288 * and should be removed.
1289 */
1290static noinline int find_dir_range(struct btrfs_root *root,
1291 struct btrfs_path *path,
1292 u64 dirid, int key_type,
1293 u64 *start_ret, u64 *end_ret)
1294{
1295 struct btrfs_key key;
1296 u64 found_end;
1297 struct btrfs_dir_log_item *item;
1298 int ret;
1299 int nritems;
1300
1301 if (*start_ret == (u64)-1)
1302 return 1;
1303
1304 key.objectid = dirid;
1305 key.type = key_type;
1306 key.offset = *start_ret;
1307
1308 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1309 if (ret < 0)
1310 goto out;
1311 if (ret > 0) {
1312 if (path->slots[0] == 0)
1313 goto out;
1314 path->slots[0]--;
1315 }
1316 if (ret != 0)
1317 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1318
1319 if (key.type != key_type || key.objectid != dirid) {
1320 ret = 1;
1321 goto next;
1322 }
1323 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1324 struct btrfs_dir_log_item);
1325 found_end = btrfs_dir_log_end(path->nodes[0], item);
1326
1327 if (*start_ret >= key.offset && *start_ret <= found_end) {
1328 ret = 0;
1329 *start_ret = key.offset;
1330 *end_ret = found_end;
1331 goto out;
1332 }
1333 ret = 1;
1334next:
1335 /* check the next slot in the tree to see if it is a valid item */
1336 nritems = btrfs_header_nritems(path->nodes[0]);
1337 if (path->slots[0] >= nritems) {
1338 ret = btrfs_next_leaf(root, path);
1339 if (ret)
1340 goto out;
1341 } else {
1342 path->slots[0]++;
1343 }
1344
1345 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1346
1347 if (key.type != key_type || key.objectid != dirid) {
1348 ret = 1;
1349 goto out;
1350 }
1351 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1352 struct btrfs_dir_log_item);
1353 found_end = btrfs_dir_log_end(path->nodes[0], item);
1354 *start_ret = key.offset;
1355 *end_ret = found_end;
1356 ret = 0;
1357out:
1358 btrfs_release_path(root, path);
1359 return ret;
1360}
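/*
 * The intended calling pattern, distilled from replay_dir_deletes()
 * below (illustration only):
 */
static void example_walk_dir_ranges(struct btrfs_root *log,
				    struct btrfs_path *path,
				    u64 dirid, int key_type)
{
	u64 start = 0;
	u64 end = 0;

	while (find_dir_range(log, path, dirid, key_type,
			      &start, &end) == 0) {
		/* keys in [start, end] are covered by the log; any
		 * subvolume dir item in this range that is missing from
		 * the log was deleted before the fsync and must be
		 * removed here
		 */
		if (end == (u64)-1)
			break;
		start = end + 1;
	}
}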
1361
1362/*
1363 * this looks for a given directory item in the log. If the directory
1364 * item is not in the log, the item is removed and the inode it points
1365 * to is unlinked
1366 */
1367static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
1368 struct btrfs_root *root,
1369 struct btrfs_root *log,
1370 struct btrfs_path *path,
1371 struct btrfs_path *log_path,
1372 struct inode *dir,
1373 struct btrfs_key *dir_key)
1374{
1375 int ret;
1376 struct extent_buffer *eb;
1377 int slot;
1378 u32 item_size;
1379 struct btrfs_dir_item *di;
1380 struct btrfs_dir_item *log_di;
1381 int name_len;
1382 unsigned long ptr;
1383 unsigned long ptr_end;
1384 char *name;
1385 struct inode *inode;
1386 struct btrfs_key location;
1387
1388again:
1389 eb = path->nodes[0];
1390 slot = path->slots[0];
1391 item_size = btrfs_item_size_nr(eb, slot);
1392 ptr = btrfs_item_ptr_offset(eb, slot);
1393 ptr_end = ptr + item_size;
1394 while (ptr < ptr_end) {
1395 di = (struct btrfs_dir_item *)ptr;
1396 name_len = btrfs_dir_name_len(eb, di);
1397 name = kmalloc(name_len, GFP_NOFS);
1398 if (!name) {
1399 ret = -ENOMEM;
1400 goto out;
1401 }
1402 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1403 name_len);
1404 log_di = NULL;
1405 if (dir_key->type == BTRFS_DIR_ITEM_KEY) {
1406 log_di = btrfs_lookup_dir_item(trans, log, log_path,
1407 dir_key->objectid,
1408 name, name_len, 0);
1409 } else if (dir_key->type == BTRFS_DIR_INDEX_KEY) {
1410 log_di = btrfs_lookup_dir_index_item(trans, log,
1411 log_path,
1412 dir_key->objectid,
1413 dir_key->offset,
1414 name, name_len, 0);
1415 }
1416 if (!log_di || IS_ERR(log_di)) {
1417 btrfs_dir_item_key_to_cpu(eb, di, &location);
1418 btrfs_release_path(root, path);
1419 btrfs_release_path(log, log_path);
1420 inode = read_one_inode(root, location.objectid);
1421 BUG_ON(!inode);
1422
1423 ret = link_to_fixup_dir(trans, root,
1424 path, location.objectid);
1425 BUG_ON(ret);
1426 btrfs_inc_nlink(inode);
1427 ret = btrfs_unlink_inode(trans, root, dir, inode,
1428 name, name_len);
1429 BUG_ON(ret);
1430 kfree(name);
1431 iput(inode);
1432
1433 /* there might still be more names under this key;
1434 * check and repeat if required
1435 */
1436 ret = btrfs_search_slot(NULL, root, dir_key, path,
1437 0, 0);
1438 if (ret == 0)
1439 goto again;
1440 ret = 0;
1441 goto out;
1442 }
1443 btrfs_release_path(log, log_path);
1444 kfree(name);
1445
1446 ptr = (unsigned long)(di + 1);
1447 ptr += name_len;
1448 }
1449 ret = 0;
1450out:
1451 btrfs_release_path(root, path);
1452 btrfs_release_path(log, log_path);
1453 return ret;
1454}
1455
1456/*
1457 * deletion replay happens before we copy any new directory items
1458 * out of the log or out of backreferences from inodes. It
1459 * scans the log to find ranges of keys that the log is authoritative for,
1460 * and then scans the directory to find items in those ranges that are
1461 * not present in the log.
1462 *
1463 * Anything we don't find in the log is unlinked and removed from the
1464 * directory.
1465 */
1466static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
1467 struct btrfs_root *root,
1468 struct btrfs_root *log,
1469 struct btrfs_path *path,
1470 u64 dirid)
1471{
1472 u64 range_start;
1473 u64 range_end;
1474 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
1475 int ret = 0;
1476 struct btrfs_key dir_key;
1477 struct btrfs_key found_key;
1478 struct btrfs_path *log_path;
1479 struct inode *dir;
1480
1481 dir_key.objectid = dirid;
1482 dir_key.type = BTRFS_DIR_ITEM_KEY;
1483 log_path = btrfs_alloc_path();
1484 if (!log_path)
1485 return -ENOMEM;
1486
1487 dir = read_one_inode(root, dirid);
1488 /* it isn't an error if the inode isn't there, that can happen
1489 * because we replay the deletes before we copy in the inode item
1490 * from the log
1491 */
1492 if (!dir) {
1493 btrfs_free_path(log_path);
1494 return 0;
1495 }
1496again:
1497 range_start = 0;
1498 range_end = 0;
1499 while (1) {
1500 ret = find_dir_range(log, path, dirid, key_type,
1501 &range_start, &range_end);
1502 if (ret != 0)
1503 break;
1504
1505 dir_key.offset = range_start;
1506 while (1) {
1507 int nritems;
1508 ret = btrfs_search_slot(NULL, root, &dir_key, path,
1509 0, 0);
1510 if (ret < 0)
1511 goto out;
1512
1513 nritems = btrfs_header_nritems(path->nodes[0]);
1514 if (path->slots[0] >= nritems) {
1515 ret = btrfs_next_leaf(root, path);
1516 if (ret)
1517 break;
1518 }
1519 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1520 path->slots[0]);
1521 if (found_key.objectid != dirid ||
1522 found_key.type != dir_key.type)
1523 goto next_type;
1524
1525 if (found_key.offset > range_end)
1526 break;
1527
1528 ret = check_item_in_log(trans, root, log, path,
1529 log_path, dir, &found_key);
1530 BUG_ON(ret);
1531 if (found_key.offset == (u64)-1)
1532 break;
1533 dir_key.offset = found_key.offset + 1;
1534 }
1535 btrfs_release_path(root, path);
1536 if (range_end == (u64)-1)
1537 break;
1538 range_start = range_end + 1;
1539 }
1540
1541next_type:
1542 ret = 0;
1543 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
1544 key_type = BTRFS_DIR_LOG_INDEX_KEY;
1545 dir_key.type = BTRFS_DIR_INDEX_KEY;
1546 btrfs_release_path(root, path);
1547 goto again;
1548 }
1549out:
1550 btrfs_release_path(root, path);
1551 btrfs_free_path(log_path);
1552 iput(dir);
1553 return ret;
1554}
1555
1556/*
1557 * the process_func used to replay items from the log tree. This
1558 * gets called in two different stages. The first stage just looks
1559 * for inodes and makes sure they are all copied into the subvolume.
1560 *
1561 * The second stage copies all the other item types from the log into
1562 * the subvolume. The two-stage approach is slower, but gets rid of
1563 * lots of complexity around inodes referencing other inodes that exist
1564 * only in the log (references come from either directory items or inode
1565 * back refs).
1566 */
1567static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
1568 struct walk_control *wc, u64 gen)
1569{
1570 int nritems;
1571 struct btrfs_path *path;
1572 struct btrfs_root *root = wc->replay_dest;
1573 struct btrfs_key key;
1574 u32 item_size;
1575 int level;
1576 int i;
1577 int ret;
1578
1579 btrfs_read_buffer(eb, gen);
1580
1581 level = btrfs_header_level(eb);
1582
1583 if (level != 0)
1584 return 0;
1585
1586 path = btrfs_alloc_path();
1587 BUG_ON(!path);
1588
1589 nritems = btrfs_header_nritems(eb);
1590 for (i = 0; i < nritems; i++) {
1591 btrfs_item_key_to_cpu(eb, &key, i);
1592 item_size = btrfs_item_size_nr(eb, i);
1593
1594 /* inode keys are done during the first stage */
1595 if (key.type == BTRFS_INODE_ITEM_KEY &&
1596 wc->stage == LOG_WALK_REPLAY_INODES) {
1597 struct inode *inode;
1598 struct btrfs_inode_item *inode_item;
1599 u32 mode;
1600
1601 inode_item = btrfs_item_ptr(eb, i,
1602 struct btrfs_inode_item);
1603 mode = btrfs_inode_mode(eb, inode_item);
1604 if (S_ISDIR(mode)) {
1605 ret = replay_dir_deletes(wc->trans,
1606 root, log, path, key.objectid);
1607 BUG_ON(ret);
1608 }
1609 ret = overwrite_item(wc->trans, root, path,
1610 eb, i, &key);
1611 BUG_ON(ret);
1612
1613 /* for regular files, truncate away
1614 * extents past the new EOF
1615 */
1616 if (S_ISREG(mode)) {
1617 inode = read_one_inode(root,
1618 key.objectid);
1619 BUG_ON(!inode);
1620
1621 ret = btrfs_truncate_inode_items(wc->trans,
1622 root, inode, inode->i_size,
1623 BTRFS_EXTENT_DATA_KEY);
1624 BUG_ON(ret);
1625 iput(inode);
1626 }
1627 ret = link_to_fixup_dir(wc->trans, root,
1628 path, key.objectid);
1629 BUG_ON(ret);
1630 }
1631 if (wc->stage < LOG_WALK_REPLAY_ALL)
1632 continue;
1633
1634 /* these keys are simply copied */
1635 if (key.type == BTRFS_XATTR_ITEM_KEY) {
1636 ret = overwrite_item(wc->trans, root, path,
1637 eb, i, &key);
1638 BUG_ON(ret);
1639 } else if (key.type == BTRFS_INODE_REF_KEY) {
1640 ret = add_inode_ref(wc->trans, root, log, path,
1641 eb, i, &key);
1642 BUG_ON(ret && ret != -ENOENT);
1643 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
1644 ret = replay_one_extent(wc->trans, root, path,
1645 eb, i, &key);
1646 BUG_ON(ret);
1647 } else if (key.type == BTRFS_DIR_ITEM_KEY ||
1648 key.type == BTRFS_DIR_INDEX_KEY) {
1649 ret = replay_one_dir_item(wc->trans, root, path,
1650 eb, i, &key);
1651 BUG_ON(ret);
1652 }
1653 }
1654 btrfs_free_path(path);
1655 return 0;
1656}
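The stage test in the loop above (wc->stage < LOG_WALK_REPLAY_ALL) is what implements the two-stage scheme described before replay_one_buffer: inode items are created on the first pass, everything that references them on the second. A standalone sketch of the same dispatch, with invented item types rather than the real btrfs keys:

#include <stdio.h>

enum stage { REPLAY_INODES, REPLAY_ALL };
enum item_type { INODE_ITEM, DIR_ITEM, EXTENT_ITEM };

struct item { enum item_type type; int id; };

static void replay_buffer(const struct item *items, int n, enum stage stage)
{
	for (int i = 0; i < n; i++) {
		if (items[i].type == INODE_ITEM && stage == REPLAY_INODES)
			printf("create inode %d\n", items[i].id);
		if (stage < REPLAY_ALL)
			continue;	/* stage one handles nothing else */
		if (items[i].type != INODE_ITEM)
			printf("copy item %d (type %d)\n",
			       items[i].id, items[i].type);
	}
}

int main(void)
{
	struct item log[] = {
		{ DIR_ITEM, 1 }, { INODE_ITEM, 2 }, { EXTENT_ITEM, 3 },
	};

	replay_buffer(log, 3, REPLAY_INODES);	/* pass one: inodes only */
	replay_buffer(log, 3, REPLAY_ALL);	/* pass two: the rest */
	return 0;
}

Running it creates the inode first and copies the dependent items only on the second pass, mirroring how replay avoids directory entries that point at inodes not yet present.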
1657
1658static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1659 struct btrfs_root *root,
1660 struct btrfs_path *path, int *level,
1661 struct walk_control *wc)
1662{
1663 u64 root_owner;
1664 u64 root_gen;
1665 u64 bytenr;
1666 u64 ptr_gen;
1667 struct extent_buffer *next;
1668 struct extent_buffer *cur;
1669 struct extent_buffer *parent;
1670 u32 blocksize;
1671 int ret = 0;
1672
1673 WARN_ON(*level < 0);
1674 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1675
1676 while (*level > 0) {
1677 WARN_ON(*level < 0);
1678 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1679 cur = path->nodes[*level];
1680
1681 if (btrfs_header_level(cur) != *level)
1682 WARN_ON(1);
1683
1684 if (path->slots[*level] >=
1685 btrfs_header_nritems(cur))
1686 break;
1687
1688 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
1689 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
1690 blocksize = btrfs_level_size(root, *level - 1);
1691
1692 parent = path->nodes[*level];
1693 root_owner = btrfs_header_owner(parent);
1694 root_gen = btrfs_header_generation(parent);
1695
1696 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
1697
1698 wc->process_func(root, next, wc, ptr_gen);
1699
1700 if (*level == 1) {
1701 path->slots[*level]++;
1702 if (wc->free) {
1703 btrfs_read_buffer(next, ptr_gen);
1704
1705 btrfs_tree_lock(next);
1706 clean_tree_block(trans, root, next);
1707 btrfs_wait_tree_block_writeback(next);
1708 btrfs_tree_unlock(next);
1709
1710 ret = btrfs_drop_leaf_ref(trans, root, next);
1711 BUG_ON(ret);
1712
1713 WARN_ON(root_owner !=
1714 BTRFS_TREE_LOG_OBJECTID);
1715 ret = btrfs_free_reserved_extent(root,
1716 bytenr, blocksize);
1717 BUG_ON(ret);
1718 }
1719 free_extent_buffer(next);
1720 continue;
1721 }
1722 btrfs_read_buffer(next, ptr_gen);
1723
1724 WARN_ON(*level <= 0);
1725 if (path->nodes[*level-1])
1726 free_extent_buffer(path->nodes[*level-1]);
1727 path->nodes[*level-1] = next;
1728 *level = btrfs_header_level(next);
1729 path->slots[*level] = 0;
1730 cond_resched();
1731 }
1732 WARN_ON(*level < 0);
1733 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1734
1735 if (path->nodes[*level] == root->node)
1736 parent = path->nodes[*level];
1737 else
1738 parent = path->nodes[*level + 1];
1739
1740 bytenr = path->nodes[*level]->start;
1741
1742 blocksize = btrfs_level_size(root, *level);
1743 root_owner = btrfs_header_owner(parent);
1744 root_gen = btrfs_header_generation(parent);
1745
1746 wc->process_func(root, path->nodes[*level], wc,
1747 btrfs_header_generation(path->nodes[*level]));
1748
1749 if (wc->free) {
1750 next = path->nodes[*level];
1751 btrfs_tree_lock(next);
1752 clean_tree_block(trans, root, next);
1753 btrfs_wait_tree_block_writeback(next);
1754 btrfs_tree_unlock(next);
1755
1756 if (*level == 0) {
1757 ret = btrfs_drop_leaf_ref(trans, root, next);
1758 BUG_ON(ret);
1759 }
1760 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
1761 ret = btrfs_free_reserved_extent(root, bytenr, blocksize);
1762 BUG_ON(ret);
1763 }
1764 free_extent_buffer(path->nodes[*level]);
1765 path->nodes[*level] = NULL;
1766 *level += 1;
1767
1768 cond_resched();
1769 return 0;
1770}
1771
1772static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
1773 struct btrfs_root *root,
1774 struct btrfs_path *path, int *level,
1775 struct walk_control *wc)
1776{
1777 u64 root_owner;
1778 u64 root_gen;
1779 int i;
1780 int slot;
1781 int ret;
1782
1783 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1784 slot = path->slots[i];
1785 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
1786 struct extent_buffer *node;
1787 node = path->nodes[i];
1788 path->slots[i]++;
1789 *level = i;
1790 WARN_ON(*level == 0);
1791 return 0;
1792 } else {
1793 struct extent_buffer *parent;
1794 if (path->nodes[*level] == root->node)
1795 parent = path->nodes[*level];
1796 else
1797 parent = path->nodes[*level + 1];
1798
1799 root_owner = btrfs_header_owner(parent);
1800 root_gen = btrfs_header_generation(parent);
1801 wc->process_func(root, path->nodes[*level], wc,
1802 btrfs_header_generation(path->nodes[*level]));
1803 if (wc->free) {
1804 struct extent_buffer *next;
1805
1806 next = path->nodes[*level];
1807
1808 btrfs_tree_lock(next);
1809 clean_tree_block(trans, root, next);
1810 btrfs_wait_tree_block_writeback(next);
1811 btrfs_tree_unlock(next);
1812
1813 if (*level == 0) {
1814 ret = btrfs_drop_leaf_ref(trans, root,
1815 next);
1816 BUG_ON(ret);
1817 }
1818
1819 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
1820 ret = btrfs_free_reserved_extent(root,
1821 path->nodes[*level]->start,
1822 path->nodes[*level]->len);
1823 BUG_ON(ret);
1824 }
1825 free_extent_buffer(path->nodes[*level]);
1826 path->nodes[*level] = NULL;
1827 *level = i + 1;
1828 }
1829 }
1830 return 1;
1831}
1832
1833/*
1834 * drop the reference count on the tree rooted at 'log'. This traverses
1835 * the tree freeing any blocks that have a ref count of zero after being
1836 * decremented.
1837 */
1838static int walk_log_tree(struct btrfs_trans_handle *trans,
1839 struct btrfs_root *log, struct walk_control *wc)
1840{
1841 int ret = 0;
1842 int wret;
1843 int level;
1844 struct btrfs_path *path;
1845 int i;
1846 int orig_level;
1847
1848 path = btrfs_alloc_path();
1849 BUG_ON(!path);
1850
1851 level = btrfs_header_level(log->node);
1852 orig_level = level;
1853 path->nodes[level] = log->node;
1854 extent_buffer_get(log->node);
1855 path->slots[level] = 0;
1856
1857 while (1) {
1858 wret = walk_down_log_tree(trans, log, path, &level, wc);
1859 if (wret > 0)
1860 break;
1861 if (wret < 0)
1862 ret = wret;
1863
1864 wret = walk_up_log_tree(trans, log, path, &level, wc);
1865 if (wret > 0)
1866 break;
1867 if (wret < 0)
1868 ret = wret;
1869 }
1870
1871 /* was the root node processed? if not, catch it here */
1872 if (path->nodes[orig_level]) {
1873 wc->process_func(log, path->nodes[orig_level], wc,
1874 btrfs_header_generation(path->nodes[orig_level]));
1875 if (wc->free) {
1876 struct extent_buffer *next;
1877
1878 next = path->nodes[orig_level];
1879
1880 btrfs_tree_lock(next);
1881 clean_tree_block(trans, log, next);
1882 btrfs_wait_tree_block_writeback(next);
1883 btrfs_tree_unlock(next);
1884
1885 if (orig_level == 0) {
1886 ret = btrfs_drop_leaf_ref(trans, log,
1887 next);
1888 BUG_ON(ret);
1889 }
1890 WARN_ON(log->root_key.objectid !=
1891 BTRFS_TREE_LOG_OBJECTID);
1892 ret = btrfs_free_reserved_extent(log, next->start,
1893 next->len);
1894 BUG_ON(ret);
1895 }
1896 }
1897
1898 for (i = 0; i <= orig_level; i++) {
1899 if (path->nodes[i]) {
1900 free_extent_buffer(path->nodes[i]);
1901 path->nodes[i] = NULL;
1902 }
1903 }
1904 btrfs_free_path(path);
1905 if (wc->free)
1906 free_extent_buffer(log->node);
1907 return ret;
1908}
1909
1910static int wait_log_commit(struct btrfs_root *log)
1911{
1912 DEFINE_WAIT(wait);
1913 u64 transid = log->fs_info->tree_log_transid;
1914
1915 do {
1916 prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
1917 TASK_UNINTERRUPTIBLE);
1918 mutex_unlock(&log->fs_info->tree_log_mutex);
1919 if (atomic_read(&log->fs_info->tree_log_commit))
1920 schedule();
1921 finish_wait(&log->fs_info->tree_log_wait, &wait);
1922 mutex_lock(&log->fs_info->tree_log_mutex);
1923 } while (transid == log->fs_info->tree_log_transid &&
1924 atomic_read(&log->fs_info->tree_log_commit));
1925 return 0;
1926}
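wait_log_commit spells out by hand the drop-lock/sleep/retake/re-check cycle that a condition variable packages up: the predicate must be tested again after every wakeup. A userspace analogue of the same pattern using pthreads (a sketch; names are invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int commit_in_progress = 1;

static void *committer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	commit_in_progress = 0;		/* the commit has finished */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, committer, NULL);
	pthread_mutex_lock(&lock);
	while (commit_in_progress)	/* re-check after wakeup, like the do/while */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("log commit finished\n");
	return 0;
}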
1927
1928/*
1929 * btrfs_sync_log sends a given tree log down to the disk and
1930 * updates the super blocks to record it. When this call is done,
1931 * you know that any inodes previously logged are safely on disk
1932 */
1933int btrfs_sync_log(struct btrfs_trans_handle *trans,
1934 struct btrfs_root *root)
1935{
1936 int ret;
1937 unsigned long batch;
1938 struct btrfs_root *log = root->log_root;
1939
1940 mutex_lock(&log->fs_info->tree_log_mutex);
1941 if (atomic_read(&log->fs_info->tree_log_commit)) {
1942 wait_log_commit(log);
1943 goto out;
1944 }
1945 atomic_set(&log->fs_info->tree_log_commit, 1);
1946
1947 while (1) {
1948 batch = log->fs_info->tree_log_batch;
1949 mutex_unlock(&log->fs_info->tree_log_mutex);
1950 schedule_timeout_uninterruptible(1);
1951 mutex_lock(&log->fs_info->tree_log_mutex);
1952
1953 while (atomic_read(&log->fs_info->tree_log_writers)) {
1954 DEFINE_WAIT(wait);
1955 prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
1956 TASK_UNINTERRUPTIBLE);
1957 mutex_unlock(&log->fs_info->tree_log_mutex);
1958 if (atomic_read(&log->fs_info->tree_log_writers))
1959 schedule();
1960 mutex_lock(&log->fs_info->tree_log_mutex);
1961 finish_wait(&log->fs_info->tree_log_wait, &wait);
1962 }
1963 if (batch == log->fs_info->tree_log_batch)
1964 break;
1965 }
1966
1967 ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages);
1968 BUG_ON(ret);
1969 ret = btrfs_write_and_wait_marked_extents(root->fs_info->log_root_tree,
1970 &root->fs_info->log_root_tree->dirty_log_pages);
1971 BUG_ON(ret);
1972
1973 btrfs_set_super_log_root(&root->fs_info->super_for_commit,
1974 log->fs_info->log_root_tree->node->start);
1975 btrfs_set_super_log_root_level(&root->fs_info->super_for_commit,
1976 btrfs_header_level(log->fs_info->log_root_tree->node));
1977
1978 write_ctree_super(trans, log->fs_info->tree_root, 2);
1979 log->fs_info->tree_log_transid++;
1980 log->fs_info->tree_log_batch = 0;
1981 atomic_set(&log->fs_info->tree_log_commit, 0);
1982 smp_mb();
1983 if (waitqueue_active(&log->fs_info->tree_log_wait))
1984 wake_up(&log->fs_info->tree_log_wait);
1985out:
1986 mutex_unlock(&log->fs_info->tree_log_mutex);
1987 return 0;
1988}
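The outer while loop above is a batching heuristic: snapshot tree_log_batch, sleep one tick so stragglers can join, wait out the active writers, and retry whenever the counter moved under us. A deterministic, self-contained model of that control flow (the simulated writer stands in for real concurrent tasks, and the waitqueue sleep becomes a simple drain):

#include <stdio.h>

static int tree_log_batch;	/* bumped by each writer joining the log */
static int tree_log_writers;	/* writers currently inside the log */

/* stand-in for concurrent traffic: two late writers join, then it stops */
static void simulate_writer_activity(int round)
{
	if (round < 2) {
		tree_log_batch++;
		tree_log_writers++;
	}
}

static void sync_log(void)
{
	int round = 0;

	while (1) {
		int batch = tree_log_batch;		/* snapshot */

		simulate_writer_activity(round++);	/* the one-tick window */
		while (tree_log_writers)
			tree_log_writers--;	/* real code sleeps on a waitqueue */
		if (batch == tree_log_batch)
			break;		/* counter stable: safe to write the log */
	}
	printf("commit after %d rounds, batch %d\n", round, tree_log_batch);
}

int main(void)
{
	sync_log();
	return 0;
}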
1989
1990/* free all the extents used by the tree log. This should be called
1991 * at commit time of the full transaction
1992 */
1993int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
1994{
1995 int ret;
1996	struct btrfs_root *log;
1998 u64 start;
1999 u64 end;
2000 struct walk_control wc = {
2001 .free = 1,
2002 .process_func = process_one_buffer
2003 };
2004
2005 if (!root->log_root || root->fs_info->log_root_recovering)
2006 return 0;
2007
2008 log = root->log_root;
2009 ret = walk_log_tree(trans, log, &wc);
2010 BUG_ON(ret);
2011
2012 while (1) {
2013 ret = find_first_extent_bit(&log->dirty_log_pages,
2014 0, &start, &end, EXTENT_DIRTY);
2015 if (ret)
2016 break;
2017
2018 clear_extent_dirty(&log->dirty_log_pages,
2019 start, end, GFP_NOFS);
2020 }
2021
2022 log = root->log_root;
2023 ret = btrfs_del_root(trans, root->fs_info->log_root_tree,
2024 &log->root_key);
2025 BUG_ON(ret);
2026	root->log_root = NULL;
2027	kfree(log);
2028 return 0;
2029}
2030
2031/*
2032 * helper function to update the item for a given subvolume's log root
2033 * in the tree of log roots
2034 */
2035static int update_log_root(struct btrfs_trans_handle *trans,
2036 struct btrfs_root *log)
2037{
2038 u64 bytenr = btrfs_root_bytenr(&log->root_item);
2039 int ret;
2040
2041 if (log->node->start == bytenr)
2042 return 0;
2043
2044 btrfs_set_root_bytenr(&log->root_item, log->node->start);
2045 btrfs_set_root_generation(&log->root_item, trans->transid);
2046 btrfs_set_root_level(&log->root_item, btrfs_header_level(log->node));
2047 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2048 &log->root_key, &log->root_item);
2049 BUG_ON(ret);
2050 return ret;
2051}
2052
2053/*
2054 * If both a file and directory are logged, and unlinks or renames are
2055 * mixed in, we have a few interesting corners:
2056 *
2057 * create file X in dir Y
2058 * link file X to X.link in dir Y
2059 * fsync file X
2060 * unlink file X but leave X.link
2061 * fsync dir Y
2062 *
2063 * After a crash we would expect only X.link to exist. But file X
2064 * didn't get fsync'd again so the log has back refs for X and X.link.
2065 *
2066 * We solve this by removing directory entries and inode backrefs from the
2067 * log when a file that was logged in the current transaction is
2068 * unlinked. Any later fsync will include the updated log entries, and
2069 * we'll be able to reconstruct the proper directory items from backrefs.
2070 *
2071 * This optimization allows us to avoid relogging the entire inode
2072 * or the entire directory.
2073 */
2074int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2075 struct btrfs_root *root,
2076 const char *name, int name_len,
2077 struct inode *dir, u64 index)
2078{
2079 struct btrfs_root *log;
2080 struct btrfs_dir_item *di;
2081 struct btrfs_path *path;
2082 int ret;
2083 int bytes_del = 0;
2084
2085 if (BTRFS_I(dir)->logged_trans < trans->transid)
2086 return 0;
2087
2088 ret = join_running_log_trans(root);
2089 if (ret)
2090 return 0;
2091
2092 mutex_lock(&BTRFS_I(dir)->log_mutex);
2093
2094 log = root->log_root;
2095 path = btrfs_alloc_path();
2096 di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
2097 name, name_len, -1);
2098 if (di && !IS_ERR(di)) {
2099 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2100 bytes_del += name_len;
2101 BUG_ON(ret);
2102 }
2103 btrfs_release_path(log, path);
2104 di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino,
2105 index, name, name_len, -1);
2106 if (di && !IS_ERR(di)) {
2107 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2108 bytes_del += name_len;
2109 BUG_ON(ret);
2110 }
2111
2112 /* update the directory size in the log to reflect the names
2113 * we have removed
2114 */
2115 if (bytes_del) {
2116 struct btrfs_key key;
2117
2118 key.objectid = dir->i_ino;
2119 key.offset = 0;
2120 key.type = BTRFS_INODE_ITEM_KEY;
2121 btrfs_release_path(log, path);
2122
2123 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
2124 if (ret == 0) {
2125 struct btrfs_inode_item *item;
2126 u64 i_size;
2127
2128 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2129 struct btrfs_inode_item);
2130 i_size = btrfs_inode_size(path->nodes[0], item);
2131 if (i_size > bytes_del)
2132 i_size -= bytes_del;
2133 else
2134 i_size = 0;
2135 btrfs_set_inode_size(path->nodes[0], item, i_size);
2136 btrfs_mark_buffer_dirty(path->nodes[0]);
2137 } else
2138 ret = 0;
2139 btrfs_release_path(log, path);
2140 }
2141
2142 btrfs_free_path(path);
2143 mutex_unlock(&BTRFS_I(dir)->log_mutex);
2144 end_log_trans(root);
2145
2146 return 0;
2147}
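The corner case described in the comment before btrfs_del_dir_entries_in_log can be driven entirely from user space. A minimal reproduction sketch, assuming a btrfs filesystem mounted at /mnt/btrfs with an existing directory Y (hypothetical paths, error handling elided):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/Y/X", O_CREAT | O_WRONLY, 0644);
	int dirfd;

	link("/mnt/btrfs/Y/X", "/mnt/btrfs/Y/X.link");
	fsync(fd);			/* logs inode X with both back refs */
	close(fd);

	unlink("/mnt/btrfs/Y/X");	/* must also drop X from the log */

	dirfd = open("/mnt/btrfs/Y", O_RDONLY | O_DIRECTORY);
	fsync(dirfd);			/* replay of this log must not resurrect X */
	close(dirfd);
	return 0;
}

After a crash between the last fsync and the transaction commit, only X.link should survive replay.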
2148
2149/* see comments for btrfs_del_dir_entries_in_log */
2150int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2151 struct btrfs_root *root,
2152 const char *name, int name_len,
2153 struct inode *inode, u64 dirid)
2154{
2155 struct btrfs_root *log;
2156 u64 index;
2157 int ret;
2158
2159 if (BTRFS_I(inode)->logged_trans < trans->transid)
2160 return 0;
2161
2162 ret = join_running_log_trans(root);
2163 if (ret)
2164 return 0;
2165 log = root->log_root;
2166 mutex_lock(&BTRFS_I(inode)->log_mutex);
2167
2168 ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino,
2169 dirid, &index);
2170 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2171 end_log_trans(root);
2172
2173 return ret;
2174}
2175
2176/*
2177 * creates a range item in the log for 'dirid'. first_offset and
2178 * last_offset tell us which parts of the key space the log should
2179 * be considered authoritative for.
2180 */
2181static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
2182 struct btrfs_root *log,
2183 struct btrfs_path *path,
2184 int key_type, u64 dirid,
2185 u64 first_offset, u64 last_offset)
2186{
2187 int ret;
2188 struct btrfs_key key;
2189 struct btrfs_dir_log_item *item;
2190
2191 key.objectid = dirid;
2192 key.offset = first_offset;
2193 if (key_type == BTRFS_DIR_ITEM_KEY)
2194 key.type = BTRFS_DIR_LOG_ITEM_KEY;
2195 else
2196 key.type = BTRFS_DIR_LOG_INDEX_KEY;
2197 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
2198 BUG_ON(ret);
2199
2200 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2201 struct btrfs_dir_log_item);
2202 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
2203 btrfs_mark_buffer_dirty(path->nodes[0]);
2204 btrfs_release_path(log, path);
2205 return 0;
2206}
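During replay, an entry found in the subvolume is only deleted when its offset falls inside one of the ranges these items record; outside the logged ranges the log makes no claim and the entry survives. An illustrative userspace model of that authority check (types invented for the sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dir_log_range { uint64_t start, end; };

static bool log_is_authoritative(const struct dir_log_range *r, int n,
				 uint64_t offset)
{
	for (int i = 0; i < n; i++)
		if (offset >= r[i].start && offset <= r[i].end)
			return true;
	return false;
}

int main(void)
{
	struct dir_log_range ranges[] = { { 0, 10 }, { 50, UINT64_MAX } };

	/* offset 30 was never logged, so its entry is kept, not deleted */
	printf("30 -> %d, 60 -> %d\n",
	       log_is_authoritative(ranges, 2, 30),
	       log_is_authoritative(ranges, 2, 60));
	return 0;
}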
2207
2208/*
2209 * log all the items included in the current transaction for a given
2210 * directory. This also creates the range items in the log tree required
2211 * to replay anything deleted before the fsync
2212 */
2213static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2214 struct btrfs_root *root, struct inode *inode,
2215 struct btrfs_path *path,
2216 struct btrfs_path *dst_path, int key_type,
2217 u64 min_offset, u64 *last_offset_ret)
2218{
2219 struct btrfs_key min_key;
2220 struct btrfs_key max_key;
2221 struct btrfs_root *log = root->log_root;
2222 struct extent_buffer *src;
2223 int ret;
2224 int i;
2225 int nritems;
2226 u64 first_offset = min_offset;
2227 u64 last_offset = (u64)-1;
2228
2229 log = root->log_root;
2230 max_key.objectid = inode->i_ino;
2231 max_key.offset = (u64)-1;
2232 max_key.type = key_type;
2233
2234 min_key.objectid = inode->i_ino;
2235 min_key.type = key_type;
2236 min_key.offset = min_offset;
2237
2238 path->keep_locks = 1;
2239
2240 ret = btrfs_search_forward(root, &min_key, &max_key,
2241 path, 0, trans->transid);
2242
2243 /*
2244 * we didn't find anything from this transaction, see if there
2245 * is anything at all
2246 */
2247 if (ret != 0 || min_key.objectid != inode->i_ino ||
2248 min_key.type != key_type) {
2249 min_key.objectid = inode->i_ino;
2250 min_key.type = key_type;
2251 min_key.offset = (u64)-1;
2252 btrfs_release_path(root, path);
2253 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2254 if (ret < 0) {
2255 btrfs_release_path(root, path);
2256 return ret;
2257 }
2258 ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
2259
2260 /* if ret == 0 there are items for this type,
2261 * create a range to tell us the last key of this type.
2262 * otherwise, there are no items in this directory after
2263 * *min_offset, and we create a range to indicate that.
2264 */
2265 if (ret == 0) {
2266 struct btrfs_key tmp;
2267 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
2268 path->slots[0]);
2269 if (key_type == tmp.type)
2270 first_offset = max(min_offset, tmp.offset) + 1;
2271 }
2272 goto done;
2273 }
2274
2275 /* go backward to find any previous key */
2276 ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
2277 if (ret == 0) {
2278 struct btrfs_key tmp;
2279 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2280 if (key_type == tmp.type) {
2281 first_offset = tmp.offset;
2282 ret = overwrite_item(trans, log, dst_path,
2283 path->nodes[0], path->slots[0],
2284 &tmp);
2285 }
2286 }
2287 btrfs_release_path(root, path);
2288
2289 /* find the first key from this transaction again */
2290 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2291 if (ret != 0) {
2292 WARN_ON(1);
2293 goto done;
2294 }
2295
2296 /*
2297 * we have a block from this transaction, log every item in it
2298 * from our directory
2299 */
2300 while (1) {
2301 struct btrfs_key tmp;
2302 src = path->nodes[0];
2303 nritems = btrfs_header_nritems(src);
2304 for (i = path->slots[0]; i < nritems; i++) {
2305 btrfs_item_key_to_cpu(src, &min_key, i);
2306
2307 if (min_key.objectid != inode->i_ino ||
2308 min_key.type != key_type)
2309 goto done;
2310 ret = overwrite_item(trans, log, dst_path, src, i,
2311 &min_key);
2312 BUG_ON(ret);
2313 }
2314 path->slots[0] = nritems;
2315
2316 /*
2317 * look ahead to the next item and see if it is also
2318 * from this directory and from this transaction
2319 */
2320 ret = btrfs_next_leaf(root, path);
2321 if (ret == 1) {
2322 last_offset = (u64)-1;
2323 goto done;
2324 }
2325 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2326 if (tmp.objectid != inode->i_ino || tmp.type != key_type) {
2327 last_offset = (u64)-1;
2328 goto done;
2329 }
2330 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
2331 ret = overwrite_item(trans, log, dst_path,
2332 path->nodes[0], path->slots[0],
2333 &tmp);
2334
2335 BUG_ON(ret);
2336 last_offset = tmp.offset;
2337 goto done;
2338 }
2339 }
2340done:
2341 *last_offset_ret = last_offset;
2342 btrfs_release_path(root, path);
2343 btrfs_release_path(log, dst_path);
2344
2345 /* insert the log range keys to indicate where the log is valid */
2346 ret = insert_dir_log_key(trans, log, path, key_type, inode->i_ino,
2347 first_offset, last_offset);
2348 BUG_ON(ret);
2349 return 0;
2350}
2351
2352/*
2353 * logging directories is very similar to logging inodes. We find all the items
2354 * from the current transaction and write them to the log.
2355 *
2356 * The recovery code scans the directory in the subvolume, and if it finds a
2357 * key in the range logged that is not present in the log tree, then it means
2358 * that dir entry was unlinked during the transaction.
2359 *
2360 * In order for that scan to work, we must include one key smaller than
2361 * the smallest logged by this transaction and one key larger than the largest
2362 * key logged by this transaction.
2363 */
2364static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
2365 struct btrfs_root *root, struct inode *inode,
2366 struct btrfs_path *path,
2367 struct btrfs_path *dst_path)
2368{
2369 u64 min_key;
2370 u64 max_key;
2371 int ret;
2372 int key_type = BTRFS_DIR_ITEM_KEY;
2373
2374again:
2375 min_key = 0;
2376 max_key = 0;
2377 while (1) {
2378 ret = log_dir_items(trans, root, inode, path,
2379 dst_path, key_type, min_key,
2380 &max_key);
2381 BUG_ON(ret);
2382 if (max_key == (u64)-1)
2383 break;
2384 min_key = max_key + 1;
2385 }
2386
2387 if (key_type == BTRFS_DIR_ITEM_KEY) {
2388 key_type = BTRFS_DIR_INDEX_KEY;
2389 goto again;
2390 }
2391 return 0;
2392}
2393
2394/*
2395 * a helper function to drop items from the log before we relog an
2396 * inode. max_key_type indicates the highest item type to remove.
2397 * This cannot be run for file data extents because it does not
2398 * free the extents they point to.
2399 */
2400static int drop_objectid_items(struct btrfs_trans_handle *trans,
2401 struct btrfs_root *log,
2402 struct btrfs_path *path,
2403 u64 objectid, int max_key_type)
2404{
2405 int ret;
2406 struct btrfs_key key;
2407 struct btrfs_key found_key;
2408
2409 key.objectid = objectid;
2410 key.type = max_key_type;
2411 key.offset = (u64)-1;
2412
2413 while (1) {
2414 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
2415
2416 if (ret != 1)
2417 break;
2418
2419 if (path->slots[0] == 0)
2420 break;
2421
2422 path->slots[0]--;
2423 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2424 path->slots[0]);
2425
2426 if (found_key.objectid != objectid)
2427 break;
2428
2429 ret = btrfs_del_item(trans, log, path);
2430 BUG_ON(ret);
2431 btrfs_release_path(log, path);
2432 }
2433 btrfs_release_path(log, path);
2434 return 0;
2435}
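The loop above leans on btree search semantics: looking up (objectid, max_key_type, (u64)-1) positions the path just past the last matching item, so stepping the slot back and deleting walks the matches in reverse until the objectid changes. A standalone model with integer keys standing in for btrfs keys:

#include <stdio.h>

static int keys[] = { 5, 7, 7, 7, 9 };	/* sorted; delete every 7 */
static int nkeys = 5;

static int search_slot(int key)		/* first slot with keys[slot] > key */
{
	int slot = 0;

	while (slot < nkeys && keys[slot] <= key)
		slot++;
	return slot;
}

int main(void)
{
	while (1) {
		int slot = search_slot(7);	/* lands just past the 7s */

		if (slot == 0 || keys[slot - 1] != 7)
			break;
		slot--;
		printf("delete keys[%d] = %d\n", slot, keys[slot]);
		for (int i = slot; i < nkeys - 1; i++)	/* shift left */
			keys[i] = keys[i + 1];
		nkeys--;
	}
	return 0;
}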
2436
2437static noinline int copy_items(struct btrfs_trans_handle *trans,
2438 struct btrfs_root *log,
2439 struct btrfs_path *dst_path,
2440 struct extent_buffer *src,
2441 int start_slot, int nr, int inode_only)
2442{
2443 unsigned long src_offset;
2444 unsigned long dst_offset;
2445 struct btrfs_file_extent_item *extent;
2446 struct btrfs_inode_item *inode_item;
2447 int ret;
2448 struct btrfs_key *ins_keys;
2449 u32 *ins_sizes;
2450 char *ins_data;
2451 int i;
2452 struct list_head ordered_sums;
2453
2454 INIT_LIST_HEAD(&ordered_sums);
2455
2456 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
2457 nr * sizeof(u32), GFP_NOFS);
2458 ins_sizes = (u32 *)ins_data;
2459 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
2460
2461 for (i = 0; i < nr; i++) {
2462 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
2463 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
2464 }
2465 ret = btrfs_insert_empty_items(trans, log, dst_path,
2466 ins_keys, ins_sizes, nr);
2467 BUG_ON(ret);
2468
2469 for (i = 0; i < nr; i++) {
2470 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
2471 dst_path->slots[0]);
2472
2473 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
2474
2475 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
2476 src_offset, ins_sizes[i]);
2477
2478 if (inode_only == LOG_INODE_EXISTS &&
2479 ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
2480 inode_item = btrfs_item_ptr(dst_path->nodes[0],
2481 dst_path->slots[0],
2482 struct btrfs_inode_item);
2483 btrfs_set_inode_size(dst_path->nodes[0], inode_item, 0);
2484
2485			/* set the generation to zero so the recovery code
2486			 * can tell the difference between logging
2487			 * just to say 'this inode exists' and logging
2488 * to say 'update this inode with these values'
2489 */
2490 btrfs_set_inode_generation(dst_path->nodes[0],
2491 inode_item, 0);
2492 }
2493 /* take a reference on file data extents so that truncates
2494 * or deletes of this inode don't have to relog the inode
2495 * again
2496 */
2497 if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY) {
2498 int found_type;
2499 extent = btrfs_item_ptr(src, start_slot + i,
2500 struct btrfs_file_extent_item);
2501
2502 found_type = btrfs_file_extent_type(src, extent);
2503 if (found_type == BTRFS_FILE_EXTENT_REG ||
2504 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
2505 u64 ds = btrfs_file_extent_disk_bytenr(src,
2506 extent);
2507 u64 dl = btrfs_file_extent_disk_num_bytes(src,
2508 extent);
2509 u64 cs = btrfs_file_extent_offset(src, extent);
2510 u64 cl = btrfs_file_extent_num_bytes(src,
2511							extent);
2512 if (btrfs_file_extent_compression(src,
2513 extent)) {
2514 cs = 0;
2515 cl = dl;
2516 }
2517 /* ds == 0 is a hole */
2518 if (ds != 0) {
2519 ret = btrfs_inc_extent_ref(trans, log,
2520 ds, dl,
2521 dst_path->nodes[0]->start,
2522 BTRFS_TREE_LOG_OBJECTID,
2523 trans->transid,
2524 ins_keys[i].objectid);
2525 BUG_ON(ret);
2526 ret = btrfs_lookup_csums_range(
2527 log->fs_info->csum_root,
2528 ds + cs, ds + cs + cl - 1,
2529 &ordered_sums);
2530 BUG_ON(ret);
2531 }
2532 }
2533 }
2534 dst_path->slots[0]++;
2535 }
2536
2537 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
2538 btrfs_release_path(log, dst_path);
2539 kfree(ins_data);
2540
2541 /*
2542 * we have to do this after the loop above to avoid changing the
2543 * log tree while trying to change the log tree.
2544 */
2545 while (!list_empty(&ordered_sums)) {
2546 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
2547 struct btrfs_ordered_sum,
2548 list);
2549 ret = btrfs_csum_file_blocks(trans, log, sums);
2550 BUG_ON(ret);
2551 list_del(&sums->list);
2552 kfree(sums);
2553 }
2554 return 0;
2555}
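The checksum handling above is a collect-then-drain pattern: work discovered during the copy loop is queued on a local list and only applied after the loop, because inserting csums would modify the very log tree being copied into. A self-contained model of the pattern:

#include <stdio.h>
#include <stdlib.h>

struct sum {
	int value;
	struct sum *next;
};

int main(void)
{
	struct sum *pending = NULL;

	/* phase one: walk and collect (the copy loop above) */
	for (int i = 0; i < 3; i++) {
		struct sum *s = malloc(sizeof(*s));

		s->value = i * 100;
		s->next = pending;
		pending = s;
	}

	/* phase two: drain once the walk is done (the csum loop above) */
	while (pending) {
		struct sum *s = pending;

		pending = s->next;
		printf("insert csum %d\n", s->value);
		free(s);
	}
	return 0;
}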
2556
2557/* log a single inode in the tree log.
2558 * At least one parent directory for this inode must exist in the tree
2559 * or be logged already.
2560 *
2561 * Any items from this inode changed by the current transaction are copied
2562 * to the log tree. An extra reference is taken on any extents in this
2563 * file, allowing us to avoid a whole pile of corner cases around logging
2564 * blocks that have been removed from the tree.
2565 *
2566 * See LOG_INODE_ALL and related defines for a description of what inode_only
2567 * does.
2568 *
2569 * This handles both files and directories.
2570 */
2571static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
2572 struct btrfs_root *root, struct inode *inode,
2573 int inode_only)
2574{
2575 struct btrfs_path *path;
2576 struct btrfs_path *dst_path;
2577 struct btrfs_key min_key;
2578 struct btrfs_key max_key;
2579 struct btrfs_root *log = root->log_root;
2580 struct extent_buffer *src = NULL;
2581 u32 size;
2582 int ret;
2583 int nritems;
2584 int ins_start_slot = 0;
2585 int ins_nr;
2586
2587 log = root->log_root;
2588
2589 path = btrfs_alloc_path();
2590 dst_path = btrfs_alloc_path();
2591
2592 min_key.objectid = inode->i_ino;
2593 min_key.type = BTRFS_INODE_ITEM_KEY;
2594 min_key.offset = 0;
2595
2596 max_key.objectid = inode->i_ino;
2597 if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode))
2598 max_key.type = BTRFS_XATTR_ITEM_KEY;
2599 else
2600 max_key.type = (u8)-1;
2601 max_key.offset = (u64)-1;
2602
2603 /*
2604 * if this inode has already been logged and we're in inode_only
2605 * mode, we don't want to delete the things that have already
2606 * been written to the log.
2607 *
2608 * But, if the inode has been through an inode_only log,
2609 * the logged_trans field is not set. This allows us to catch
2610 * any new names for this inode in the backrefs by logging it
2611 * again
2612 */
2613 if (inode_only == LOG_INODE_EXISTS &&
2614 BTRFS_I(inode)->logged_trans == trans->transid) {
2615 btrfs_free_path(path);
2616 btrfs_free_path(dst_path);
2617 goto out;
2618 }
2619 mutex_lock(&BTRFS_I(inode)->log_mutex);
2620
2621 /*
2622 * a brute force approach to making sure we get the most uptodate
2623 * copies of everything.
2624 */
2625 if (S_ISDIR(inode->i_mode)) {
2626 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
2627
2628 if (inode_only == LOG_INODE_EXISTS)
2629 max_key_type = BTRFS_XATTR_ITEM_KEY;
2630 ret = drop_objectid_items(trans, log, path,
2631 inode->i_ino, max_key_type);
2632 } else {
2633 ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
2634 }
2635 BUG_ON(ret);
2636 path->keep_locks = 1;
2637
2638 while (1) {
2639 ins_nr = 0;
2640 ret = btrfs_search_forward(root, &min_key, &max_key,
2641 path, 0, trans->transid);
2642 if (ret != 0)
2643 break;
2644again:
2645 /* note, ins_nr might be > 0 here, cleanup outside the loop */
2646 if (min_key.objectid != inode->i_ino)
2647 break;
2648 if (min_key.type > max_key.type)
2649 break;
2650
2651 src = path->nodes[0];
2652 size = btrfs_item_size_nr(src, path->slots[0]);
2653 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
2654 ins_nr++;
2655 goto next_slot;
2656 } else if (!ins_nr) {
2657 ins_start_slot = path->slots[0];
2658 ins_nr = 1;
2659 goto next_slot;
2660 }
2661
2662 ret = copy_items(trans, log, dst_path, src, ins_start_slot,
2663 ins_nr, inode_only);
2664 BUG_ON(ret);
2665 ins_nr = 1;
2666 ins_start_slot = path->slots[0];
2667next_slot:
2668
2669 nritems = btrfs_header_nritems(path->nodes[0]);
2670 path->slots[0]++;
2671 if (path->slots[0] < nritems) {
2672 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
2673 path->slots[0]);
2674 goto again;
2675 }
2676 if (ins_nr) {
2677 ret = copy_items(trans, log, dst_path, src,
2678 ins_start_slot,
2679 ins_nr, inode_only);
2680 BUG_ON(ret);
2681 ins_nr = 0;
2682 }
2683 btrfs_release_path(root, path);
2684
2685 if (min_key.offset < (u64)-1)
2686 min_key.offset++;
2687 else if (min_key.type < (u8)-1)
2688 min_key.type++;
2689 else if (min_key.objectid < (u64)-1)
2690 min_key.objectid++;
2691 else
2692 break;
2693 }
2694 if (ins_nr) {
2695 ret = copy_items(trans, log, dst_path, src,
2696 ins_start_slot,
2697 ins_nr, inode_only);
2698 BUG_ON(ret);
2699 ins_nr = 0;
2700 }
2701 WARN_ON(ins_nr);
2702 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
2703 btrfs_release_path(root, path);
2704 btrfs_release_path(log, dst_path);
2705 BTRFS_I(inode)->log_dirty_trans = 0;
2706 ret = log_directory_changes(trans, root, inode, path, dst_path);
2707 BUG_ON(ret);
2708 }
2709 BTRFS_I(inode)->logged_trans = trans->transid;
2710 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2711
2712 btrfs_free_path(path);
2713 btrfs_free_path(dst_path);
2714
2715 mutex_lock(&root->fs_info->tree_log_mutex);
2716 ret = update_log_root(trans, log);
2717 BUG_ON(ret);
2718 mutex_unlock(&root->fs_info->tree_log_mutex);
2719out:
2720 return 0;
2721}
2722
2723int btrfs_log_inode(struct btrfs_trans_handle *trans,
2724 struct btrfs_root *root, struct inode *inode,
2725 int inode_only)
2726{
2727 int ret;
2728
2729 start_log_trans(trans, root);
2730 ret = __btrfs_log_inode(trans, root, inode, inode_only);
2731 end_log_trans(root);
2732 return ret;
2733}
2734
2735/*
2736 * helper function around btrfs_log_inode to make sure newly created
2737 * parent directories also end up in the log. A minimal inode-and-backref
2738 * only log is written for any parent directories that are older than
2739 * the last committed transaction
2740 */
2741int btrfs_log_dentry(struct btrfs_trans_handle *trans,
2742 struct btrfs_root *root, struct dentry *dentry)
2743{
2744 int inode_only = LOG_INODE_ALL;
2745 struct super_block *sb;
2746 int ret;
2747
2748 start_log_trans(trans, root);
2749 sb = dentry->d_inode->i_sb;
2750 while (1) {
2751 ret = __btrfs_log_inode(trans, root, dentry->d_inode,
2752 inode_only);
2753 BUG_ON(ret);
2754 inode_only = LOG_INODE_EXISTS;
2755
2756 dentry = dentry->d_parent;
2757 if (!dentry || !dentry->d_inode || sb != dentry->d_inode->i_sb)
2758 break;
2759
2760 if (BTRFS_I(dentry->d_inode)->generation <=
2761 root->fs_info->last_trans_committed)
2762 break;
2763 }
2764 end_log_trans(root);
2765 return 0;
2766}
2767
2768/*
2769 * it is not safe to log a dentry if the chunk root has added new
2770 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
2771 * If this returns 1, you must commit the transaction to safely get your
2772 * data on disk.
2773 */
2774int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
2775 struct btrfs_root *root, struct dentry *dentry)
2776{
2777 u64 gen;
2778 gen = root->fs_info->last_trans_new_blockgroup;
2779 if (gen > root->fs_info->last_trans_committed)
2780 return 1;
2781 else
2782 return btrfs_log_dentry(trans, root, dentry);
2783}
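A sketch of the expected call pattern for the helper above, modeled on the fsync path: when logging is refused, fall back to a full transaction commit; otherwise push only the log tree. example_fsync is a made-up name and the body is illustrative, not a verbatim copy of the real caller:

static int example_fsync(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct dentry *dentry)
{
	int ret = btrfs_log_dentry_safe(trans, root, dentry);

	if (ret > 0)		/* new chunks: the log cannot be trusted */
		return btrfs_commit_transaction(trans, root);

	btrfs_sync_log(trans, root);	/* push just the log tree to disk */
	return btrfs_end_transaction(trans, root);
}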
2784
2785/*
2786 * should be called during mount to recover and replay any log trees
2787 * from the FS
2788 */
2789int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
2790{
2791 int ret;
2792 struct btrfs_path *path;
2793 struct btrfs_trans_handle *trans;
2794 struct btrfs_key key;
2795 struct btrfs_key found_key;
2796 struct btrfs_key tmp_key;
2797 struct btrfs_root *log;
2798 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
2799 u64 highest_inode;
2800 struct walk_control wc = {
2801 .process_func = process_one_buffer,
2802 .stage = 0,
2803 };
2804
2805 fs_info->log_root_recovering = 1;
2806 path = btrfs_alloc_path();
2807 BUG_ON(!path);
2808
2809 trans = btrfs_start_transaction(fs_info->tree_root, 1);
2810
2811 wc.trans = trans;
2812 wc.pin = 1;
2813
2814 walk_log_tree(trans, log_root_tree, &wc);
2815
2816again:
2817 key.objectid = BTRFS_TREE_LOG_OBJECTID;
2818 key.offset = (u64)-1;
2819 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2820
2821 while (1) {
2822 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
2823 if (ret < 0)
2824 break;
2825 if (ret > 0) {
2826 if (path->slots[0] == 0)
2827 break;
2828 path->slots[0]--;
2829 }
2830 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2831 path->slots[0]);
2832 btrfs_release_path(log_root_tree, path);
2833 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
2834 break;
2835
2836 log = btrfs_read_fs_root_no_radix(log_root_tree,
2837 &found_key);
2838 BUG_ON(!log);
2839
2841 tmp_key.objectid = found_key.offset;
2842 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
2843 tmp_key.offset = (u64)-1;
2844
2845 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
2846 BUG_ON(!wc.replay_dest);
2847
2848 wc.replay_dest->log_root = log;
2849 btrfs_record_root_in_trans(wc.replay_dest);
2850 ret = walk_log_tree(trans, log, &wc);
2851 BUG_ON(ret);
2852
2853 if (wc.stage == LOG_WALK_REPLAY_ALL) {
2854 ret = fixup_inode_link_counts(trans, wc.replay_dest,
2855 path);
2856 BUG_ON(ret);
2857 }
2858 ret = btrfs_find_highest_inode(wc.replay_dest, &highest_inode);
2859 if (ret == 0) {
2860 wc.replay_dest->highest_inode = highest_inode;
2861 wc.replay_dest->last_inode_alloc = highest_inode;
2862 }
2863
2864 key.offset = found_key.offset - 1;
2865 wc.replay_dest->log_root = NULL;
2866 free_extent_buffer(log->node);
2867 kfree(log);
2868
2869 if (found_key.offset == 0)
2870 break;
2871 }
2872 btrfs_release_path(log_root_tree, path);
2873
2874 /* step one is to pin it all, step two is to replay just inodes */
2875 if (wc.pin) {
2876 wc.pin = 0;
2877 wc.process_func = replay_one_buffer;
2878 wc.stage = LOG_WALK_REPLAY_INODES;
2879 goto again;
2880 }
2881 /* step three is to replay everything */
2882 if (wc.stage < LOG_WALK_REPLAY_ALL) {
2883 wc.stage++;
2884 goto again;
2885 }
2886
2887 btrfs_free_path(path);
2888
2889 free_extent_buffer(log_root_tree->node);
2890 log_root_tree->log_root = NULL;
2891 fs_info->log_root_recovering = 0;
2892
2893 /* step 4: commit the transaction, which also unpins the blocks */
2894 btrfs_commit_transaction(trans, fs_info->tree_root);
2895
2896 kfree(log_root_tree);
2897 return 0;
2898}
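Recovery, then, is one tree walk run three times with a rising stage: pin every log block, replay the inode items, replay everything else. A trivial model of that driver loop:

#include <stdio.h>

enum { WALK_PIN, WALK_REPLAY_INODES, WALK_REPLAY_ALL };

int main(void)
{
	/* same walk, three passes - the pin / replay-inodes / replay-all
	 * sequence that the goto-again above implements */
	for (int stage = WALK_PIN; stage <= WALK_REPLAY_ALL; stage++)
		printf("walk all log trees, stage %d\n", stage);
	return 0;
}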
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
new file mode 100644
index 000000000000..b9409b32ed02
--- /dev/null
+++ b/fs/btrfs/tree-log.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#ifndef __TREE_LOG_
20#define __TREE_LOG_
21
22int btrfs_sync_log(struct btrfs_trans_handle *trans,
23 struct btrfs_root *root);
24int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root);
25int btrfs_log_dentry(struct btrfs_trans_handle *trans,
26 struct btrfs_root *root, struct dentry *dentry);
27int btrfs_recover_log_trees(struct btrfs_root *tree_root);
28int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
29 struct btrfs_root *root, struct dentry *dentry);
30int btrfs_log_inode(struct btrfs_trans_handle *trans,
31 struct btrfs_root *root, struct inode *inode,
32 int inode_only);
33int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root,
35 const char *name, int name_len,
36 struct inode *dir, u64 index);
37int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
38 struct btrfs_root *root,
39 const char *name, int name_len,
40 struct inode *inode, u64 dirid);
41#endif
diff --git a/fs/btrfs/version.h b/fs/btrfs/version.h
new file mode 100644
index 000000000000..9bf3946d5ef2
--- /dev/null
+++ b/fs/btrfs/version.h
@@ -0,0 +1,4 @@
1#ifndef __BTRFS_VERSION_H
2#define __BTRFS_VERSION_H
3#define BTRFS_BUILD_VERSION "Btrfs"
4#endif
diff --git a/fs/btrfs/version.sh b/fs/btrfs/version.sh
new file mode 100644
index 000000000000..1ca1952fd917
--- /dev/null
+++ b/fs/btrfs/version.sh
@@ -0,0 +1,43 @@
1#!/bin/bash
2#
3# determine-version -- report a useful version for releases
4#
5# Copyright 2008, Aron Griffis <agriffis@n01se.net>
6# Copyright 2008, Oracle
7# Released under the GNU GPLv2
8
9v="v0.16"
10
11which git &> /dev/null
12if [ $? == 0 ]; then
13 git branch >& /dev/null
14 if [ $? == 0 ]; then
15 if head=`git rev-parse --verify HEAD 2>/dev/null`; then
16 if tag=`git describe --tags 2>/dev/null`; then
17 v="$tag"
18 fi
19
20 # Are there uncommitted changes?
21 git update-index --refresh --unmerged > /dev/null
22 if git diff-index --name-only HEAD | \
23 grep -v "^scripts/package" \
24 | read dummy; then
25 v="$v"-dirty
26 fi
27 fi
28 fi
29fi
30
31echo "#ifndef __BUILD_VERSION" > .build-version.h
32echo "#define __BUILD_VERSION" >> .build-version.h
33echo "#define BTRFS_BUILD_VERSION \"Btrfs $v\"" >> .build-version.h
34echo "#endif" >> .build-version.h
35
36diff -q version.h .build-version.h >& /dev/null
37
38if [ $? == 0 ]; then
39 rm .build-version.h
40 exit 0
41fi
42
43mv .build-version.h version.h
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
new file mode 100644
index 000000000000..b187b537888e
--- /dev/null
+++ b/fs/btrfs/volumes.c
@@ -0,0 +1,3218 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18#include <linux/sched.h>
19#include <linux/bio.h>
20#include <linux/buffer_head.h>
21#include <linux/blkdev.h>
22#include <linux/random.h>
23#include <linux/version.h>
24#include <asm/div64.h>
25#include "compat.h"
26#include "ctree.h"
27#include "extent_map.h"
28#include "disk-io.h"
29#include "transaction.h"
30#include "print-tree.h"
31#include "volumes.h"
32#include "async-thread.h"
33
34struct map_lookup {
35 u64 type;
36 int io_align;
37 int io_width;
38 int stripe_len;
39 int sector_size;
40 int num_stripes;
41 int sub_stripes;
42 struct btrfs_bio_stripe stripes[];
43};
44
45static int init_first_rw_device(struct btrfs_trans_handle *trans,
46 struct btrfs_root *root,
47 struct btrfs_device *device);
48static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
49
50#define map_lookup_size(n) (sizeof(struct map_lookup) + \
51 (sizeof(struct btrfs_bio_stripe) * (n)))
52
53static DEFINE_MUTEX(uuid_mutex);
54static LIST_HEAD(fs_uuids);
55
56void btrfs_lock_volumes(void)
57{
58 mutex_lock(&uuid_mutex);
59}
60
61void btrfs_unlock_volumes(void)
62{
63 mutex_unlock(&uuid_mutex);
64}
65
66static void lock_chunks(struct btrfs_root *root)
67{
68 mutex_lock(&root->fs_info->chunk_mutex);
69}
70
71static void unlock_chunks(struct btrfs_root *root)
72{
73 mutex_unlock(&root->fs_info->chunk_mutex);
74}
75
76static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
77{
78 struct btrfs_device *device;
79 WARN_ON(fs_devices->opened);
80 while (!list_empty(&fs_devices->devices)) {
81 device = list_entry(fs_devices->devices.next,
82 struct btrfs_device, dev_list);
83 list_del(&device->dev_list);
84 kfree(device->name);
85 kfree(device);
86 }
87 kfree(fs_devices);
88}
89
90int btrfs_cleanup_fs_uuids(void)
91{
92 struct btrfs_fs_devices *fs_devices;
93
94 while (!list_empty(&fs_uuids)) {
95 fs_devices = list_entry(fs_uuids.next,
96 struct btrfs_fs_devices, list);
97 list_del(&fs_devices->list);
98 free_fs_devices(fs_devices);
99 }
100 return 0;
101}
102
103static noinline struct btrfs_device *__find_device(struct list_head *head,
104 u64 devid, u8 *uuid)
105{
106 struct btrfs_device *dev;
107 struct list_head *cur;
108
109 list_for_each(cur, head) {
110 dev = list_entry(cur, struct btrfs_device, dev_list);
111 if (dev->devid == devid &&
112 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
113 return dev;
114 }
115 }
116 return NULL;
117}
118
119static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
120{
121 struct list_head *cur;
122 struct btrfs_fs_devices *fs_devices;
123
124 list_for_each(cur, &fs_uuids) {
125 fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
126 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
127 return fs_devices;
128 }
129 return NULL;
130}
131
132/*
133 * we try to collect pending bios for a device so we don't get a large
134 * number of procs sending bios down to the same device. This greatly
135 * improves the scheduler's ability to collect and merge the bios.
136 *
137 * But, it also turns into a long list of bios to process and that is sure
138 * to eventually make the worker thread block. The solution here is to
139 * make some progress and then put this work struct back at the end of
140 * the list if the block device is congested. This way, multiple devices
141 * can make progress from a single worker thread.
142 */
143static noinline int run_scheduled_bios(struct btrfs_device *device)
144{
145 struct bio *pending;
146 struct backing_dev_info *bdi;
147 struct btrfs_fs_info *fs_info;
148 struct bio *tail;
149 struct bio *cur;
150 int again = 0;
151 unsigned long num_run = 0;
152 unsigned long limit;
153
154 bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
155 fs_info = device->dev_root->fs_info;
156 limit = btrfs_async_submit_limit(fs_info);
157 limit = limit * 2 / 3;
158
159loop:
160 spin_lock(&device->io_lock);
161
162 /* take all the bios off the list at once and process them
163 * later on (without the lock held). But, remember the
164 * tail and other pointers so the bios can be properly reinserted
165 * into the list if we hit congestion
166 */
167 pending = device->pending_bios;
168 tail = device->pending_bio_tail;
169 WARN_ON(pending && !tail);
170 device->pending_bios = NULL;
171 device->pending_bio_tail = NULL;
172
173 /*
174 * if pending was null this time around, no bios need processing
175 * at all and we can stop. Otherwise it'll loop back up again
176 * and do an additional check so no bios are missed.
177 *
178 * device->running_pending is used to synchronize with the
179 * schedule_bio code.
180 */
181 if (pending) {
182 again = 1;
183 device->running_pending = 1;
184 } else {
185 again = 0;
186 device->running_pending = 0;
187 }
188 spin_unlock(&device->io_lock);
189
190 while (pending) {
191 cur = pending;
192 pending = pending->bi_next;
193 cur->bi_next = NULL;
194 atomic_dec(&fs_info->nr_async_bios);
195
196 if (atomic_read(&fs_info->nr_async_bios) < limit &&
197 waitqueue_active(&fs_info->async_submit_wait))
198 wake_up(&fs_info->async_submit_wait);
199
200 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
201 bio_get(cur);
202 submit_bio(cur->bi_rw, cur);
203 bio_put(cur);
204 num_run++;
205
206 /*
207 * we made progress, there is more work to do and the bdi
208 * is now congested. Back off and let other work structs
209 * run instead
210 */
211 if (pending && bdi_write_congested(bdi) &&
212 fs_info->fs_devices->open_devices > 1) {
213 struct bio *old_head;
214
215 spin_lock(&device->io_lock);
216
217 old_head = device->pending_bios;
218 device->pending_bios = pending;
219 if (device->pending_bio_tail)
220 tail->bi_next = old_head;
221 else
222 device->pending_bio_tail = tail;
223
224 spin_unlock(&device->io_lock);
225 btrfs_requeue_work(&device->work);
226 goto done;
227 }
228 }
229 if (again)
230 goto loop;
231done:
232 return 0;
233}
234
235static void pending_bios_fn(struct btrfs_work *work)
236{
237 struct btrfs_device *device;
238
239 device = container_of(work, struct btrfs_device, work);
240 run_scheduled_bios(device);
241}
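A standalone model of the detach/process/requeue pattern run_scheduled_bios uses: take the whole pending list under the lock, submit without the lock held, and splice the unprocessed remainder back when congestion hits. The congested() stub stands in for bdi_write_congested():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct bio {
	int id;
	struct bio *next;
};

static pthread_mutex_t io_lock = PTHREAD_MUTEX_INITIALIZER;
static struct bio *pending;

static int congested(int done)		/* pretend congestion after 2 bios */
{
	return done == 2;
}

static void run_scheduled(void)
{
	struct bio *list;
	int done = 0;

	pthread_mutex_lock(&io_lock);
	list = pending;			/* take everything at once */
	pending = NULL;
	pthread_mutex_unlock(&io_lock);

	while (list) {
		struct bio *cur = list;

		list = list->next;
		printf("submit bio %d\n", cur->id);
		free(cur);

		if (list && congested(++done)) {
			struct bio *tail = list;

			while (tail->next)	/* find the end of the rest */
				tail = tail->next;
			pthread_mutex_lock(&io_lock);
			tail->next = pending;	/* splice back for next pass */
			pending = list;
			pthread_mutex_unlock(&io_lock);
			printf("requeued remainder\n");
			return;
		}
	}
}

int main(void)
{
	for (int i = 4; i >= 1; i--) {
		struct bio *b = malloc(sizeof(*b));

		b->id = i;
		b->next = pending;
		pending = b;
	}
	run_scheduled();	/* submits 1 and 2, requeues 3 and 4 */
	run_scheduled();	/* submits 3 and 4 */
	return 0;
}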
242
243static noinline int device_list_add(const char *path,
244 struct btrfs_super_block *disk_super,
245 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
246{
247 struct btrfs_device *device;
248 struct btrfs_fs_devices *fs_devices;
249 u64 found_transid = btrfs_super_generation(disk_super);
250
251 fs_devices = find_fsid(disk_super->fsid);
252 if (!fs_devices) {
253 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
254 if (!fs_devices)
255 return -ENOMEM;
256 INIT_LIST_HEAD(&fs_devices->devices);
257 INIT_LIST_HEAD(&fs_devices->alloc_list);
258 list_add(&fs_devices->list, &fs_uuids);
259 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
260 fs_devices->latest_devid = devid;
261 fs_devices->latest_trans = found_transid;
262 device = NULL;
263 } else {
264 device = __find_device(&fs_devices->devices, devid,
265 disk_super->dev_item.uuid);
266 }
267 if (!device) {
268 if (fs_devices->opened)
269 return -EBUSY;
270
271 device = kzalloc(sizeof(*device), GFP_NOFS);
272 if (!device) {
273 /* we can safely leave the fs_devices entry around */
274 return -ENOMEM;
275 }
276 device->devid = devid;
277 device->work.func = pending_bios_fn;
278 memcpy(device->uuid, disk_super->dev_item.uuid,
279 BTRFS_UUID_SIZE);
280 device->barriers = 1;
281 spin_lock_init(&device->io_lock);
282 device->name = kstrdup(path, GFP_NOFS);
283 if (!device->name) {
284 kfree(device);
285 return -ENOMEM;
286 }
287 INIT_LIST_HEAD(&device->dev_alloc_list);
288 list_add(&device->dev_list, &fs_devices->devices);
289 device->fs_devices = fs_devices;
290 fs_devices->num_devices++;
291 }
292
293 if (found_transid > fs_devices->latest_trans) {
294 fs_devices->latest_devid = devid;
295 fs_devices->latest_trans = found_transid;
296 }
297 *fs_devices_ret = fs_devices;
298 return 0;
299}
300
301static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
302{
303 struct btrfs_fs_devices *fs_devices;
304 struct btrfs_device *device;
305 struct btrfs_device *orig_dev;
306
307 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
308 if (!fs_devices)
309 return ERR_PTR(-ENOMEM);
310
311 INIT_LIST_HEAD(&fs_devices->devices);
312 INIT_LIST_HEAD(&fs_devices->alloc_list);
313 INIT_LIST_HEAD(&fs_devices->list);
314 fs_devices->latest_devid = orig->latest_devid;
315 fs_devices->latest_trans = orig->latest_trans;
316 memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
317
318 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
319 device = kzalloc(sizeof(*device), GFP_NOFS);
320 if (!device)
321 goto error;
322
323 device->name = kstrdup(orig_dev->name, GFP_NOFS);
324 if (!device->name)
325 goto error;
326
327 device->devid = orig_dev->devid;
328 device->work.func = pending_bios_fn;
329 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
330 device->barriers = 1;
331 spin_lock_init(&device->io_lock);
332 INIT_LIST_HEAD(&device->dev_list);
333 INIT_LIST_HEAD(&device->dev_alloc_list);
334
335 list_add(&device->dev_list, &fs_devices->devices);
336 device->fs_devices = fs_devices;
337 fs_devices->num_devices++;
338 }
339 return fs_devices;
340error:
341 free_fs_devices(fs_devices);
342 return ERR_PTR(-ENOMEM);
343}
344
345int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
346{
347 struct list_head *tmp;
348 struct list_head *cur;
349 struct btrfs_device *device;
350
351 mutex_lock(&uuid_mutex);
352again:
353 list_for_each_safe(cur, tmp, &fs_devices->devices) {
354 device = list_entry(cur, struct btrfs_device, dev_list);
355 if (device->in_fs_metadata)
356 continue;
357
358 if (device->bdev) {
359 close_bdev_exclusive(device->bdev, device->mode);
360 device->bdev = NULL;
361 fs_devices->open_devices--;
362 }
363 if (device->writeable) {
364 list_del_init(&device->dev_alloc_list);
365 device->writeable = 0;
366 fs_devices->rw_devices--;
367 }
368 list_del_init(&device->dev_list);
369 fs_devices->num_devices--;
370 kfree(device->name);
371 kfree(device);
372 }
373
374 if (fs_devices->seed) {
375 fs_devices = fs_devices->seed;
376 goto again;
377 }
378
379 mutex_unlock(&uuid_mutex);
380 return 0;
381}
382
383static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
384{
385 struct list_head *cur;
386 struct btrfs_device *device;
387
388 if (--fs_devices->opened > 0)
389 return 0;
390
391 list_for_each(cur, &fs_devices->devices) {
392 device = list_entry(cur, struct btrfs_device, dev_list);
393 if (device->bdev) {
394 close_bdev_exclusive(device->bdev, device->mode);
395 fs_devices->open_devices--;
396 }
397 if (device->writeable) {
398 list_del_init(&device->dev_alloc_list);
399 fs_devices->rw_devices--;
400 }
401
402 device->bdev = NULL;
403 device->writeable = 0;
404 device->in_fs_metadata = 0;
405 }
406 WARN_ON(fs_devices->open_devices);
407 WARN_ON(fs_devices->rw_devices);
408 fs_devices->opened = 0;
409 fs_devices->seeding = 0;
410
411 return 0;
412}
413
414int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
415{
416 struct btrfs_fs_devices *seed_devices = NULL;
417 int ret;
418
419 mutex_lock(&uuid_mutex);
420 ret = __btrfs_close_devices(fs_devices);
421 if (!fs_devices->opened) {
422 seed_devices = fs_devices->seed;
423 fs_devices->seed = NULL;
424 }
425 mutex_unlock(&uuid_mutex);
426
427 while (seed_devices) {
428 fs_devices = seed_devices;
429 seed_devices = fs_devices->seed;
430 __btrfs_close_devices(fs_devices);
431 free_fs_devices(fs_devices);
432 }
433 return ret;
434}
435
436static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
437 fmode_t flags, void *holder)
438{
439 struct block_device *bdev;
440 struct list_head *head = &fs_devices->devices;
441 struct list_head *cur;
442 struct btrfs_device *device;
443 struct block_device *latest_bdev = NULL;
444 struct buffer_head *bh;
445 struct btrfs_super_block *disk_super;
446 u64 latest_devid = 0;
447 u64 latest_transid = 0;
448 u64 devid;
449 int seeding = 1;
450 int ret = 0;
451
452 list_for_each(cur, head) {
453 device = list_entry(cur, struct btrfs_device, dev_list);
454 if (device->bdev)
455 continue;
456 if (!device->name)
457 continue;
458
459 bdev = open_bdev_exclusive(device->name, flags, holder);
460 if (IS_ERR(bdev)) {
461 printk(KERN_INFO "open %s failed\n", device->name);
462 goto error;
463 }
464 set_blocksize(bdev, 4096);
465
466 bh = btrfs_read_dev_super(bdev);
467 if (!bh)
468 goto error_close;
469
470 disk_super = (struct btrfs_super_block *)bh->b_data;
471 devid = le64_to_cpu(disk_super->dev_item.devid);
472 if (devid != device->devid)
473 goto error_brelse;
474
475 if (memcmp(device->uuid, disk_super->dev_item.uuid,
476 BTRFS_UUID_SIZE))
477 goto error_brelse;
478
479 device->generation = btrfs_super_generation(disk_super);
480 if (!latest_transid || device->generation > latest_transid) {
481 latest_devid = devid;
482 latest_transid = device->generation;
483 latest_bdev = bdev;
484 }
485
486 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
487 device->writeable = 0;
488 } else {
489 device->writeable = !bdev_read_only(bdev);
490 seeding = 0;
491 }
492
493 device->bdev = bdev;
494 device->in_fs_metadata = 0;
495 device->mode = flags;
496
497 fs_devices->open_devices++;
498 if (device->writeable) {
499 fs_devices->rw_devices++;
500 list_add(&device->dev_alloc_list,
501 &fs_devices->alloc_list);
502 }
503 continue;
504
505error_brelse:
506 brelse(bh);
507error_close:
508 close_bdev_exclusive(bdev, FMODE_READ);
509error:
510 continue;
511 }
512 if (fs_devices->open_devices == 0) {
513 ret = -EIO;
514 goto out;
515 }
516 fs_devices->seeding = seeding;
517 fs_devices->opened = 1;
518 fs_devices->latest_bdev = latest_bdev;
519 fs_devices->latest_devid = latest_devid;
520 fs_devices->latest_trans = latest_transid;
521 fs_devices->total_rw_bytes = 0;
522out:
523 return ret;
524}
525
526int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
527 fmode_t flags, void *holder)
528{
529 int ret;
530
531 mutex_lock(&uuid_mutex);
532 if (fs_devices->opened) {
533 fs_devices->opened++;
534 ret = 0;
535 } else {
536 ret = __btrfs_open_devices(fs_devices, flags, holder);
537 }
538 mutex_unlock(&uuid_mutex);
539 return ret;
540}
541
542int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
543 struct btrfs_fs_devices **fs_devices_ret)
544{
545 struct btrfs_super_block *disk_super;
546 struct block_device *bdev;
547 struct buffer_head *bh;
548 int ret;
549 u64 devid;
550 u64 transid;
551
552 mutex_lock(&uuid_mutex);
553
554 bdev = open_bdev_exclusive(path, flags, holder);
555
556 if (IS_ERR(bdev)) {
557 ret = PTR_ERR(bdev);
558 goto error;
559 }
560
561 ret = set_blocksize(bdev, 4096);
562 if (ret)
563 goto error_close;
564 bh = btrfs_read_dev_super(bdev);
565 if (!bh) {
566 ret = -EIO;
567 goto error_close;
568 }
569 disk_super = (struct btrfs_super_block *)bh->b_data;
570 devid = le64_to_cpu(disk_super->dev_item.devid);
571 transid = btrfs_super_generation(disk_super);
572 if (disk_super->label[0])
573 printk(KERN_INFO "device label %s ", disk_super->label);
574 else {
575		/* FIXME, make a real uuid parser */
576 printk(KERN_INFO "device fsid %llx-%llx ",
577 *(unsigned long long *)disk_super->fsid,
578 *(unsigned long long *)(disk_super->fsid + 8));
579 }
580 printk(KERN_INFO "devid %llu transid %llu %s\n",
581 (unsigned long long)devid, (unsigned long long)transid, path);
582 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
583
584 brelse(bh);
585error_close:
586 close_bdev_exclusive(bdev, flags);
587error:
588 mutex_unlock(&uuid_mutex);
589 return ret;
590}
591
592/*
593 * this uses a pretty simple search; the expectation is that it is
594 * called very infrequently and that a given device has a small number
595 * of extents
596 */
597static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
598 struct btrfs_device *device,
599 u64 num_bytes, u64 *start)
600{
601 struct btrfs_key key;
602 struct btrfs_root *root = device->dev_root;
603 struct btrfs_dev_extent *dev_extent = NULL;
604 struct btrfs_path *path;
605 u64 hole_size = 0;
606 u64 last_byte = 0;
607 u64 search_start = 0;
608 u64 search_end = device->total_bytes;
609 int ret;
610 int slot = 0;
611 int start_found;
612 struct extent_buffer *l;
613
614 path = btrfs_alloc_path();
615 if (!path)
616 return -ENOMEM;
617 path->reada = 2;
618 start_found = 0;
619
620 /* FIXME use last free of some kind */
621
622 /* we don't want to overwrite the superblock on the drive,
623 * so we make sure to start at an offset of at least 1MB
624 */
625 search_start = max((u64)1024 * 1024, search_start);
626
627 if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
628 search_start = max(root->fs_info->alloc_start, search_start);
629
630 key.objectid = device->devid;
631 key.offset = search_start;
632 key.type = BTRFS_DEV_EXTENT_KEY;
633 ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
634 if (ret < 0)
635 goto error;
636 ret = btrfs_previous_item(root, path, 0, key.type);
637 if (ret < 0)
638 goto error;
639 l = path->nodes[0];
640 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
641 while (1) {
642 l = path->nodes[0];
643 slot = path->slots[0];
644 if (slot >= btrfs_header_nritems(l)) {
645 ret = btrfs_next_leaf(root, path);
646 if (ret == 0)
647 continue;
648 if (ret < 0)
649 goto error;
650no_more_items:
651 if (!start_found) {
652 if (search_start >= search_end) {
653 ret = -ENOSPC;
654 goto error;
655 }
656 *start = search_start;
657 start_found = 1;
658 goto check_pending;
659 }
660 *start = last_byte > search_start ?
661 last_byte : search_start;
662 if (search_end <= *start) {
663 ret = -ENOSPC;
664 goto error;
665 }
666 goto check_pending;
667 }
668 btrfs_item_key_to_cpu(l, &key, slot);
669
670 if (key.objectid < device->devid)
671 goto next;
672
673 if (key.objectid > device->devid)
674 goto no_more_items;
675
676 if (key.offset >= search_start && key.offset > last_byte &&
677 start_found) {
678 if (last_byte < search_start)
679 last_byte = search_start;
680 hole_size = key.offset - last_byte;
681 if (key.offset > last_byte &&
682 hole_size >= num_bytes) {
683 *start = last_byte;
684 goto check_pending;
685 }
686 }
687 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
688 goto next;
689
690 start_found = 1;
691 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
692 last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
693next:
694 path->slots[0]++;
695 cond_resched();
696 }
697check_pending:
698 /* we have to make sure we didn't find an extent that has already
699 * been allocated by the map tree or the original allocation
700 */
701 BUG_ON(*start < search_start);
702
703 if (*start + num_bytes > search_end) {
704 ret = -ENOSPC;
705 goto error;
706 }
707 /* check for pending inserts here */
708 ret = 0;
709
710error:
711 btrfs_free_path(path);
712 return ret;
713}
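
/*
 * Worked example of the hole search above: with dev extents at
 * [1M, 5M) and [9M, 12M) and num_bytes = 3M, the first item sets
 * last_byte = 5M, the second item at offset 9M yields
 * hole_size = 9M - 5M = 4M >= 3M, so *start = 5M.  When no hole
 * between items is large enough, the no_more_items path hands back
 * the space after the last extent instead.
 */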
714
715static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
716 struct btrfs_device *device,
717 u64 start)
718{
719 int ret;
720 struct btrfs_path *path;
721 struct btrfs_root *root = device->dev_root;
722 struct btrfs_key key;
723 struct btrfs_key found_key;
724 struct extent_buffer *leaf = NULL;
725 struct btrfs_dev_extent *extent = NULL;
726
727 path = btrfs_alloc_path();
728 if (!path)
729 return -ENOMEM;
730
731 key.objectid = device->devid;
732 key.offset = start;
733 key.type = BTRFS_DEV_EXTENT_KEY;
734
735 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
736 if (ret > 0) {
737 ret = btrfs_previous_item(root, path, key.objectid,
738 BTRFS_DEV_EXTENT_KEY);
739 BUG_ON(ret);
740 leaf = path->nodes[0];
741 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
742 extent = btrfs_item_ptr(leaf, path->slots[0],
743 struct btrfs_dev_extent);
744 BUG_ON(found_key.offset > start || found_key.offset +
745 btrfs_dev_extent_length(leaf, extent) < start);
746 ret = 0;
747 } else if (ret == 0) {
748 leaf = path->nodes[0];
749 extent = btrfs_item_ptr(leaf, path->slots[0],
750 struct btrfs_dev_extent);
751 }
752 BUG_ON(ret);
753
754 if (device->bytes_used > 0)
755 device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
756 ret = btrfs_del_item(trans, root, path);
757 BUG_ON(ret);
758
759 btrfs_free_path(path);
760 return ret;
761}
762
763int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
764 struct btrfs_device *device,
765 u64 chunk_tree, u64 chunk_objectid,
766 u64 chunk_offset, u64 start, u64 num_bytes)
767{
768 int ret;
769 struct btrfs_path *path;
770 struct btrfs_root *root = device->dev_root;
771 struct btrfs_dev_extent *extent;
772 struct extent_buffer *leaf;
773 struct btrfs_key key;
774
775 WARN_ON(!device->in_fs_metadata);
776 path = btrfs_alloc_path();
777 if (!path)
778 return -ENOMEM;
779
780 key.objectid = device->devid;
781 key.offset = start;
782 key.type = BTRFS_DEV_EXTENT_KEY;
783 ret = btrfs_insert_empty_item(trans, root, path, &key,
784 sizeof(*extent));
785 BUG_ON(ret);
786
787 leaf = path->nodes[0];
788 extent = btrfs_item_ptr(leaf, path->slots[0],
789 struct btrfs_dev_extent);
790 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
791 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
792 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
793
794 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
795 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
796 BTRFS_UUID_SIZE);
797
798 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
799 btrfs_mark_buffer_dirty(leaf);
800 btrfs_free_path(path);
801 return ret;
802}
803
804static noinline int find_next_chunk(struct btrfs_root *root,
805 u64 objectid, u64 *offset)
806{
807 struct btrfs_path *path;
808 int ret;
809 struct btrfs_key key;
810 struct btrfs_chunk *chunk;
811 struct btrfs_key found_key;
812
813 path = btrfs_alloc_path();
814 BUG_ON(!path);
815
816 key.objectid = objectid;
817 key.offset = (u64)-1;
818 key.type = BTRFS_CHUNK_ITEM_KEY;
819
820 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
821 if (ret < 0)
822 goto error;
823
824 BUG_ON(ret == 0);
825
826 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
827 if (ret) {
828 *offset = 0;
829 } else {
830 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
831 path->slots[0]);
832 if (found_key.objectid != objectid)
833 *offset = 0;
834 else {
835 chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
836 struct btrfs_chunk);
837 *offset = found_key.offset +
838 btrfs_chunk_length(path->nodes[0], chunk);
839 }
840 }
841 ret = 0;
842error:
843 btrfs_free_path(path);
844 return ret;
845}
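
/*
 * find_next_chunk() above and find_next_devid() below share a trick:
 * search for key offset (u64)-1, which should never exist (the
 * BUG_ON(ret == 0) asserts this), then step back to the previous item
 * to land on the highest existing key.  E.g. if the last chunk starts
 * at 100M with length 256M, the next chunk offset becomes
 * 100M + 256M = 356M.
 */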
846
847static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
848{
849 int ret;
850 struct btrfs_key key;
851 struct btrfs_key found_key;
852 struct btrfs_path *path;
853
854 root = root->fs_info->chunk_root;
855
856 path = btrfs_alloc_path();
857 if (!path)
858 return -ENOMEM;
859
860 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
861 key.type = BTRFS_DEV_ITEM_KEY;
862 key.offset = (u64)-1;
863
864 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
865 if (ret < 0)
866 goto error;
867
868 BUG_ON(ret == 0);
869
870 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
871 BTRFS_DEV_ITEM_KEY);
872 if (ret) {
873 *objectid = 1;
874 } else {
875 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
876 path->slots[0]);
877 *objectid = found_key.offset + 1;
878 }
879 ret = 0;
880error:
881 btrfs_free_path(path);
882 return ret;
883}
884
885/*
886 * the device information is stored in the chunk root;
887 * the btrfs_device struct should be fully filled in
888 */
889int btrfs_add_device(struct btrfs_trans_handle *trans,
890 struct btrfs_root *root,
891 struct btrfs_device *device)
892{
893 int ret;
894 struct btrfs_path *path;
895 struct btrfs_dev_item *dev_item;
896 struct extent_buffer *leaf;
897 struct btrfs_key key;
898 unsigned long ptr;
899
900 root = root->fs_info->chunk_root;
901
902 path = btrfs_alloc_path();
903 if (!path)
904 return -ENOMEM;
905
906 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
907 key.type = BTRFS_DEV_ITEM_KEY;
908 key.offset = device->devid;
909
910 ret = btrfs_insert_empty_item(trans, root, path, &key,
911 sizeof(*dev_item));
912 if (ret)
913 goto out;
914
915 leaf = path->nodes[0];
916 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
917
918 btrfs_set_device_id(leaf, dev_item, device->devid);
919 btrfs_set_device_generation(leaf, dev_item, 0);
920 btrfs_set_device_type(leaf, dev_item, device->type);
921 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
922 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
923 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
924 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
925 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
926 btrfs_set_device_group(leaf, dev_item, 0);
927 btrfs_set_device_seek_speed(leaf, dev_item, 0);
928 btrfs_set_device_bandwidth(leaf, dev_item, 0);
929 btrfs_set_device_start_offset(leaf, dev_item, 0);
930
931 ptr = (unsigned long)btrfs_device_uuid(dev_item);
932 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
933 ptr = (unsigned long)btrfs_device_fsid(dev_item);
934 write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
935 btrfs_mark_buffer_dirty(leaf);
936
937 ret = 0;
938out:
939 btrfs_free_path(path);
940 return ret;
941}
942
943static int btrfs_rm_dev_item(struct btrfs_root *root,
944 struct btrfs_device *device)
945{
946 int ret;
947 struct btrfs_path *path;
948 struct btrfs_key key;
949 struct btrfs_trans_handle *trans;
950
951 root = root->fs_info->chunk_root;
952
953 path = btrfs_alloc_path();
954 if (!path)
955 return -ENOMEM;
956
957 trans = btrfs_start_transaction(root, 1);
958 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
959 key.type = BTRFS_DEV_ITEM_KEY;
960 key.offset = device->devid;
961 lock_chunks(root);
962
963 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
964 if (ret < 0)
965 goto out;
966
967 if (ret > 0) {
968 ret = -ENOENT;
969 goto out;
970 }
971
972 ret = btrfs_del_item(trans, root, path);
973 if (ret)
974 goto out;
975out:
976 btrfs_free_path(path);
977 unlock_chunks(root);
978 btrfs_commit_transaction(trans, root);
979 return ret;
980}
981
982int btrfs_rm_device(struct btrfs_root *root, char *device_path)
983{
984 struct btrfs_device *device;
985 struct btrfs_device *next_device;
986 struct block_device *bdev;
987 struct buffer_head *bh = NULL;
988 struct btrfs_super_block *disk_super;
989 u64 all_avail;
990 u64 devid;
991 u64 num_devices;
992 u8 *dev_uuid;
993 int ret = 0;
994
995 mutex_lock(&uuid_mutex);
996 mutex_lock(&root->fs_info->volume_mutex);
997
998 all_avail = root->fs_info->avail_data_alloc_bits |
999 root->fs_info->avail_system_alloc_bits |
1000 root->fs_info->avail_metadata_alloc_bits;
1001
1002 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1003 root->fs_info->fs_devices->rw_devices <= 4) {
1004 printk(KERN_ERR "btrfs: unable to go below four devices "
1005 "on raid10\n");
1006 ret = -EINVAL;
1007 goto out;
1008 }
1009
1010 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1011 root->fs_info->fs_devices->rw_devices <= 2) {
1012 printk(KERN_ERR "btrfs: unable to go below two "
1013 "devices on raid1\n");
1014 ret = -EINVAL;
1015 goto out;
1016 }
1017
1018 if (strcmp(device_path, "missing") == 0) {
1019 struct list_head *cur;
1020 struct list_head *devices;
1021 struct btrfs_device *tmp;
1022
1023 device = NULL;
1024 devices = &root->fs_info->fs_devices->devices;
1025 list_for_each(cur, devices) {
1026 tmp = list_entry(cur, struct btrfs_device, dev_list);
1027 if (tmp->in_fs_metadata && !tmp->bdev) {
1028 device = tmp;
1029 break;
1030 }
1031 }
1032 bdev = NULL;
1033 bh = NULL;
1034 disk_super = NULL;
1035 if (!device) {
1036 printk(KERN_ERR "btrfs: no missing devices found to "
1037 "remove\n");
1038 goto out;
1039 }
1040 } else {
1041 bdev = open_bdev_exclusive(device_path, FMODE_READ,
1042 root->fs_info->bdev_holder);
1043 if (IS_ERR(bdev)) {
1044 ret = PTR_ERR(bdev);
1045 goto out;
1046 }
1047
1048 set_blocksize(bdev, 4096);
1049 bh = btrfs_read_dev_super(bdev);
1050 if (!bh) {
1051 ret = -EIO;
1052 goto error_close;
1053 }
1054 disk_super = (struct btrfs_super_block *)bh->b_data;
1055 devid = le64_to_cpu(disk_super->dev_item.devid);
1056 dev_uuid = disk_super->dev_item.uuid;
1057 device = btrfs_find_device(root, devid, dev_uuid,
1058 disk_super->fsid);
1059 if (!device) {
1060 ret = -ENOENT;
1061 goto error_brelse;
1062 }
1063 }
1064
1065 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1066 printk(KERN_ERR "btrfs: unable to remove the only writeable "
1067 "device\n");
1068 ret = -EINVAL;
1069 goto error_brelse;
1070 }
1071
1072 if (device->writeable) {
1073 list_del_init(&device->dev_alloc_list);
1074 root->fs_info->fs_devices->rw_devices--;
1075 }
1076
1077 ret = btrfs_shrink_device(device, 0);
1078 if (ret)
1079 goto error_brelse;
1080
1081 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1082 if (ret)
1083 goto error_brelse;
1084
1085 device->in_fs_metadata = 0;
1086 list_del_init(&device->dev_list);
1087 device->fs_devices->num_devices--;
1088
1089 next_device = list_entry(root->fs_info->fs_devices->devices.next,
1090 struct btrfs_device, dev_list);
1091 if (device->bdev == root->fs_info->sb->s_bdev)
1092 root->fs_info->sb->s_bdev = next_device->bdev;
1093 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1094 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1095
1096 if (device->bdev) {
1097 close_bdev_exclusive(device->bdev, device->mode);
1098 device->bdev = NULL;
1099 device->fs_devices->open_devices--;
1100 }
1101
1102 num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
1103 btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
1104
1105 if (device->fs_devices->open_devices == 0) {
1106 struct btrfs_fs_devices *fs_devices;
1107 fs_devices = root->fs_info->fs_devices;
1108 while (fs_devices) {
1109 if (fs_devices->seed == device->fs_devices)
1110 break;
1111 fs_devices = fs_devices->seed;
1112 }
1113 fs_devices->seed = device->fs_devices->seed;
1114 device->fs_devices->seed = NULL;
1115 __btrfs_close_devices(device->fs_devices);
1116 free_fs_devices(device->fs_devices);
1117 }
1118
1119 /*
1120 * at this point, the device is zero sized. We want to
1121 * remove it from the devices list and zero out the old super
1122 */
1123 if (device->writeable) {
1124 /* make sure this device isn't detected as part of
1125 * the FS anymore
1126 */
1127 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1128 set_buffer_dirty(bh);
1129 sync_dirty_buffer(bh);
1130 }
1131
1132 kfree(device->name);
1133 kfree(device);
1134 ret = 0;
1135
1136error_brelse:
1137 brelse(bh);
1138error_close:
1139 if (bdev)
1140 close_bdev_exclusive(bdev, FMODE_READ);
1141out:
1142 mutex_unlock(&root->fs_info->volume_mutex);
1143 mutex_unlock(&uuid_mutex);
1144 return ret;
1145}
1146
1147/*
1148 * does all the dirty work required for changing the file system's UUID.
1149 */
1150static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
1151 struct btrfs_root *root)
1152{
1153 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1154 struct btrfs_fs_devices *old_devices;
1155 struct btrfs_fs_devices *seed_devices;
1156 struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
1157 struct btrfs_device *device;
1158 u64 super_flags;
1159
1160 BUG_ON(!mutex_is_locked(&uuid_mutex));
1161 if (!fs_devices->seeding)
1162 return -EINVAL;
1163
1164 seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1165 if (!seed_devices)
1166 return -ENOMEM;
1167
1168 old_devices = clone_fs_devices(fs_devices);
1169 if (IS_ERR(old_devices)) {
1170 kfree(seed_devices);
1171 return PTR_ERR(old_devices);
1172 }
1173
1174 list_add(&old_devices->list, &fs_uuids);
1175
1176 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1177 seed_devices->opened = 1;
1178 INIT_LIST_HEAD(&seed_devices->devices);
1179 INIT_LIST_HEAD(&seed_devices->alloc_list);
1180 list_splice_init(&fs_devices->devices, &seed_devices->devices);
1181 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1182 list_for_each_entry(device, &seed_devices->devices, dev_list) {
1183 device->fs_devices = seed_devices;
1184 }
1185
1186 fs_devices->seeding = 0;
1187 fs_devices->num_devices = 0;
1188 fs_devices->open_devices = 0;
1189 fs_devices->seed = seed_devices;
1190
1191 generate_random_uuid(fs_devices->fsid);
1192 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1193 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1194 super_flags = btrfs_super_flags(disk_super) &
1195 ~BTRFS_SUPER_FLAG_SEEDING;
1196 btrfs_set_super_flags(disk_super, super_flags);
1197
1198 return 0;
1199}
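
/*
 * After btrfs_prepare_sprout() the old devices live on under the old
 * fsid as a seed set hanging off fs_devices->seed, while fs_devices
 * itself gets a freshly generated fsid and starts out with zero
 * devices; the device being added by the caller becomes its first
 * writeable member.
 */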
1200
1201/*
1202 * store the expected generation for seed devices in device items.
1203 */
1204static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1205 struct btrfs_root *root)
1206{
1207 struct btrfs_path *path;
1208 struct extent_buffer *leaf;
1209 struct btrfs_dev_item *dev_item;
1210 struct btrfs_device *device;
1211 struct btrfs_key key;
1212 u8 fs_uuid[BTRFS_UUID_SIZE];
1213 u8 dev_uuid[BTRFS_UUID_SIZE];
1214 u64 devid;
1215 int ret;
1216
1217 path = btrfs_alloc_path();
1218 if (!path)
1219 return -ENOMEM;
1220
1221 root = root->fs_info->chunk_root;
1222 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1223 key.offset = 0;
1224 key.type = BTRFS_DEV_ITEM_KEY;
1225
1226 while (1) {
1227 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1228 if (ret < 0)
1229 goto error;
1230
1231 leaf = path->nodes[0];
1232next_slot:
1233 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1234 ret = btrfs_next_leaf(root, path);
1235 if (ret > 0)
1236 break;
1237 if (ret < 0)
1238 goto error;
1239 leaf = path->nodes[0];
1240 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1241 btrfs_release_path(root, path);
1242 continue;
1243 }
1244
1245 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1246 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1247 key.type != BTRFS_DEV_ITEM_KEY)
1248 break;
1249
1250 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1251 struct btrfs_dev_item);
1252 devid = btrfs_device_id(leaf, dev_item);
1253 read_extent_buffer(leaf, dev_uuid,
1254 (unsigned long)btrfs_device_uuid(dev_item),
1255 BTRFS_UUID_SIZE);
1256 read_extent_buffer(leaf, fs_uuid,
1257 (unsigned long)btrfs_device_fsid(dev_item),
1258 BTRFS_UUID_SIZE);
1259 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1260 BUG_ON(!device);
1261
1262 if (device->fs_devices->seeding) {
1263 btrfs_set_device_generation(leaf, dev_item,
1264 device->generation);
1265 btrfs_mark_buffer_dirty(leaf);
1266 }
1267
1268 path->slots[0]++;
1269 goto next_slot;
1270 }
1271 ret = 0;
1272error:
1273 btrfs_free_path(path);
1274 return ret;
1275}
1276
1277int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1278{
1279 struct btrfs_trans_handle *trans;
1280 struct btrfs_device *device;
1281 struct block_device *bdev;
1282 struct list_head *cur;
1283 struct list_head *devices;
1284 struct super_block *sb = root->fs_info->sb;
1285 u64 total_bytes;
1286 int seeding_dev = 0;
1287 int ret = 0;
1288
1289 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1290 return -EINVAL;
1291
1292 bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
1293	if (IS_ERR(bdev))
1294		return PTR_ERR(bdev);
1295
1296 if (root->fs_info->fs_devices->seeding) {
1297 seeding_dev = 1;
1298 down_write(&sb->s_umount);
1299 mutex_lock(&uuid_mutex);
1300 }
1301
1302 filemap_write_and_wait(bdev->bd_inode->i_mapping);
1303 mutex_lock(&root->fs_info->volume_mutex);
1304
1305 devices = &root->fs_info->fs_devices->devices;
1306 list_for_each(cur, devices) {
1307 device = list_entry(cur, struct btrfs_device, dev_list);
1308 if (device->bdev == bdev) {
1309 ret = -EEXIST;
1310 goto error;
1311 }
1312 }
1313
1314 device = kzalloc(sizeof(*device), GFP_NOFS);
1315 if (!device) {
1316 /* we can safely leave the fs_devices entry around */
1317 ret = -ENOMEM;
1318 goto error;
1319 }
1320
1321 device->name = kstrdup(device_path, GFP_NOFS);
1322 if (!device->name) {
1323 kfree(device);
1324 ret = -ENOMEM;
1325 goto error;
1326 }
1327
1328 ret = find_next_devid(root, &device->devid);
1329 if (ret) {
1330 kfree(device);
1331 goto error;
1332 }
1333
1334 trans = btrfs_start_transaction(root, 1);
1335 lock_chunks(root);
1336
1337 device->barriers = 1;
1338 device->writeable = 1;
1339 device->work.func = pending_bios_fn;
1340 generate_random_uuid(device->uuid);
1341 spin_lock_init(&device->io_lock);
1342 device->generation = trans->transid;
1343 device->io_width = root->sectorsize;
1344 device->io_align = root->sectorsize;
1345 device->sector_size = root->sectorsize;
1346 device->total_bytes = i_size_read(bdev->bd_inode);
1347 device->dev_root = root->fs_info->dev_root;
1348 device->bdev = bdev;
1349 device->in_fs_metadata = 1;
1350 device->mode = 0;
1351 set_blocksize(device->bdev, 4096);
1352
1353 if (seeding_dev) {
1354 sb->s_flags &= ~MS_RDONLY;
1355 ret = btrfs_prepare_sprout(trans, root);
1356 BUG_ON(ret);
1357 }
1358
1359 device->fs_devices = root->fs_info->fs_devices;
1360 list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
1361 list_add(&device->dev_alloc_list,
1362 &root->fs_info->fs_devices->alloc_list);
1363 root->fs_info->fs_devices->num_devices++;
1364 root->fs_info->fs_devices->open_devices++;
1365 root->fs_info->fs_devices->rw_devices++;
1366 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1367
1368 total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
1369 btrfs_set_super_total_bytes(&root->fs_info->super_copy,
1370 total_bytes + device->total_bytes);
1371
1372 total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
1373 btrfs_set_super_num_devices(&root->fs_info->super_copy,
1374 total_bytes + 1);
1375
1376 if (seeding_dev) {
1377 ret = init_first_rw_device(trans, root, device);
1378 BUG_ON(ret);
1379 ret = btrfs_finish_sprout(trans, root);
1380 BUG_ON(ret);
1381 } else {
1382 ret = btrfs_add_device(trans, root, device);
1383 }
1384
1385 unlock_chunks(root);
1386 btrfs_commit_transaction(trans, root);
1387
1388 if (seeding_dev) {
1389 mutex_unlock(&uuid_mutex);
1390 up_write(&sb->s_umount);
1391
1392 ret = btrfs_relocate_sys_chunks(root);
1393 BUG_ON(ret);
1394 }
1395out:
1396 mutex_unlock(&root->fs_info->volume_mutex);
1397 return ret;
1398error:
1399 close_bdev_exclusive(bdev, 0);
1400 if (seeding_dev) {
1401 mutex_unlock(&uuid_mutex);
1402 up_write(&sb->s_umount);
1403 }
1404 goto out;
1405}
1406
1407static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1408 struct btrfs_device *device)
1409{
1410 int ret;
1411 struct btrfs_path *path;
1412 struct btrfs_root *root;
1413 struct btrfs_dev_item *dev_item;
1414 struct extent_buffer *leaf;
1415 struct btrfs_key key;
1416
1417 root = device->dev_root->fs_info->chunk_root;
1418
1419 path = btrfs_alloc_path();
1420 if (!path)
1421 return -ENOMEM;
1422
1423 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1424 key.type = BTRFS_DEV_ITEM_KEY;
1425 key.offset = device->devid;
1426
1427 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1428 if (ret < 0)
1429 goto out;
1430
1431 if (ret > 0) {
1432 ret = -ENOENT;
1433 goto out;
1434 }
1435
1436 leaf = path->nodes[0];
1437 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1438
1439 btrfs_set_device_id(leaf, dev_item, device->devid);
1440 btrfs_set_device_type(leaf, dev_item, device->type);
1441 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1442 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1443 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1444 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1445 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1446 btrfs_mark_buffer_dirty(leaf);
1447
1448out:
1449 btrfs_free_path(path);
1450 return ret;
1451}
1452
1453static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1454 struct btrfs_device *device, u64 new_size)
1455{
1456 struct btrfs_super_block *super_copy =
1457 &device->dev_root->fs_info->super_copy;
1458 u64 old_total = btrfs_super_total_bytes(super_copy);
1459 u64 diff = new_size - device->total_bytes;
1460
1461 if (!device->writeable)
1462 return -EACCES;
1463 if (new_size <= device->total_bytes)
1464 return -EINVAL;
1465
1466 btrfs_set_super_total_bytes(super_copy, old_total + diff);
1467 device->fs_devices->total_rw_bytes += diff;
1468
1469 device->total_bytes = new_size;
1470 return btrfs_update_device(trans, device);
1471}
1472
1473int btrfs_grow_device(struct btrfs_trans_handle *trans,
1474 struct btrfs_device *device, u64 new_size)
1475{
1476 int ret;
1477 lock_chunks(device->dev_root);
1478 ret = __btrfs_grow_device(trans, device, new_size);
1479 unlock_chunks(device->dev_root);
1480 return ret;
1481}
1482
1483static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1484 struct btrfs_root *root,
1485 u64 chunk_tree, u64 chunk_objectid,
1486 u64 chunk_offset)
1487{
1488 int ret;
1489 struct btrfs_path *path;
1490 struct btrfs_key key;
1491
1492 root = root->fs_info->chunk_root;
1493 path = btrfs_alloc_path();
1494 if (!path)
1495 return -ENOMEM;
1496
1497 key.objectid = chunk_objectid;
1498 key.offset = chunk_offset;
1499 key.type = BTRFS_CHUNK_ITEM_KEY;
1500
1501 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1502 BUG_ON(ret);
1503
1504 ret = btrfs_del_item(trans, root, path);
1505 BUG_ON(ret);
1506
1507 btrfs_free_path(path);
1508 return 0;
1509}
1510
1511static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1512 chunk_offset)
1513{
1514 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1515 struct btrfs_disk_key *disk_key;
1516 struct btrfs_chunk *chunk;
1517 u8 *ptr;
1518 int ret = 0;
1519 u32 num_stripes;
1520 u32 array_size;
1521 u32 len = 0;
1522 u32 cur;
1523 struct btrfs_key key;
1524
1525 array_size = btrfs_super_sys_array_size(super_copy);
1526
1527 ptr = super_copy->sys_chunk_array;
1528 cur = 0;
1529
1530 while (cur < array_size) {
1531 disk_key = (struct btrfs_disk_key *)ptr;
1532 btrfs_disk_key_to_cpu(&key, disk_key);
1533
1534 len = sizeof(*disk_key);
1535
1536 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1537 chunk = (struct btrfs_chunk *)(ptr + len);
1538 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1539 len += btrfs_chunk_item_size(num_stripes);
1540 } else {
1541 ret = -EIO;
1542 break;
1543 }
1544 if (key.objectid == chunk_objectid &&
1545 key.offset == chunk_offset) {
1546 memmove(ptr, ptr + len, array_size - (cur + len));
1547 array_size -= len;
1548 btrfs_set_super_sys_array_size(super_copy, array_size);
1549 } else {
1550 ptr += len;
1551 cur += len;
1552 }
1553 }
1554 return ret;
1555}
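
/*
 * sys_chunk_array is a packed sequence of (btrfs_disk_key, btrfs_chunk
 * plus inline stripes) pairs stored in the superblock itself, which is
 * why deleting an entry above is a memmove() over the tail rather than
 * a tree operation.
 */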
1556
1557static int btrfs_relocate_chunk(struct btrfs_root *root,
1558 u64 chunk_tree, u64 chunk_objectid,
1559 u64 chunk_offset)
1560{
1561 struct extent_map_tree *em_tree;
1562 struct btrfs_root *extent_root;
1563 struct btrfs_trans_handle *trans;
1564 struct extent_map *em;
1565 struct map_lookup *map;
1566 int ret;
1567 int i;
1568
1569 printk(KERN_INFO "btrfs relocating chunk %llu\n",
1570 (unsigned long long)chunk_offset);
1571 root = root->fs_info->chunk_root;
1572 extent_root = root->fs_info->extent_root;
1573 em_tree = &root->fs_info->mapping_tree.map_tree;
1574
1575 /* step one, relocate all the extents inside this chunk */
1576 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
1577 BUG_ON(ret);
1578
1579 trans = btrfs_start_transaction(root, 1);
1580 BUG_ON(!trans);
1581
1582 lock_chunks(root);
1583
1584 /*
1585 * step two, delete the device extents and the
1586 * chunk tree entries
1587 */
1588 spin_lock(&em_tree->lock);
1589 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1590 spin_unlock(&em_tree->lock);
1591
1592 BUG_ON(em->start > chunk_offset ||
1593 em->start + em->len < chunk_offset);
1594 map = (struct map_lookup *)em->bdev;
1595
1596 for (i = 0; i < map->num_stripes; i++) {
1597 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
1598 map->stripes[i].physical);
1599 BUG_ON(ret);
1600
1601 if (map->stripes[i].dev) {
1602 ret = btrfs_update_device(trans, map->stripes[i].dev);
1603 BUG_ON(ret);
1604 }
1605 }
1606 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
1607 chunk_offset);
1608
1609 BUG_ON(ret);
1610
1611 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
1612 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
1613 BUG_ON(ret);
1614 }
1615
1616 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
1617 BUG_ON(ret);
1618
1619 spin_lock(&em_tree->lock);
1620 remove_extent_mapping(em_tree, em);
1621 spin_unlock(&em_tree->lock);
1622
1623 kfree(map);
1624 em->bdev = NULL;
1625
1626 /* once for the tree */
1627 free_extent_map(em);
1628 /* once for us */
1629 free_extent_map(em);
1630
1631 unlock_chunks(root);
1632 btrfs_end_transaction(trans, root);
1633 return 0;
1634}
1635
1636static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
1637{
1638 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
1639 struct btrfs_path *path;
1640 struct extent_buffer *leaf;
1641 struct btrfs_chunk *chunk;
1642 struct btrfs_key key;
1643 struct btrfs_key found_key;
1644 u64 chunk_tree = chunk_root->root_key.objectid;
1645 u64 chunk_type;
1646 int ret;
1647
1648 path = btrfs_alloc_path();
1649 if (!path)
1650 return -ENOMEM;
1651
1652 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1653 key.offset = (u64)-1;
1654 key.type = BTRFS_CHUNK_ITEM_KEY;
1655
1656 while (1) {
1657 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
1658 if (ret < 0)
1659 goto error;
1660 BUG_ON(ret == 0);
1661
1662 ret = btrfs_previous_item(chunk_root, path, key.objectid,
1663 key.type);
1664 if (ret < 0)
1665 goto error;
1666 if (ret > 0)
1667 break;
1668
1669 leaf = path->nodes[0];
1670 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1671
1672 chunk = btrfs_item_ptr(leaf, path->slots[0],
1673 struct btrfs_chunk);
1674 chunk_type = btrfs_chunk_type(leaf, chunk);
1675 btrfs_release_path(chunk_root, path);
1676
1677 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
1678 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
1679 found_key.objectid,
1680 found_key.offset);
1681 BUG_ON(ret);
1682 }
1683
1684 if (found_key.offset == 0)
1685 break;
1686 key.offset = found_key.offset - 1;
1687 }
1688 ret = 0;
1689error:
1690 btrfs_free_path(path);
1691 return ret;
1692}
1693
1694static u64 div_factor(u64 num, int factor)
1695{
1696 if (factor == 10)
1697 return num;
1698 num *= factor;
1699 do_div(num, 10);
1700 return num;
1701}
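
/*
 * div_factor() returns factor tenths of num: div_factor(1000, 3) == 300,
 * and factor == 10 returns num unchanged.
 */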
1702
1703int btrfs_balance(struct btrfs_root *dev_root)
1704{
1705 int ret;
1706 struct list_head *cur;
1707 struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
1708 struct btrfs_device *device;
1709 u64 old_size;
1710 u64 size_to_free;
1711 struct btrfs_path *path;
1712 struct btrfs_key key;
1713 struct btrfs_chunk *chunk;
1714 struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
1715 struct btrfs_trans_handle *trans;
1716 struct btrfs_key found_key;
1717
1718 if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
1719 return -EROFS;
1720
1721 mutex_lock(&dev_root->fs_info->volume_mutex);
1722 dev_root = dev_root->fs_info->dev_root;
1723
1724	/* step one, make some room on all the devices */
1725 list_for_each(cur, devices) {
1726 device = list_entry(cur, struct btrfs_device, dev_list);
1727 old_size = device->total_bytes;
1728 size_to_free = div_factor(old_size, 1);
1729 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
1730 if (!device->writeable ||
1731 device->total_bytes - device->bytes_used > size_to_free)
1732 continue;
1733
1734 ret = btrfs_shrink_device(device, old_size - size_to_free);
1735 BUG_ON(ret);
1736
1737 trans = btrfs_start_transaction(dev_root, 1);
1738 BUG_ON(!trans);
1739
1740 ret = btrfs_grow_device(trans, device, old_size);
1741 BUG_ON(ret);
1742
1743 btrfs_end_transaction(trans, dev_root);
1744 }
1745
1746 /* step two, relocate all the chunks */
1747 path = btrfs_alloc_path();
1748 BUG_ON(!path);
1749
1750 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1751 key.offset = (u64)-1;
1752 key.type = BTRFS_CHUNK_ITEM_KEY;
1753
1754 while (1) {
1755 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
1756 if (ret < 0)
1757 goto error;
1758
1759 /*
1760		 * this shouldn't happen; it means the last relocate
1761 * failed
1762 */
1763 if (ret == 0)
1764 break;
1765
1766 ret = btrfs_previous_item(chunk_root, path, 0,
1767 BTRFS_CHUNK_ITEM_KEY);
1768 if (ret)
1769 break;
1770
1771 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1772 path->slots[0]);
1773 if (found_key.objectid != key.objectid)
1774 break;
1775
1776 chunk = btrfs_item_ptr(path->nodes[0],
1777 path->slots[0],
1778 struct btrfs_chunk);
1779 key.offset = found_key.offset;
1780 /* chunk zero is special */
1781 if (key.offset == 0)
1782 break;
1783
1784 btrfs_release_path(chunk_root, path);
1785 ret = btrfs_relocate_chunk(chunk_root,
1786 chunk_root->root_key.objectid,
1787 found_key.objectid,
1788 found_key.offset);
1789 BUG_ON(ret);
1790 }
1791 ret = 0;
1792error:
1793 btrfs_free_path(path);
1794 mutex_unlock(&dev_root->fs_info->volume_mutex);
1795 return ret;
1796}
1797
1798/*
1799 * shrinking a device means finding all of the device extents past
1800 * the new size, and then following the back refs to the chunks.
1801 * The chunk relocation code actually frees the device extent
1802 */
1803int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
1804{
1805 struct btrfs_trans_handle *trans;
1806 struct btrfs_root *root = device->dev_root;
1807 struct btrfs_dev_extent *dev_extent = NULL;
1808 struct btrfs_path *path;
1809 u64 length;
1810 u64 chunk_tree;
1811 u64 chunk_objectid;
1812 u64 chunk_offset;
1813 int ret;
1814 int slot;
1815 struct extent_buffer *l;
1816 struct btrfs_key key;
1817 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1818 u64 old_total = btrfs_super_total_bytes(super_copy);
1819 u64 diff = device->total_bytes - new_size;
1820
1821 if (new_size >= device->total_bytes)
1822 return -EINVAL;
1823
1824 path = btrfs_alloc_path();
1825 if (!path)
1826 return -ENOMEM;
1827
1828 trans = btrfs_start_transaction(root, 1);
1829 if (!trans) {
1830 ret = -ENOMEM;
1831 goto done;
1832 }
1833
1834 path->reada = 2;
1835
1836 lock_chunks(root);
1837
1838 device->total_bytes = new_size;
1839 if (device->writeable)
1840 device->fs_devices->total_rw_bytes -= diff;
1841 ret = btrfs_update_device(trans, device);
1842 if (ret) {
1843 unlock_chunks(root);
1844 btrfs_end_transaction(trans, root);
1845 goto done;
1846 }
1847 WARN_ON(diff > old_total);
1848 btrfs_set_super_total_bytes(super_copy, old_total - diff);
1849 unlock_chunks(root);
1850 btrfs_end_transaction(trans, root);
1851
1852 key.objectid = device->devid;
1853 key.offset = (u64)-1;
1854 key.type = BTRFS_DEV_EXTENT_KEY;
1855
1856 while (1) {
1857 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1858 if (ret < 0)
1859 goto done;
1860
1861 ret = btrfs_previous_item(root, path, 0, key.type);
1862 if (ret < 0)
1863 goto done;
1864 if (ret) {
1865 ret = 0;
1866 goto done;
1867 }
1868
1869 l = path->nodes[0];
1870 slot = path->slots[0];
1871 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1872
1873 if (key.objectid != device->devid)
1874 goto done;
1875
1876 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1877 length = btrfs_dev_extent_length(l, dev_extent);
1878
1879 if (key.offset + length <= new_size)
1880 goto done;
1881
1882 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
1883 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
1884 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
1885 btrfs_release_path(root, path);
1886
1887 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
1888 chunk_offset);
1889 if (ret)
1890 goto done;
1891 }
1892
1893done:
1894 btrfs_free_path(path);
1895 return ret;
1896}
1897
1898static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
1899 struct btrfs_root *root,
1900 struct btrfs_key *key,
1901 struct btrfs_chunk *chunk, int item_size)
1902{
1903 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1904 struct btrfs_disk_key disk_key;
1905 u32 array_size;
1906 u8 *ptr;
1907
1908 array_size = btrfs_super_sys_array_size(super_copy);
1909 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
1910 return -EFBIG;
1911
1912 ptr = super_copy->sys_chunk_array + array_size;
1913 btrfs_cpu_key_to_disk(&disk_key, key);
1914 memcpy(ptr, &disk_key, sizeof(disk_key));
1915 ptr += sizeof(disk_key);
1916 memcpy(ptr, chunk, item_size);
1917 item_size += sizeof(disk_key);
1918 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
1919 return 0;
1920}
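
/*
 * The (disk key, chunk item) pair appended above mirrors what went into
 * the chunk tree, so the system chunks needed to bootstrap reading the
 * chunk tree can be found from the superblock alone at mount time.
 */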
1921
1922static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
1923 int num_stripes, int sub_stripes)
1924{
1925 if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
1926 return calc_size;
1927 else if (type & BTRFS_BLOCK_GROUP_RAID10)
1928 return calc_size * (num_stripes / sub_stripes);
1929 else
1930 return calc_size * num_stripes;
1931}
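
/*
 * chunk_bytes_by_type() converts the per-device stripe size into usable
 * chunk bytes: with calc_size = 1G, RAID1/DUP stores two copies so the
 * chunk holds 1G, RAID0 over 4 stripes holds 4G, and RAID10 with
 * num_stripes = 4, sub_stripes = 2 holds 4 / 2 * 1G = 2G.
 */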
1932
1933static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
1934 struct btrfs_root *extent_root,
1935 struct map_lookup **map_ret,
1936 u64 *num_bytes, u64 *stripe_size,
1937 u64 start, u64 type)
1938{
1939 struct btrfs_fs_info *info = extent_root->fs_info;
1940 struct btrfs_device *device = NULL;
1941 struct btrfs_fs_devices *fs_devices = info->fs_devices;
1942 struct list_head *cur;
1943 struct map_lookup *map = NULL;
1944 struct extent_map_tree *em_tree;
1945 struct extent_map *em;
1946 struct list_head private_devs;
1947 int min_stripe_size = 1 * 1024 * 1024;
1948 u64 calc_size = 1024 * 1024 * 1024;
1949 u64 max_chunk_size = calc_size;
1950 u64 min_free;
1951 u64 avail;
1952 u64 max_avail = 0;
1953 u64 dev_offset;
1954 int num_stripes = 1;
1955 int min_stripes = 1;
1956 int sub_stripes = 0;
1957 int looped = 0;
1958 int ret;
1959 int index;
1960 int stripe_len = 64 * 1024;
1961
1962 if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
1963 (type & BTRFS_BLOCK_GROUP_DUP)) {
1964 WARN_ON(1);
1965 type &= ~BTRFS_BLOCK_GROUP_DUP;
1966 }
1967 if (list_empty(&fs_devices->alloc_list))
1968 return -ENOSPC;
1969
1970 if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
1971 num_stripes = fs_devices->rw_devices;
1972 min_stripes = 2;
1973 }
1974 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
1975 num_stripes = 2;
1976 min_stripes = 2;
1977 }
1978 if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
1979 num_stripes = min_t(u64, 2, fs_devices->rw_devices);
1980 if (num_stripes < 2)
1981 return -ENOSPC;
1982 min_stripes = 2;
1983 }
1984 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1985 num_stripes = fs_devices->rw_devices;
1986 if (num_stripes < 4)
1987 return -ENOSPC;
1988 num_stripes &= ~(u32)1;
1989 sub_stripes = 2;
1990 min_stripes = 4;
1991 }
1992
1993 if (type & BTRFS_BLOCK_GROUP_DATA) {
1994 max_chunk_size = 10 * calc_size;
1995 min_stripe_size = 64 * 1024 * 1024;
1996 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
1997 max_chunk_size = 4 * calc_size;
1998 min_stripe_size = 32 * 1024 * 1024;
1999 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
2000 calc_size = 8 * 1024 * 1024;
2001 max_chunk_size = calc_size * 2;
2002 min_stripe_size = 1 * 1024 * 1024;
2003 }
2004
2005 /* we don't want a chunk larger than 10% of writeable space */
2006 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
2007 max_chunk_size);
2008
2009again:
2010 if (!map || map->num_stripes != num_stripes) {
2011 kfree(map);
2012 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2013 if (!map)
2014 return -ENOMEM;
2015 map->num_stripes = num_stripes;
2016 }
2017
2018 if (calc_size * num_stripes > max_chunk_size) {
2019 calc_size = max_chunk_size;
2020 do_div(calc_size, num_stripes);
2021 do_div(calc_size, stripe_len);
2022 calc_size *= stripe_len;
2023 }
2024 /* we don't want tiny stripes */
2025 calc_size = max_t(u64, min_stripe_size, calc_size);
2026
2027 do_div(calc_size, stripe_len);
2028 calc_size *= stripe_len;
2029
2030 cur = fs_devices->alloc_list.next;
2031 index = 0;
2032
2033 if (type & BTRFS_BLOCK_GROUP_DUP)
2034 min_free = calc_size * 2;
2035 else
2036 min_free = calc_size;
2037
2038 /*
2039	 * we add 1MB because we never use the first 1MB of the device, unless
2040	 * we've looped, in which case we are likely already allocating the
2041	 * maximum amount of space left
2042 */
2043 if (!looped)
2044 min_free += 1024 * 1024;
2045
2046 INIT_LIST_HEAD(&private_devs);
2047 while (index < num_stripes) {
2048 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2049 BUG_ON(!device->writeable);
2050 if (device->total_bytes > device->bytes_used)
2051 avail = device->total_bytes - device->bytes_used;
2052 else
2053 avail = 0;
2054 cur = cur->next;
2055
2056 if (device->in_fs_metadata && avail >= min_free) {
2057 ret = find_free_dev_extent(trans, device,
2058 min_free, &dev_offset);
2059 if (ret == 0) {
2060 list_move_tail(&device->dev_alloc_list,
2061 &private_devs);
2062 map->stripes[index].dev = device;
2063 map->stripes[index].physical = dev_offset;
2064 index++;
2065 if (type & BTRFS_BLOCK_GROUP_DUP) {
2066 map->stripes[index].dev = device;
2067 map->stripes[index].physical =
2068 dev_offset + calc_size;
2069 index++;
2070 }
2071 }
2072 } else if (device->in_fs_metadata && avail > max_avail)
2073 max_avail = avail;
2074 if (cur == &fs_devices->alloc_list)
2075 break;
2076 }
2077 list_splice(&private_devs, &fs_devices->alloc_list);
2078 if (index < num_stripes) {
2079 if (index >= min_stripes) {
2080 num_stripes = index;
2081 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2082 num_stripes /= sub_stripes;
2083 num_stripes *= sub_stripes;
2084 }
2085 looped = 1;
2086 goto again;
2087 }
2088 if (!looped && max_avail > 0) {
2089 looped = 1;
2090 calc_size = max_avail;
2091 goto again;
2092 }
2093 kfree(map);
2094 return -ENOSPC;
2095 }
2096 map->sector_size = extent_root->sectorsize;
2097 map->stripe_len = stripe_len;
2098 map->io_align = stripe_len;
2099 map->io_width = stripe_len;
2100 map->type = type;
2101 map->num_stripes = num_stripes;
2102 map->sub_stripes = sub_stripes;
2103
2104 *map_ret = map;
2105 *stripe_size = calc_size;
2106 *num_bytes = chunk_bytes_by_type(type, calc_size,
2107 num_stripes, sub_stripes);
2108
2109 em = alloc_extent_map(GFP_NOFS);
2110 if (!em) {
2111 kfree(map);
2112 return -ENOMEM;
2113 }
2114 em->bdev = (struct block_device *)map;
2115 em->start = start;
2116 em->len = *num_bytes;
2117 em->block_start = 0;
2118 em->block_len = em->len;
2119
2120 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
2121 spin_lock(&em_tree->lock);
2122 ret = add_extent_mapping(em_tree, em);
2123 spin_unlock(&em_tree->lock);
2124 BUG_ON(ret);
2125 free_extent_map(em);
2126
2127 ret = btrfs_make_block_group(trans, extent_root, 0, type,
2128 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2129 start, *num_bytes);
2130 BUG_ON(ret);
2131
2132 index = 0;
2133 while (index < map->num_stripes) {
2134 device = map->stripes[index].dev;
2135 dev_offset = map->stripes[index].physical;
2136
2137 ret = btrfs_alloc_dev_extent(trans, device,
2138 info->chunk_root->root_key.objectid,
2139 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2140 start, dev_offset, calc_size);
2141 BUG_ON(ret);
2142 index++;
2143 }
2144
2145 return 0;
2146}
2147
2148static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
2149 struct btrfs_root *extent_root,
2150 struct map_lookup *map, u64 chunk_offset,
2151 u64 chunk_size, u64 stripe_size)
2152{
2153 u64 dev_offset;
2154 struct btrfs_key key;
2155 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2156 struct btrfs_device *device;
2157 struct btrfs_chunk *chunk;
2158 struct btrfs_stripe *stripe;
2159 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
2160 int index = 0;
2161 int ret;
2162
2163 chunk = kzalloc(item_size, GFP_NOFS);
2164 if (!chunk)
2165 return -ENOMEM;
2166
2167 index = 0;
2168 while (index < map->num_stripes) {
2169 device = map->stripes[index].dev;
2170 device->bytes_used += stripe_size;
2171 ret = btrfs_update_device(trans, device);
2172 BUG_ON(ret);
2173 index++;
2174 }
2175
2176 index = 0;
2177 stripe = &chunk->stripe;
2178 while (index < map->num_stripes) {
2179 device = map->stripes[index].dev;
2180 dev_offset = map->stripes[index].physical;
2181
2182 btrfs_set_stack_stripe_devid(stripe, device->devid);
2183 btrfs_set_stack_stripe_offset(stripe, dev_offset);
2184 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
2185 stripe++;
2186 index++;
2187 }
2188
2189 btrfs_set_stack_chunk_length(chunk, chunk_size);
2190 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
2191 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
2192 btrfs_set_stack_chunk_type(chunk, map->type);
2193 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
2194 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
2195 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
2196 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
2197 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
2198
2199 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2200 key.type = BTRFS_CHUNK_ITEM_KEY;
2201 key.offset = chunk_offset;
2202
2203 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
2204 BUG_ON(ret);
2205
2206 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2207 ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
2208 item_size);
2209 BUG_ON(ret);
2210 }
2211 kfree(chunk);
2212 return 0;
2213}
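
/*
 * The chunk item written above is variable sized: a struct btrfs_chunk
 * followed inline by one struct btrfs_stripe per copy, which is why
 * item_size comes from btrfs_chunk_item_size(map->num_stripes).
 */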
2214
2215/*
2216 * Chunk allocation falls into two parts. The first part does the work
2217 * that makes the newly allocated chunk usable, but does not do any
2218 * operation that modifies the chunk tree. The second part does the work
2219 * that requires modifying the chunk tree. This division is important for
2220 * the bootstrap process of adding storage to a seed btrfs.
2221 */
2222int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2223 struct btrfs_root *extent_root, u64 type)
2224{
2225 u64 chunk_offset;
2226 u64 chunk_size;
2227 u64 stripe_size;
2228 struct map_lookup *map;
2229 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2230 int ret;
2231
2232 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2233 &chunk_offset);
2234 if (ret)
2235 return ret;
2236
2237 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2238 &stripe_size, chunk_offset, type);
2239 if (ret)
2240 return ret;
2241
2242 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2243 chunk_size, stripe_size);
2244 BUG_ON(ret);
2245 return 0;
2246}
2247
2248static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
2249 struct btrfs_root *root,
2250 struct btrfs_device *device)
2251{
2252 u64 chunk_offset;
2253 u64 sys_chunk_offset;
2254 u64 chunk_size;
2255 u64 sys_chunk_size;
2256 u64 stripe_size;
2257 u64 sys_stripe_size;
2258 u64 alloc_profile;
2259 struct map_lookup *map;
2260 struct map_lookup *sys_map;
2261 struct btrfs_fs_info *fs_info = root->fs_info;
2262 struct btrfs_root *extent_root = fs_info->extent_root;
2263 int ret;
2264
2265 ret = find_next_chunk(fs_info->chunk_root,
2266 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
2267 BUG_ON(ret);
2268
2269 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
2270 (fs_info->metadata_alloc_profile &
2271 fs_info->avail_metadata_alloc_bits);
2272 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2273
2274 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2275 &stripe_size, chunk_offset, alloc_profile);
2276 BUG_ON(ret);
2277
2278 sys_chunk_offset = chunk_offset + chunk_size;
2279
2280 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
2281 (fs_info->system_alloc_profile &
2282 fs_info->avail_system_alloc_bits);
2283 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2284
2285 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
2286 &sys_chunk_size, &sys_stripe_size,
2287 sys_chunk_offset, alloc_profile);
2288 BUG_ON(ret);
2289
2290 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
2291 BUG_ON(ret);
2292
2293 /*
2294	 * Modifying the chunk tree needs to allocate new blocks from both
2295	 * the system block group and the metadata block group. So we can
2296	 * only do operations that require modifying the chunk tree after
2297	 * both block groups have been created.
2298 */
2299 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2300 chunk_size, stripe_size);
2301 BUG_ON(ret);
2302
2303 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
2304 sys_chunk_offset, sys_chunk_size,
2305 sys_stripe_size);
2306 BUG_ON(ret);
2307 return 0;
2308}
2309
2310int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
2311{
2312 struct extent_map *em;
2313 struct map_lookup *map;
2314 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2315 int readonly = 0;
2316 int i;
2317
2318 spin_lock(&map_tree->map_tree.lock);
2319 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2320 spin_unlock(&map_tree->map_tree.lock);
2321 if (!em)
2322 return 1;
2323
2324 map = (struct map_lookup *)em->bdev;
2325 for (i = 0; i < map->num_stripes; i++) {
2326 if (!map->stripes[i].dev->writeable) {
2327 readonly = 1;
2328 break;
2329 }
2330 }
2331 free_extent_map(em);
2332 return readonly;
2333}
2334
2335void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
2336{
2337 extent_map_tree_init(&tree->map_tree, GFP_NOFS);
2338}
2339
2340void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
2341{
2342 struct extent_map *em;
2343
2344 while (1) {
2345 spin_lock(&tree->map_tree.lock);
2346 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
2347 if (em)
2348 remove_extent_mapping(&tree->map_tree, em);
2349 spin_unlock(&tree->map_tree.lock);
2350 if (!em)
2351 break;
2352 kfree(em->bdev);
2353 /* once for us */
2354 free_extent_map(em);
2355 /* once for the tree */
2356 free_extent_map(em);
2357 }
2358}
2359
2360int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
2361{
2362 struct extent_map *em;
2363 struct map_lookup *map;
2364 struct extent_map_tree *em_tree = &map_tree->map_tree;
2365 int ret;
2366
2367 spin_lock(&em_tree->lock);
2368 em = lookup_extent_mapping(em_tree, logical, len);
2369 spin_unlock(&em_tree->lock);
2370 BUG_ON(!em);
2371
2372 BUG_ON(em->start > logical || em->start + em->len < logical);
2373 map = (struct map_lookup *)em->bdev;
2374 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
2375 ret = map->num_stripes;
2376 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2377 ret = map->sub_stripes;
2378 else
2379 ret = 1;
2380 free_extent_map(em);
2381 return ret;
2382}
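
/*
 * Copy counts returned above: DUP and RAID1 chunks keep num_stripes
 * full copies, RAID10 keeps sub_stripes (normally 2), and RAID0 or
 * single chunks keep just one.
 */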
2383
2384static int find_live_mirror(struct map_lookup *map, int first, int num,
2385 int optimal)
2386{
2387 int i;
2388 if (map->stripes[optimal].dev->bdev)
2389 return optimal;
2390 for (i = first; i < first + num; i++) {
2391 if (map->stripes[i].dev->bdev)
2392 return i;
2393 }
2394 /* we couldn't find one that doesn't fail. Just return something
2395 * and the io error handling code will clean up eventually
2396 */
2397 return optimal;
2398}
2399
2400static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2401 u64 logical, u64 *length,
2402 struct btrfs_multi_bio **multi_ret,
2403 int mirror_num, struct page *unplug_page)
2404{
2405 struct extent_map *em;
2406 struct map_lookup *map;
2407 struct extent_map_tree *em_tree = &map_tree->map_tree;
2408 u64 offset;
2409 u64 stripe_offset;
2410 u64 stripe_nr;
2411 int stripes_allocated = 8;
2412 int stripes_required = 1;
2413 int stripe_index;
2414 int i;
2415 int num_stripes;
2416 int max_errors = 0;
2417 struct btrfs_multi_bio *multi = NULL;
2418
2419 if (multi_ret && !(rw & (1 << BIO_RW)))
2420 stripes_allocated = 1;
2421again:
2422 if (multi_ret) {
2423 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
2424 GFP_NOFS);
2425 if (!multi)
2426 return -ENOMEM;
2427
2428 atomic_set(&multi->error, 0);
2429 }
2430
2431 spin_lock(&em_tree->lock);
2432 em = lookup_extent_mapping(em_tree, logical, *length);
2433 spin_unlock(&em_tree->lock);
2434
2435 if (!em && unplug_page)
2436 return 0;
2437
2438 if (!em) {
2439 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
2440 (unsigned long long)logical,
2441 (unsigned long long)*length);
2442 BUG();
2443 }
2444
2445 BUG_ON(em->start > logical || em->start + em->len < logical);
2446 map = (struct map_lookup *)em->bdev;
2447 offset = logical - em->start;
2448
2449 if (mirror_num > map->num_stripes)
2450 mirror_num = 0;
2451
2452 /* if our multi bio struct is too small, back off and try again */
2453 if (rw & (1 << BIO_RW)) {
2454 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
2455 BTRFS_BLOCK_GROUP_DUP)) {
2456 stripes_required = map->num_stripes;
2457 max_errors = 1;
2458 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2459 stripes_required = map->sub_stripes;
2460 max_errors = 1;
2461 }
2462 }
2463 if (multi_ret && rw == WRITE &&
2464 stripes_allocated < stripes_required) {
2465 stripes_allocated = map->num_stripes;
2466 free_extent_map(em);
2467 kfree(multi);
2468 goto again;
2469 }
2470 stripe_nr = offset;
2471 /*
2472 * stripe_nr counts the total number of stripes we have to stride
2473 * to get to this block
2474 */
2475 do_div(stripe_nr, map->stripe_len);
2476
2477 stripe_offset = stripe_nr * map->stripe_len;
2478 BUG_ON(offset < stripe_offset);
2479
2480	/* stripe_offset is the offset of this block in its stripe */
2481 stripe_offset = offset - stripe_offset;
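	/*
	 * e.g. with stripe_len = 64K and offset = 200K: stripe_nr = 3 and
	 * stripe_offset = 200K - 3 * 64K = 8K into that stripe
	 */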
2482
2483 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2484 BTRFS_BLOCK_GROUP_RAID10 |
2485 BTRFS_BLOCK_GROUP_DUP)) {
2486 /* we limit the length of each bio to what fits in a stripe */
2487 *length = min_t(u64, em->len - offset,
2488 map->stripe_len - stripe_offset);
2489 } else {
2490 *length = em->len - offset;
2491 }
2492
2493 if (!multi_ret && !unplug_page)
2494 goto out;
2495
2496 num_stripes = 1;
2497 stripe_index = 0;
2498 if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2499 if (unplug_page || (rw & (1 << BIO_RW)))
2500 num_stripes = map->num_stripes;
2501 else if (mirror_num)
2502 stripe_index = mirror_num - 1;
2503 else {
2504 stripe_index = find_live_mirror(map, 0,
2505 map->num_stripes,
2506 current->pid % map->num_stripes);
2507 }
2508
2509 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2510 if (rw & (1 << BIO_RW))
2511 num_stripes = map->num_stripes;
2512 else if (mirror_num)
2513 stripe_index = mirror_num - 1;
2514
2515 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2516 int factor = map->num_stripes / map->sub_stripes;
2517
2518 stripe_index = do_div(stripe_nr, factor);
2519 stripe_index *= map->sub_stripes;
2520
2521 if (unplug_page || (rw & (1 << BIO_RW)))
2522 num_stripes = map->sub_stripes;
2523 else if (mirror_num)
2524 stripe_index += mirror_num - 1;
2525 else {
2526 stripe_index = find_live_mirror(map, stripe_index,
2527 map->sub_stripes, stripe_index +
2528 current->pid % map->sub_stripes);
2529 }
2530 } else {
2531 /*
2532 * after this do_div call, stripe_nr is the number of stripes
2533 * on this device we have to walk to find the data, and
2534 * stripe_index is the number of our device in the stripe array
2535 */
2536 stripe_index = do_div(stripe_nr, map->num_stripes);
2537 }
2538 BUG_ON(stripe_index >= map->num_stripes);
2539
2540 for (i = 0; i < num_stripes; i++) {
2541 if (unplug_page) {
2542 struct btrfs_device *device;
2543 struct backing_dev_info *bdi;
2544
2545 device = map->stripes[stripe_index].dev;
2546 if (device->bdev) {
2547 bdi = blk_get_backing_dev_info(device->bdev);
2548 if (bdi->unplug_io_fn)
2549 bdi->unplug_io_fn(bdi, unplug_page);
2550 }
2551 } else {
2552 multi->stripes[i].physical =
2553 map->stripes[stripe_index].physical +
2554 stripe_offset + stripe_nr * map->stripe_len;
2555 multi->stripes[i].dev = map->stripes[stripe_index].dev;
2556 }
2557 stripe_index++;
2558 }
2559 if (multi_ret) {
2560 *multi_ret = multi;
2561 multi->num_stripes = num_stripes;
2562 multi->max_errors = max_errors;
2563 }
2564out:
2565 free_extent_map(em);
2566 return 0;
2567}
2568
2569int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2570 u64 logical, u64 *length,
2571 struct btrfs_multi_bio **multi_ret, int mirror_num)
2572{
2573 return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
2574 mirror_num, NULL);
2575}
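/*
 * a minimal usage sketch (assumed caller code; use_stripe() is a
 * hypothetical helper), mirroring what btrfs_map_bio() does below.
 * The returned multi-bio is owned by the caller and must be kfree()d:
 *
 *	struct btrfs_multi_bio *multi = NULL;
 *	u64 map_length = length;
 *	ret = btrfs_map_block(map_tree, READ, logical, &map_length,
 *			      &multi, 0);
 *	if (ret == 0) {
 *		for (i = 0; i < multi->num_stripes; i++)
 *			use_stripe(&multi->stripes[i]);
 *		kfree(multi);
 *	}
 */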
2576
2577int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
2578 u64 chunk_start, u64 physical, u64 devid,
2579 u64 **logical, int *naddrs, int *stripe_len)
2580{
2581 struct extent_map_tree *em_tree = &map_tree->map_tree;
2582 struct extent_map *em;
2583 struct map_lookup *map;
2584 u64 *buf;
2585 u64 bytenr;
2586 u64 length;
2587 u64 stripe_nr;
2588 int i, j, nr = 0;
2589
2590 spin_lock(&em_tree->lock);
2591 em = lookup_extent_mapping(em_tree, chunk_start, 1);
2592 spin_unlock(&em_tree->lock);
2593
2594 BUG_ON(!em || em->start != chunk_start);
2595 map = (struct map_lookup *)em->bdev;
2596
2597 length = em->len;
2598 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2599 do_div(length, map->num_stripes / map->sub_stripes);
2600 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
2601 do_div(length, map->num_stripes);
2602
2603 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
2604 BUG_ON(!buf);
2605
2606 for (i = 0; i < map->num_stripes; i++) {
2607 if (devid && map->stripes[i].dev->devid != devid)
2608 continue;
2609 if (map->stripes[i].physical > physical ||
2610 map->stripes[i].physical + length <= physical)
2611 continue;
2612
2613 stripe_nr = physical - map->stripes[i].physical;
2614 do_div(stripe_nr, map->stripe_len);
2615
2616 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2617 stripe_nr = stripe_nr * map->num_stripes + i;
2618 do_div(stripe_nr, map->sub_stripes);
2619 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2620 stripe_nr = stripe_nr * map->num_stripes + i;
2621 }
2622 bytenr = chunk_start + stripe_nr * map->stripe_len;
2623 WARN_ON(nr >= map->num_stripes);
2624 for (j = 0; j < nr; j++) {
2625 if (buf[j] == bytenr)
2626 break;
2627 }
2628 if (j == nr) {
2629 WARN_ON(nr >= map->num_stripes);
2630 buf[nr++] = bytenr;
2631 }
2632 }
2633
2634 for (i = 0; i < nr; i++) {
2635 struct btrfs_multi_bio *multi;
2636 struct btrfs_bio_stripe *stripe;
2637 int ret;
2638
2639 length = 1;
2640 ret = btrfs_map_block(map_tree, WRITE, buf[i],
2641 &length, &multi, 0);
2642 BUG_ON(ret);
2643
2644 stripe = multi->stripes;
2645 for (j = 0; j < multi->num_stripes; j++) {
2646 if (physical >= stripe[j].physical &&
2647 physical < stripe[j].physical + length)
2648 break;
2649 }
2650 BUG_ON(j >= multi->num_stripes);
2651 kfree(multi);
2652 }
2653
2654 *logical = buf;
2655 *naddrs = nr;
2656 *stripe_len = map->stripe_len;
2657
2658 free_extent_map(em);
2659 return 0;
2660}
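/*
 * note for callers of btrfs_rmap_block(): the *logical array handed back
 * is kzalloc()ed here and holds nr logical byte addresses that map to the
 * given physical/devid pair; it is the caller's job to kfree() it
 */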
2661
2662int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
2663 u64 logical, struct page *page)
2664{
2665 u64 length = PAGE_CACHE_SIZE;
2666 return __btrfs_map_block(map_tree, READ, logical, &length,
2667 NULL, 0, page);
2668}
2669
2670static void end_bio_multi_stripe(struct bio *bio, int err)
2671{
2672 struct btrfs_multi_bio *multi = bio->bi_private;
2673 int is_orig_bio = 0;
2674
2675 if (err)
2676 atomic_inc(&multi->error);
2677
2678 if (bio == multi->orig_bio)
2679 is_orig_bio = 1;
2680
2681 if (atomic_dec_and_test(&multi->stripes_pending)) {
2682 if (!is_orig_bio) {
2683 bio_put(bio);
2684 bio = multi->orig_bio;
2685 }
2686 bio->bi_private = multi->private;
2687 bio->bi_end_io = multi->end_io;
2688 /* only send an error to the higher layers if it is
2689 * beyond the tolerance of the multi-bio
2690 */
2691 if (atomic_read(&multi->error) > multi->max_errors) {
2692 err = -EIO;
2693 } else if (err) {
2694 /*
2695 * this bio is actually up to date, we didn't
2696 * go over the max number of errors
2697 */
2698 set_bit(BIO_UPTODATE, &bio->bi_flags);
2699 err = 0;
2700 }
2701 kfree(multi);
2702
2703 bio_endio(bio, err);
2704 } else if (!is_orig_bio) {
2705 bio_put(bio);
2706 }
2707}
2708
2709struct async_sched {
2710 struct bio *bio;
2711 int rw;
2712 struct btrfs_fs_info *info;
2713 struct btrfs_work work;
2714};
2715
2716/*
2717 * see run_scheduled_bios for a description of why bios are collected for
2718 * async submit.
2719 *
2720 * This will add one bio to the pending list for a device and make sure
2721 * the work struct is scheduled.
2722 */
2723static noinline int schedule_bio(struct btrfs_root *root,
2724 struct btrfs_device *device,
2725 int rw, struct bio *bio)
2726{
2727 int should_queue = 1;
2728
2729 /* don't bother with additional async steps for reads, right now */
2730 if (!(rw & (1 << BIO_RW))) {
2731 bio_get(bio);
2732 submit_bio(rw, bio);
2733 bio_put(bio);
2734 return 0;
2735 }
2736
2737 /*
2738 * nr_async_bios allows us to reliably return congestion to the
2739 * higher layers. Otherwise, the async bio makes it appear we have
2740 * made progress against dirty pages when we've really just put it
2741 * on a queue for later
2742 */
2743 atomic_inc(&root->fs_info->nr_async_bios);
2744 WARN_ON(bio->bi_next);
2745 bio->bi_next = NULL;
2746 bio->bi_rw |= rw;
2747
2748 spin_lock(&device->io_lock);
2749
2750 if (device->pending_bio_tail)
2751 device->pending_bio_tail->bi_next = bio;
2752
2753 device->pending_bio_tail = bio;
2754 if (!device->pending_bios)
2755 device->pending_bios = bio;
2756 if (device->running_pending)
2757 should_queue = 0;
2758
2759 spin_unlock(&device->io_lock);
2760
2761 if (should_queue)
2762 btrfs_queue_worker(&root->fs_info->submit_workers,
2763 &device->work);
2764 return 0;
2765}
2766
2767int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
2768 int mirror_num, int async_submit)
2769{
2770 struct btrfs_mapping_tree *map_tree;
2771 struct btrfs_device *dev;
2772 struct bio *first_bio = bio;
2773 u64 logical = (u64)bio->bi_sector << 9;
2774 u64 length = 0;
2775 u64 map_length;
2776 struct btrfs_multi_bio *multi = NULL;
2777 int ret;
2778 int dev_nr = 0;
2779 int total_devs = 1;
2780
2781 length = bio->bi_size;
2782 map_tree = &root->fs_info->mapping_tree;
2783 map_length = length;
2784
2785 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
2786 mirror_num);
2787 BUG_ON(ret);
2788
2789 total_devs = multi->num_stripes;
2790 if (map_length < length) {
2791 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
2792 "len %llu\n", (unsigned long long)logical,
2793 (unsigned long long)length,
2794 (unsigned long long)map_length);
2795 BUG();
2796 }
2797 multi->end_io = first_bio->bi_end_io;
2798 multi->private = first_bio->bi_private;
2799 multi->orig_bio = first_bio;
2800 atomic_set(&multi->stripes_pending, multi->num_stripes);
2801
2802 while (dev_nr < total_devs) {
2803 if (total_devs > 1) {
2804 if (dev_nr < total_devs - 1) {
2805 bio = bio_clone(first_bio, GFP_NOFS);
2806 BUG_ON(!bio);
2807 } else {
2808 bio = first_bio;
2809 }
2810 bio->bi_private = multi;
2811 bio->bi_end_io = end_bio_multi_stripe;
2812 }
2813 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
2814 dev = multi->stripes[dev_nr].dev;
2815 BUG_ON(rw == WRITE && !dev->writeable);
2816 if (dev && dev->bdev) {
2817 bio->bi_bdev = dev->bdev;
2818 if (async_submit)
2819 schedule_bio(root, dev, rw, bio);
2820 else
2821 submit_bio(rw, bio);
2822 } else {
2823 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
2824 bio->bi_sector = logical >> 9;
2825 bio_endio(bio, -EIO);
2826 }
2827 dev_nr++;
2828 }
2829 if (total_devs == 1)
2830 kfree(multi);
2831 return 0;
2832}
2833
2834struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
2835 u8 *uuid, u8 *fsid)
2836{
2837 struct btrfs_device *device;
2838 struct btrfs_fs_devices *cur_devices;
2839
2840 cur_devices = root->fs_info->fs_devices;
2841 while (cur_devices) {
2842 if (!fsid ||
2843 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
2844 device = __find_device(&cur_devices->devices,
2845 devid, uuid);
2846 if (device)
2847 return device;
2848 }
2849 cur_devices = cur_devices->seed;
2850 }
2851 return NULL;
2852}
2853
2854static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
2855 u64 devid, u8 *dev_uuid)
2856{
2857 struct btrfs_device *device;
2858 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2859
2860 device = kzalloc(sizeof(*device), GFP_NOFS);
2861 if (!device)
2862 return NULL;
2863 list_add(&device->dev_list,
2864 &fs_devices->devices);
2865 device->barriers = 1;
2866 device->dev_root = root->fs_info->dev_root;
2867 device->devid = devid;
2868 device->work.func = pending_bios_fn;
2869 device->fs_devices = fs_devices;
2870 fs_devices->num_devices++;
2871 spin_lock_init(&device->io_lock);
2872 INIT_LIST_HEAD(&device->dev_alloc_list);
2873 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
2874 return device;
2875}
2876
2877static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
2878 struct extent_buffer *leaf,
2879 struct btrfs_chunk *chunk)
2880{
2881 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2882 struct map_lookup *map;
2883 struct extent_map *em;
2884 u64 logical;
2885 u64 length;
2886 u64 devid;
2887 u8 uuid[BTRFS_UUID_SIZE];
2888 int num_stripes;
2889 int ret;
2890 int i;
2891
2892 logical = key->offset;
2893 length = btrfs_chunk_length(leaf, chunk);
2894
2895 spin_lock(&map_tree->map_tree.lock);
2896 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
2897 spin_unlock(&map_tree->map_tree.lock);
2898
2899 /* already mapped? */
2900 if (em && em->start <= logical && em->start + em->len > logical) {
2901 free_extent_map(em);
2902 return 0;
2903 } else if (em) {
2904 free_extent_map(em);
2905 }
2906
2911 em = alloc_extent_map(GFP_NOFS);
2912 if (!em)
2913 return -ENOMEM;
2914 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2915 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2916 if (!map) {
2917 free_extent_map(em);
2918 return -ENOMEM;
2919 }
2920
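	/* stash the map_lookup in the otherwise unused bdev pointer */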
2921 em->bdev = (struct block_device *)map;
2922 em->start = logical;
2923 em->len = length;
2924 em->block_start = 0;
2925 em->block_len = em->len;
2926
2927 map->num_stripes = num_stripes;
2928 map->io_width = btrfs_chunk_io_width(leaf, chunk);
2929 map->io_align = btrfs_chunk_io_align(leaf, chunk);
2930 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
2931 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
2932 map->type = btrfs_chunk_type(leaf, chunk);
2933 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
2934 for (i = 0; i < num_stripes; i++) {
2935 map->stripes[i].physical =
2936 btrfs_stripe_offset_nr(leaf, chunk, i);
2937 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
2938 read_extent_buffer(leaf, uuid, (unsigned long)
2939 btrfs_stripe_dev_uuid_nr(chunk, i),
2940 BTRFS_UUID_SIZE);
2941 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
2942 NULL);
2943 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
2944 kfree(map);
2945 free_extent_map(em);
2946 return -EIO;
2947 }
2948 if (!map->stripes[i].dev) {
2949 map->stripes[i].dev =
2950 add_missing_dev(root, devid, uuid);
2951 if (!map->stripes[i].dev) {
2952 kfree(map);
2953 free_extent_map(em);
2954 return -EIO;
2955 }
2956 }
2957 map->stripes[i].dev->in_fs_metadata = 1;
2958 }
2959
2960 spin_lock(&map_tree->map_tree.lock);
2961 ret = add_extent_mapping(&map_tree->map_tree, em);
2962 spin_unlock(&map_tree->map_tree.lock);
2963 BUG_ON(ret);
2964 free_extent_map(em);
2965
2966 return 0;
2967}
2968
2969static int fill_device_from_item(struct extent_buffer *leaf,
2970 struct btrfs_dev_item *dev_item,
2971 struct btrfs_device *device)
2972{
2973 unsigned long ptr;
2974
2975 device->devid = btrfs_device_id(leaf, dev_item);
2976 device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
2977 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
2978 device->type = btrfs_device_type(leaf, dev_item);
2979 device->io_align = btrfs_device_io_align(leaf, dev_item);
2980 device->io_width = btrfs_device_io_width(leaf, dev_item);
2981 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
2982
2983 ptr = (unsigned long)btrfs_device_uuid(dev_item);
2984 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
2985
2986 return 0;
2987}
2988
2989static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
2990{
2991 struct btrfs_fs_devices *fs_devices;
2992 int ret;
2993
2994 mutex_lock(&uuid_mutex);
2995
2996 fs_devices = root->fs_info->fs_devices->seed;
2997 while (fs_devices) {
2998 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
2999 ret = 0;
3000 goto out;
3001 }
3002 fs_devices = fs_devices->seed;
3003 }
3004
3005 fs_devices = find_fsid(fsid);
3006 if (!fs_devices) {
3007 ret = -ENOENT;
3008 goto out;
3009 }
3010
3011 fs_devices = clone_fs_devices(fs_devices);
3012 if (IS_ERR(fs_devices)) {
3013 ret = PTR_ERR(fs_devices);
3014 goto out;
3015 }
3016
3017 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
3018 root->fs_info->bdev_holder);
3019 if (ret)
3020 goto out;
3021
3022 if (!fs_devices->seeding) {
3023 __btrfs_close_devices(fs_devices);
3024 free_fs_devices(fs_devices);
3025 ret = -EINVAL;
3026 goto out;
3027 }
3028
3029 fs_devices->seed = root->fs_info->fs_devices->seed;
3030 root->fs_info->fs_devices->seed = fs_devices;
3031out:
3032 mutex_unlock(&uuid_mutex);
3033 return ret;
3034}
3035
3036static int read_one_dev(struct btrfs_root *root,
3037 struct extent_buffer *leaf,
3038 struct btrfs_dev_item *dev_item)
3039{
3040 struct btrfs_device *device;
3041 u64 devid;
3042 int ret;
3043 u8 fs_uuid[BTRFS_UUID_SIZE];
3044 u8 dev_uuid[BTRFS_UUID_SIZE];
3045
3046 devid = btrfs_device_id(leaf, dev_item);
3047 read_extent_buffer(leaf, dev_uuid,
3048 (unsigned long)btrfs_device_uuid(dev_item),
3049 BTRFS_UUID_SIZE);
3050 read_extent_buffer(leaf, fs_uuid,
3051 (unsigned long)btrfs_device_fsid(dev_item),
3052 BTRFS_UUID_SIZE);
3053
3054 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
3055 ret = open_seed_devices(root, fs_uuid);
3056 if (ret && !btrfs_test_opt(root, DEGRADED))
3057 return ret;
3058 }
3059
3060 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
3061 if (!device || !device->bdev) {
3062 if (!btrfs_test_opt(root, DEGRADED))
3063 return -EIO;
3064
3065 if (!device) {
3066 printk(KERN_WARNING "warning devid %llu missing\n",
3067 (unsigned long long)devid);
3068 device = add_missing_dev(root, devid, dev_uuid);
3069 if (!device)
3070 return -ENOMEM;
3071 }
3072 }
3073
3074 if (device->fs_devices != root->fs_info->fs_devices) {
3075 BUG_ON(device->writeable);
3076 if (device->generation !=
3077 btrfs_device_generation(leaf, dev_item))
3078 return -EINVAL;
3079 }
3080
3081 fill_device_from_item(leaf, dev_item, device);
3082 device->dev_root = root->fs_info->dev_root;
3083 device->in_fs_metadata = 1;
3084 if (device->writeable)
3085 device->fs_devices->total_rw_bytes += device->total_bytes;
3086 ret = 0;
3087 return ret;
3088}
3089
3090int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
3091{
3092 struct btrfs_dev_item *dev_item;
3093
3094 dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
3095 dev_item);
3096 return read_one_dev(root, buf, dev_item);
3097}
3098
3099int btrfs_read_sys_array(struct btrfs_root *root)
3100{
3101 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
3102 struct extent_buffer *sb;
3103 struct btrfs_disk_key *disk_key;
3104 struct btrfs_chunk *chunk;
3105 u8 *ptr;
3106 unsigned long sb_ptr;
3107 int ret = 0;
3108 u32 num_stripes;
3109 u32 array_size;
3110 u32 len = 0;
3111 u32 cur;
3112 struct btrfs_key key;
3113
3114 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
3115 BTRFS_SUPER_INFO_SIZE);
3116 if (!sb)
3117 return -ENOMEM;
3118 btrfs_set_buffer_uptodate(sb);
3119 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
3120 array_size = btrfs_super_sys_array_size(super_copy);
3121
3122 ptr = super_copy->sys_chunk_array;
3123 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
3124 cur = 0;
3125
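	/*
	 * the sys_chunk_array is a packed run of (btrfs_disk_key,
	 * btrfs_chunk + its stripes) pairs; walk it one key at a time
	 */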
3126 while (cur < array_size) {
3127 disk_key = (struct btrfs_disk_key *)ptr;
3128 btrfs_disk_key_to_cpu(&key, disk_key);
3129
3130 len = sizeof(*disk_key); ptr += len;
3131 sb_ptr += len;
3132 cur += len;
3133
3134 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3135 chunk = (struct btrfs_chunk *)sb_ptr;
3136 ret = read_one_chunk(root, &key, sb, chunk);
3137 if (ret)
3138 break;
3139 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
3140 len = btrfs_chunk_item_size(num_stripes);
3141 } else {
3142 ret = -EIO;
3143 break;
3144 }
3145 ptr += len;
3146 sb_ptr += len;
3147 cur += len;
3148 }
3149 free_extent_buffer(sb);
3150 return ret;
3151}
3152
3153int btrfs_read_chunk_tree(struct btrfs_root *root)
3154{
3155 struct btrfs_path *path;
3156 struct extent_buffer *leaf;
3157 struct btrfs_key key;
3158 struct btrfs_key found_key;
3159 int ret;
3160 int slot;
3161
3162 root = root->fs_info->chunk_root;
3163
3164 path = btrfs_alloc_path();
3165 if (!path)
3166 return -ENOMEM;
3167
3168 /* first we search for all of the device items, and then we
3169 * read in all of the chunk items. This way we can create chunk
3170 * mappings that reference all of the devices that are found
3171 */
3172 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
3173 key.offset = 0;
3174 key.type = 0;
3175again:
3176 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3177 while (1) {
3178 leaf = path->nodes[0];
3179 slot = path->slots[0];
3180 if (slot >= btrfs_header_nritems(leaf)) {
3181 ret = btrfs_next_leaf(root, path);
3182 if (ret == 0)
3183 continue;
3184 if (ret < 0)
3185 goto error;
3186 break;
3187 }
3188 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3189 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3190 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
3191 break;
3192 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
3193 struct btrfs_dev_item *dev_item;
3194 dev_item = btrfs_item_ptr(leaf, slot,
3195 struct btrfs_dev_item);
3196 ret = read_one_dev(root, leaf, dev_item);
3197 if (ret)
3198 goto error;
3199 }
3200 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
3201 struct btrfs_chunk *chunk;
3202 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3203 ret = read_one_chunk(root, &found_key, leaf, chunk);
3204 if (ret)
3205 goto error;
3206 }
3207 path->slots[0]++;
3208 }
3209 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3210 key.objectid = 0;
3211 btrfs_release_path(root, path);
3212 goto again;
3213 }
3214 ret = 0;
3215error:
3216 btrfs_free_path(path);
3217 return ret;
3218}
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
new file mode 100644
index 000000000000..86c44e9ae110
--- /dev/null
+++ b/fs/btrfs/volumes.h
@@ -0,0 +1,162 @@
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#ifndef __BTRFS_VOLUMES_
20#define __BTRFS_VOLUMES_
21
22#include <linux/bio.h>
23#include "async-thread.h"
24
25struct buffer_head;
26struct btrfs_device {
27 struct list_head dev_list;
28 struct list_head dev_alloc_list;
29 struct btrfs_fs_devices *fs_devices;
30 struct btrfs_root *dev_root;
31 struct bio *pending_bios;
32 struct bio *pending_bio_tail;
33 int running_pending;
34 u64 generation;
35
36 int barriers;
37 int writeable;
38 int in_fs_metadata;
39
40 spinlock_t io_lock;
41
42 struct block_device *bdev;
43
44 /* the mode sent to open_bdev_exclusive */
45 fmode_t mode;
46
47 char *name;
48
49 /* the internal btrfs device id */
50 u64 devid;
51
52 /* size of the device */
53 u64 total_bytes;
54
55 /* bytes used */
56 u64 bytes_used;
57
58 /* optimal io alignment for this device */
59 u32 io_align;
60
61 /* optimal io width for this device */
62 u32 io_width;
63
64 /* minimal io size for this device */
65 u32 sector_size;
66
67 /* type and info about this device */
68 u64 type;
69
70 /* physical drive uuid (or lvm uuid) */
71 u8 uuid[BTRFS_UUID_SIZE];
72
73 struct btrfs_work work;
74};
75
76struct btrfs_fs_devices {
77 u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
78
79 /* the device with this id has the most recent copy of the super */
80 u64 latest_devid;
81 u64 latest_trans;
82 u64 num_devices;
83 u64 open_devices;
84 u64 rw_devices;
85 u64 total_rw_bytes;
86 struct block_device *latest_bdev;
87 /* all of the devices in the FS */
88 struct list_head devices;
89
90 /* devices not currently being allocated */
91 struct list_head alloc_list;
92 struct list_head list;
93
94 struct btrfs_fs_devices *seed;
95 int seeding;
96
97 int opened;
98};
99
100struct btrfs_bio_stripe {
101 struct btrfs_device *dev;
102 u64 physical;
103};
104
105struct btrfs_multi_bio {
106 atomic_t stripes_pending;
107 bio_end_io_t *end_io;
108 struct bio *orig_bio;
109 void *private;
110 atomic_t error;
111 int max_errors;
112 int num_stripes;
113 struct btrfs_bio_stripe stripes[];
114};
115
116#define btrfs_multi_bio_size(n) (sizeof(struct btrfs_multi_bio) + \
117 (sizeof(struct btrfs_bio_stripe) * (n)))
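/*
 * a worked example (assumed n): btrfs_multi_bio_size(2) is the header plus
 * room for two stripes, sized to match the flexible array member above:
 *
 *	multi = kzalloc(btrfs_multi_bio_size(2), GFP_NOFS);
 */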
118
119int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
120 struct btrfs_device *device,
121 u64 chunk_tree, u64 chunk_objectid,
122 u64 chunk_offset, u64 start, u64 num_bytes);
123int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
124 u64 logical, u64 *length,
125 struct btrfs_multi_bio **multi_ret, int mirror_num);
126int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
127 u64 chunk_start, u64 physical, u64 devid,
128 u64 **logical, int *naddrs, int *stripe_len);
129int btrfs_read_sys_array(struct btrfs_root *root);
130int btrfs_read_chunk_tree(struct btrfs_root *root);
131int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
132 struct btrfs_root *extent_root, u64 type);
133void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
134void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
135int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
136 int mirror_num, int async_submit);
137int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf);
138int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
139 fmode_t flags, void *holder);
140int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
141 struct btrfs_fs_devices **fs_devices_ret);
142int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
143int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices);
144int btrfs_add_device(struct btrfs_trans_handle *trans,
145 struct btrfs_root *root,
146 struct btrfs_device *device);
147int btrfs_rm_device(struct btrfs_root *root, char *device_path);
148int btrfs_cleanup_fs_uuids(void);
149int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
150int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
151 u64 logical, struct page *page);
152int btrfs_grow_device(struct btrfs_trans_handle *trans,
153 struct btrfs_device *device, u64 new_size);
154struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
155 u8 *uuid, u8 *fsid);
156int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
157int btrfs_init_new_device(struct btrfs_root *root, char *path);
158int btrfs_balance(struct btrfs_root *dev_root);
159void btrfs_unlock_volumes(void);
160void btrfs_lock_volumes(void);
161int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
162#endif
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
new file mode 100644
index 000000000000..7f332e270894
--- /dev/null
+++ b/fs/btrfs/xattr.c
@@ -0,0 +1,322 @@
1/*
2 * Copyright (C) 2007 Red Hat. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/init.h>
20#include <linux/fs.h>
21#include <linux/slab.h>
22#include <linux/rwsem.h>
23#include <linux/xattr.h>
24#include "ctree.h"
25#include "btrfs_inode.h"
26#include "transaction.h"
27#include "xattr.h"
28#include "disk-io.h"
29
30
31ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
32 void *buffer, size_t size)
33{
34 struct btrfs_dir_item *di;
35 struct btrfs_root *root = BTRFS_I(inode)->root;
36 struct btrfs_path *path;
37 struct extent_buffer *leaf;
38 int ret = 0;
39 unsigned long data_ptr;
40
41 path = btrfs_alloc_path();
42 if (!path)
43 return -ENOMEM;
44
45 /* lookup the xattr by name */
46 di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name,
47 strlen(name), 0);
48 if (!di || IS_ERR(di)) {
49 ret = -ENODATA;
50 goto out;
51 }
52
53 leaf = path->nodes[0];
54 /* if size is 0, that means we want the size of the attr */
55 if (!size) {
56 ret = btrfs_dir_data_len(leaf, di);
57 goto out;
58 }
59
60 /* now get the data out of our dir_item */
61 if (btrfs_dir_data_len(leaf, di) > size) {
62 ret = -ERANGE;
63 goto out;
64 }
65 data_ptr = (unsigned long)((char *)(di + 1) +
66 btrfs_dir_name_len(leaf, di));
67 read_extent_buffer(leaf, buffer, data_ptr,
68 btrfs_dir_data_len(leaf, di));
69 ret = btrfs_dir_data_len(leaf, di);
70
71out:
72 btrfs_free_path(path);
73 return ret;
74}
75
76int __btrfs_setxattr(struct inode *inode, const char *name,
77 const void *value, size_t size, int flags)
78{
79 struct btrfs_dir_item *di;
80 struct btrfs_root *root = BTRFS_I(inode)->root;
81 struct btrfs_trans_handle *trans;
82 struct btrfs_path *path;
83 int ret = 0, mod = 0;
84
85 path = btrfs_alloc_path();
86 if (!path)
87 return -ENOMEM;
88
89 trans = btrfs_start_transaction(root, 1);
90 btrfs_set_trans_block_group(trans, inode);
91
92 /* first lets see if we already have this xattr */
93 di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name,
94 strlen(name), -1);
95 if (IS_ERR(di)) {
96 ret = PTR_ERR(di);
97 goto out;
98 }
99
100 /* ok we already have this xattr, lets remove it */
101 if (di) {
102 /* if we want create only exit */
103 if (flags & XATTR_CREATE) {
104 ret = -EEXIST;
105 goto out;
106 }
107
108 ret = btrfs_delete_one_dir_name(trans, root, path, di);
109 if (ret)
110 goto out;
111 btrfs_release_path(root, path);
112
113 /* if we don't have a value then we are removing the xattr */
114 if (!value) {
115 mod = 1;
116 goto out;
117 }
118 } else {
119 btrfs_release_path(root, path);
120
121 if (flags & XATTR_REPLACE) {
122 /* we couldn't find the attr to replace */
123 ret = -ENODATA;
124 goto out;
125 }
126 }
127
128 /* ok we have to create a completely new xattr */
129 ret = btrfs_insert_xattr_item(trans, root, name, strlen(name),
130 value, size, inode->i_ino);
131 if (ret)
132 goto out;
133 mod = 1;
134
135out:
136 if (mod) {
137 inode->i_ctime = CURRENT_TIME;
138 ret = btrfs_update_inode(trans, root, inode);
139 }
140
141 btrfs_end_transaction(trans, root);
142 btrfs_free_path(path);
143 return ret;
144}
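/*
 * a usage sketch (assumed values; "user.comment" is just an example name):
 * XATTR_CREATE fails with -EEXIST when the attribute already exists,
 * XATTR_REPLACE fails with -ENODATA when it does not, and a NULL value
 * removes an existing attribute:
 *
 *	err = __btrfs_setxattr(inode, "user.comment", "hi", 2, XATTR_CREATE);
 */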
145
146ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
147{
148 struct btrfs_key key, found_key;
149 struct inode *inode = dentry->d_inode;
150 struct btrfs_root *root = BTRFS_I(inode)->root;
151 struct btrfs_path *path;
152 struct btrfs_item *item;
153 struct extent_buffer *leaf;
154 struct btrfs_dir_item *di;
155 int ret = 0, slot, advance;
156 size_t total_size = 0, size_left = size;
157 unsigned long name_ptr;
158 size_t name_len;
159 u32 nritems;
160
161 /*
162 * ok we want all objects associated with this id.
163 * NOTE: we set key.offset = 0; because we want to start with the
164 * first xattr that we find and walk forward
165 */
166 key.objectid = inode->i_ino;
167 btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
168 key.offset = 0;
169
170 path = btrfs_alloc_path();
171 if (!path)
172 return -ENOMEM;
173 path->reada = 2;
174
175 /* search for our xattrs */
176 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
177 if (ret < 0)
178 goto err;
179 ret = 0;
180 advance = 0;
181 while (1) {
182 leaf = path->nodes[0];
183 nritems = btrfs_header_nritems(leaf);
184 slot = path->slots[0];
185
186 /* this is where we start walking through the path */
187 if (advance || slot >= nritems) {
188 /*
189 * if we've reached the last slot in this leaf we need
190 * to go to the next leaf and reset everything
191 */
192 if (slot >= nritems-1) {
193 ret = btrfs_next_leaf(root, path);
194 if (ret)
195 break;
196 leaf = path->nodes[0];
197 nritems = btrfs_header_nritems(leaf);
198 slot = path->slots[0];
199 } else {
200 /*
201 * just walking through the slots on this leaf
202 */
203 slot++;
204 path->slots[0]++;
205 }
206 }
207 advance = 1;
208
209 item = btrfs_item_nr(leaf, slot);
210 btrfs_item_key_to_cpu(leaf, &found_key, slot);
211
212 /* check to make sure this item is what we want */
213 if (found_key.objectid != key.objectid)
214 break;
215 if (btrfs_key_type(&found_key) != BTRFS_XATTR_ITEM_KEY)
216 break;
217
218 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
219
220 name_len = btrfs_dir_name_len(leaf, di);
221 total_size += name_len + 1;
222
223 /* we are just looking for how big our buffer needs to be */
224 if (!size)
225 continue;
226
227 if (!buffer || (name_len + 1) > size_left) {
228 ret = -ERANGE;
229 goto err;
230 }
231
232 name_ptr = (unsigned long)(di + 1);
233 read_extent_buffer(leaf, buffer, name_ptr, name_len);
234 buffer[name_len] = '\0';
235
236 size_left -= name_len + 1;
237 buffer += name_len + 1;
238 }
239 ret = total_size;
240
241err:
242 btrfs_free_path(path);
243
244 return ret;
245}
246
247/*
248 * List of handlers for synthetic system.* attributes. All real ondisk
249 * attributes are handled directly.
250 */
251struct xattr_handler *btrfs_xattr_handlers[] = {
252#ifdef CONFIG_FS_POSIX_ACL
253 &btrfs_xattr_acl_access_handler,
254 &btrfs_xattr_acl_default_handler,
255#endif
256 NULL,
257};
258
259/*
260 * Check if the attribute is in a supported namespace.
261 *
262 * This is applied after the check for the synthetic attributes in the system
263 * namespace.
264 */
265static bool btrfs_is_valid_xattr(const char *name)
266{
267 return !strncmp(name, XATTR_SECURITY_PREFIX,
268 XATTR_SECURITY_PREFIX_LEN) ||
269 !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
270 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
271 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
272}
273
274ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
275 void *buffer, size_t size)
276{
277 /*
278 * If this is a request for a synthetic attribute in the system.*
279 * namespace use the generic infrastructure to resolve a handler
280 * for it via sb->s_xattr.
281 */
282 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
283 return generic_getxattr(dentry, name, buffer, size);
284
285 if (!btrfs_is_valid_xattr(name))
286 return -EOPNOTSUPP;
287 return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
288}
289
290int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
291 size_t size, int flags)
292{
293 /*
294 * If this is a request for a synthetic attribute in the system.*
295 * namespace use the generic infrastructure to resolve a handler
296 * for it via sb->s_xattr.
297 */
298 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
299 return generic_setxattr(dentry, name, value, size, flags);
300
301 if (!btrfs_is_valid_xattr(name))
302 return -EOPNOTSUPP;
303
304 if (size == 0)
305 value = ""; /* empty EA, do not remove */
306 return __btrfs_setxattr(dentry->d_inode, name, value, size, flags);
307}
308
309int btrfs_removexattr(struct dentry *dentry, const char *name)
310{
311 /*
312 * If this is a request for a synthetic attribute in the system.*
313 * namespace use the generic infrastructure to resolve a handler
314 * for it via sb->s_xattr.
315 */
316 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
317 return generic_removexattr(dentry, name);
318
319 if (!btrfs_is_valid_xattr(name))
320 return -EOPNOTSUPP;
321 return __btrfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
322}
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
new file mode 100644
index 000000000000..5b1d08f8e68d
--- /dev/null
+++ b/fs/btrfs/xattr.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (C) 2007 Red Hat. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#ifndef __XATTR__
20#define __XATTR__
21
22#include <linux/xattr.h>
23
24extern struct xattr_handler btrfs_xattr_acl_access_handler;
25extern struct xattr_handler btrfs_xattr_acl_default_handler;
26extern struct xattr_handler *btrfs_xattr_handlers[];
27
28extern ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
29 void *buffer, size_t size);
30extern int __btrfs_setxattr(struct inode *inode, const char *name,
31 const void *value, size_t size, int flags);
32
33extern ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
34 void *buffer, size_t size);
35extern int btrfs_setxattr(struct dentry *dentry, const char *name,
36 const void *value, size_t size, int flags);
37extern int btrfs_removexattr(struct dentry *dentry, const char *name);
38
39#endif /* __XATTR__ */
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
new file mode 100644
index 000000000000..ecfbce836d32
--- /dev/null
+++ b/fs/btrfs/zlib.c
@@ -0,0 +1,632 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 *
18 * Based on jffs2 zlib code:
19 * Copyright © 2001-2007 Red Hat, Inc.
20 * Created by David Woodhouse <dwmw2@infradead.org>
21 */
22
23#include <linux/kernel.h>
24#include <linux/slab.h>
25#include <linux/zlib.h>
26#include <linux/zutil.h>
27#include <linux/vmalloc.h>
28#include <linux/init.h>
29#include <linux/err.h>
30#include <linux/sched.h>
31#include <linux/pagemap.h>
32#include <linux/bio.h>
33#include "compression.h"
34
35/* Plan: call deflate() with avail_in == *sourcelen,
36 avail_out = *dstlen - 12 and flush == Z_FINISH.
37 If it doesn't manage to finish, call it again with
38 avail_in == 0 and avail_out set to the remaining 12
39 bytes for it to clean up.
40 Q: Is 12 bytes sufficient?
41*/
42#define STREAM_END_SPACE 12
43
44struct workspace {
45 z_stream inf_strm;
46 z_stream def_strm;
47 char *buf;
48 struct list_head list;
49};
50
51static LIST_HEAD(idle_workspace);
52static DEFINE_SPINLOCK(workspace_lock);
53static unsigned long num_workspace;
54static atomic_t alloc_workspace = ATOMIC_INIT(0);
55static DECLARE_WAIT_QUEUE_HEAD(workspace_wait);
56
57/*
58 * this finds an available zlib workspace or allocates a new one
59 * an ERR_PTR is returned if things go bad.
60 */
61static struct workspace *find_zlib_workspace(void)
62{
63 struct workspace *workspace;
64 int ret;
65 int cpus = num_online_cpus();
66
67again:
68 spin_lock(&workspace_lock);
69 if (!list_empty(&idle_workspace)) {
70 workspace = list_entry(idle_workspace.next, struct workspace,
71 list);
72 list_del(&workspace->list);
73 num_workspace--;
74 spin_unlock(&workspace_lock);
75 return workspace;
76
77 }
78 spin_unlock(&workspace_lock);
79 if (atomic_read(&alloc_workspace) > cpus) {
80 DEFINE_WAIT(wait);
81 prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
82 if (atomic_read(&alloc_workspace) > cpus)
83 schedule();
84 finish_wait(&workspace_wait, &wait);
85 goto again;
86 }
87 atomic_inc(&alloc_workspace);
88 workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
89 if (!workspace) {
90 ret = -ENOMEM;
91 goto fail;
92 }
93
94 workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
95 if (!workspace->def_strm.workspace) {
96 ret = -ENOMEM;
97 goto fail;
98 }
99 workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
100 if (!workspace->inf_strm.workspace) {
101 ret = -ENOMEM;
102 goto fail_inflate;
103 }
104 workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
105 if (!workspace->buf) {
106 ret = -ENOMEM;
107 goto fail_kmalloc;
108 }
109 return workspace;
110
111fail_kmalloc:
112 vfree(workspace->inf_strm.workspace);
113fail_inflate:
114 vfree(workspace->def_strm.workspace);
115fail:
116 kfree(workspace);
117 atomic_dec(&alloc_workspace);
118 wake_up(&workspace_wait);
119 return ERR_PTR(ret);
120}
121
122/*
123 * put a workspace struct back on the list or free it if we have enough
124 * idle ones sitting around
125 */
126static int free_workspace(struct workspace *workspace)
127{
128 spin_lock(&workspace_lock);
129 if (num_workspace < num_online_cpus()) {
130 list_add_tail(&workspace->list, &idle_workspace);
131 num_workspace++;
132 spin_unlock(&workspace_lock);
133 if (waitqueue_active(&workspace_wait))
134 wake_up(&workspace_wait);
135 return 0;
136 }
137 spin_unlock(&workspace_lock);
138 vfree(workspace->def_strm.workspace);
139 vfree(workspace->inf_strm.workspace);
140 kfree(workspace->buf);
141 kfree(workspace);
142
143 atomic_dec(&alloc_workspace);
144 if (waitqueue_active(&workspace_wait))
145 wake_up(&workspace_wait);
146 return 0;
147}
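/*
 * the two helpers above pair up around every compress/decompress call;
 * a sketch of the expected pattern (error handling elided):
 *
 *	workspace = find_zlib_workspace();
 *	if (IS_ERR(workspace))
 *		return PTR_ERR(workspace);
 *	... use workspace->def_strm / inf_strm / buf ...
 *	free_workspace(workspace);
 */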
148
149/*
150 * cleanup function for module exit
151 */
152static void free_workspaces(void)
153{
154 struct workspace *workspace;
155 while (!list_empty(&idle_workspace)) {
156 workspace = list_entry(idle_workspace.next, struct workspace,
157 list);
158 list_del(&workspace->list);
159 vfree(workspace->def_strm.workspace);
160 vfree(workspace->inf_strm.workspace);
161 kfree(workspace->buf);
162 kfree(workspace);
163 atomic_dec(&alloc_workspace);
164 }
165}
166
167/*
168 * given an address space and start/len, compress the bytes.
169 *
170 * pages are allocated to hold the compressed result and stored
171 * in 'pages'
172 *
173 * out_pages is used to return the number of pages allocated. There
174 * may be pages allocated even if we return an error
175 *
176 * total_in is used to return the number of bytes actually read. It
177 * may be smaller than len if we had to exit early because we
178 * ran out of room in the pages array or because we crossed the
179 * max_out threshold.
180 *
181 * total_out is used to return the total number of compressed bytes
182 *
183 * max_out tells us the max number of bytes that we're allowed to
184 * stuff into pages
185 */
186int btrfs_zlib_compress_pages(struct address_space *mapping,
187 u64 start, unsigned long len,
188 struct page **pages,
189 unsigned long nr_dest_pages,
190 unsigned long *out_pages,
191 unsigned long *total_in,
192 unsigned long *total_out,
193 unsigned long max_out)
194{
195 int ret;
196 struct workspace *workspace;
197 char *data_in;
198 char *cpage_out;
199 int nr_pages = 0;
200 struct page *in_page = NULL;
201 struct page *out_page = NULL;
202 int out_written = 0;
203 int in_read = 0;
204 unsigned long bytes_left;
205
206 *out_pages = 0;
207 *total_out = 0;
208 *total_in = 0;
209
210 workspace = find_zlib_workspace();
211 if (IS_ERR(workspace))
212 return -1;
213
214 if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
215 printk(KERN_WARNING "deflateInit failed\n");
216 ret = -1;
217 goto out;
218 }
219
220 workspace->def_strm.total_in = 0;
221 workspace->def_strm.total_out = 0;
222
223 in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
224 data_in = kmap(in_page);
225
226 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
227 cpage_out = kmap(out_page);
228 pages[0] = out_page;
229 nr_pages = 1;
230
231 workspace->def_strm.next_in = data_in;
232 workspace->def_strm.next_out = cpage_out;
233 workspace->def_strm.avail_out = PAGE_CACHE_SIZE;
234 workspace->def_strm.avail_in = min(len, PAGE_CACHE_SIZE);
235
236 out_written = 0;
237 in_read = 0;
238
239 while (workspace->def_strm.total_in < len) {
240 ret = zlib_deflate(&workspace->def_strm, Z_SYNC_FLUSH);
241 if (ret != Z_OK) {
242 printk(KERN_DEBUG "btrfs deflate in loop returned %d\n",
243 ret);
244 zlib_deflateEnd(&workspace->def_strm);
245 ret = -1;
246 goto out;
247 }
248
249 /* we're making it bigger, give up */
250 if (workspace->def_strm.total_in > 8192 &&
251 workspace->def_strm.total_in <
252 workspace->def_strm.total_out) {
253 ret = -1;
254 goto out;
255 }
256 /* we need another page for writing out. Test this
257 * before the total_in so we will pull in a new page for
258 * the stream end if required
259 */
260 if (workspace->def_strm.avail_out == 0) {
261 kunmap(out_page);
262 if (nr_pages == nr_dest_pages) {
263 out_page = NULL;
264 ret = -1;
265 goto out;
266 }
267 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
268 cpage_out = kmap(out_page);
269 pages[nr_pages] = out_page;
270 nr_pages++;
271 workspace->def_strm.avail_out = PAGE_CACHE_SIZE;
272 workspace->def_strm.next_out = cpage_out;
273 }
274 /* we're all done */
275 if (workspace->def_strm.total_in >= len)
276 break;
277
278 /* we've read in a full page, get a new one */
279 if (workspace->def_strm.avail_in == 0) {
280 if (workspace->def_strm.total_out > max_out)
281 break;
282
283 bytes_left = len - workspace->def_strm.total_in;
284 kunmap(in_page);
285 page_cache_release(in_page);
286
287 start += PAGE_CACHE_SIZE;
288 in_page = find_get_page(mapping,
289 start >> PAGE_CACHE_SHIFT);
290 data_in = kmap(in_page);
291 workspace->def_strm.avail_in = min(bytes_left,
292 PAGE_CACHE_SIZE);
293 workspace->def_strm.next_in = data_in;
294 }
295 }
296 workspace->def_strm.avail_in = 0;
297 ret = zlib_deflate(&workspace->def_strm, Z_FINISH);
298 zlib_deflateEnd(&workspace->def_strm);
299
300 if (ret != Z_STREAM_END) {
301 ret = -1;
302 goto out;
303 }
304
305 if (workspace->def_strm.total_out >= workspace->def_strm.total_in) {
306 ret = -1;
307 goto out;
308 }
309
310 ret = 0;
311 *total_out = workspace->def_strm.total_out;
312 *total_in = workspace->def_strm.total_in;
313out:
314 *out_pages = nr_pages;
315 if (out_page)
316 kunmap(out_page);
317
318 if (in_page) {
319 kunmap(in_page);
320 page_cache_release(in_page);
321 }
322 free_workspace(workspace);
323 return ret;
324}
325
326/*
327 * pages_in is an array of pages with compressed data.
328 *
329 * disk_start is the starting logical offset of this array in the file
330 *
331 * bvec is a bio_vec of pages from the file that we want to decompress into
332 *
333 * vcnt is the count of pages in the biovec
334 *
335 * srclen is the number of bytes in pages_in
336 *
337 * The basic idea is that we have a bio that was created by readpages.
338 * The pages in the bio are for the uncompressed data, and they may not
339 * be contiguous. They all correspond to the range of bytes covered by
340 * the compressed extent.
341 */
342int btrfs_zlib_decompress_biovec(struct page **pages_in,
343 u64 disk_start,
344 struct bio_vec *bvec,
345 int vcnt,
346 size_t srclen)
347{
348 int ret = 0;
349 int wbits = MAX_WBITS;
350 struct workspace *workspace;
351 char *data_in;
352 size_t total_out = 0;
353 unsigned long page_bytes_left;
354 unsigned long page_in_index = 0;
355 unsigned long page_out_index = 0;
356 struct page *page_out;
357 unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) /
358 PAGE_CACHE_SIZE;
359 unsigned long buf_start;
360 unsigned long buf_offset;
361 unsigned long bytes;
362 unsigned long working_bytes;
363 unsigned long pg_offset;
364 unsigned long start_byte;
365 unsigned long current_buf_start;
366 char *kaddr;
367
368 workspace = find_zlib_workspace();
369 if (IS_ERR(workspace))
370 return -ENOMEM;
371
372 data_in = kmap(pages_in[page_in_index]);
373 workspace->inf_strm.next_in = data_in;
374 workspace->inf_strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE);
375 workspace->inf_strm.total_in = 0;
376
377 workspace->inf_strm.total_out = 0;
378 workspace->inf_strm.next_out = workspace->buf;
379 workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
380 page_out = bvec[page_out_index].bv_page;
381 page_bytes_left = PAGE_CACHE_SIZE;
382 pg_offset = 0;
383
384 /* If it's deflate, and it's got no preset dictionary, then
385 we can tell zlib to skip the adler32 check. */
386 if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
387 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
388 !(((data_in[0]<<8) + data_in[1]) % 31)) {
389
390 wbits = -((data_in[0] >> 4) + 8);
391 workspace->inf_strm.next_in += 2;
392 workspace->inf_strm.avail_in -= 2;
393 }
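	/*
	 * worked example with the common zlib header bytes 0x78 0x9c:
	 * (0x78 & 0x0f) == Z_DEFLATED, the PRESET_DICT bit (0x20) is
	 * clear in 0x9c, and 0x789c % 31 == 0, so the two header bytes
	 * are skipped and wbits = -((0x78 >> 4) + 8) = -15, i.e. raw
	 * deflate with a 32KB window
	 */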
394
395 if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
396 printk(KERN_WARNING "inflateInit failed\n");
397 ret = -1;
398 goto out;
399 }
400 while (workspace->inf_strm.total_in < srclen) {
401 ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
402 if (ret != Z_OK && ret != Z_STREAM_END)
403 break;
404 /*
405 * buf_start is the byte offset, within the uncompressed data,
406 * of the start of our workspace buffer
407 */
408 buf_start = total_out;
409
410 /* total_out is the last byte of the workspace buffer */
411 total_out = workspace->inf_strm.total_out;
412
413 working_bytes = total_out - buf_start;
414
415 /*
416 * start byte is the first byte of the page we're currently
417 * copying into relative to the start of the compressed data.
418 */
419 start_byte = page_offset(page_out) - disk_start;
420
421 if (working_bytes == 0) {
422 /* we didn't make progress in this inflate
423 * call, we're done
424 */
425 if (ret != Z_STREAM_END)
426 ret = -1;
427 break;
428 }
429
430 /* we haven't yet hit data corresponding to this page */
431 if (total_out <= start_byte)
432 goto next;
433
434 /*
435 * the start of the data we care about is offset into
436 * the middle of our working buffer
437 */
438 if (total_out > start_byte && buf_start < start_byte) {
439 buf_offset = start_byte - buf_start;
440 working_bytes -= buf_offset;
441 } else {
442 buf_offset = 0;
443 }
444 current_buf_start = buf_start;
445
446 /* copy bytes from the working buffer into the pages */
447 while (working_bytes > 0) {
448 bytes = min(PAGE_CACHE_SIZE - pg_offset,
449 PAGE_CACHE_SIZE - buf_offset);
450 bytes = min(bytes, working_bytes);
451 kaddr = kmap_atomic(page_out, KM_USER0);
452 memcpy(kaddr + pg_offset, workspace->buf + buf_offset,
453 bytes);
454 kunmap_atomic(kaddr, KM_USER0);
455 flush_dcache_page(page_out);
456
457 pg_offset += bytes;
458 page_bytes_left -= bytes;
459 buf_offset += bytes;
460 working_bytes -= bytes;
461 current_buf_start += bytes;
462
463 /* check if we need to pick another page */
464 if (page_bytes_left == 0) {
465 page_out_index++;
466 if (page_out_index >= vcnt) {
467 ret = 0;
468 goto done;
469 }
470
471 page_out = bvec[page_out_index].bv_page;
472 pg_offset = 0;
473 page_bytes_left = PAGE_CACHE_SIZE;
474 start_byte = page_offset(page_out) - disk_start;
475
476 /*
477 * make sure our new page is covered by this
478 * working buffer
479 */
480 if (total_out <= start_byte)
481 goto next;
482
483 /* the next page in the biovec might not
484 * be adjacent to the last page, but it
485 * might still be found inside this working
486 * buffer. bump our offset pointer
487 */
488 if (total_out > start_byte &&
489 current_buf_start < start_byte) {
490 buf_offset = start_byte - buf_start;
491 working_bytes = total_out - start_byte;
492 current_buf_start = buf_start +
493 buf_offset;
494 }
495 }
496 }
497next:
498 workspace->inf_strm.next_out = workspace->buf;
499 workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
500
501 if (workspace->inf_strm.avail_in == 0) {
502 unsigned long tmp;
503 kunmap(pages_in[page_in_index]);
504 page_in_index++;
505 if (page_in_index >= total_pages_in) {
506 data_in = NULL;
507 break;
508 }
509 data_in = kmap(pages_in[page_in_index]);
510 workspace->inf_strm.next_in = data_in;
511 tmp = srclen - workspace->inf_strm.total_in;
512 workspace->inf_strm.avail_in = min(tmp,
513 PAGE_CACHE_SIZE);
514 }
515 }
516 if (ret != Z_STREAM_END)
517 ret = -1;
518 else
519 ret = 0;
520done:
521 zlib_inflateEnd(&workspace->inf_strm);
522 if (data_in)
523 kunmap(pages_in[page_in_index]);
524out:
525 free_workspace(workspace);
526 return ret;
527}
528
529/*
530 * a less complex decompression routine. Our compressed data fits in a
531 * single page, and we want to read a single page out of it.
532 * start_byte tells us the offset into the compressed data we're interested in
533 */
534int btrfs_zlib_decompress(unsigned char *data_in,
535 struct page *dest_page,
536 unsigned long start_byte,
537 size_t srclen, size_t destlen)
538{
539 int ret = 0;
540 int wbits = MAX_WBITS;
541 struct workspace *workspace;
542 unsigned long bytes_left = destlen;
543 unsigned long total_out = 0;
544 char *kaddr;
545
546 if (destlen > PAGE_CACHE_SIZE)
547 return -ENOMEM;
548
549 workspace = find_zlib_workspace();
550 if (IS_ERR(workspace))
551 return -ENOMEM;
552
553 workspace->inf_strm.next_in = data_in;
554 workspace->inf_strm.avail_in = srclen;
555 workspace->inf_strm.total_in = 0;
556
557 workspace->inf_strm.next_out = workspace->buf;
558 workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
559 workspace->inf_strm.total_out = 0;
560 /* If it's deflate, and it's got no preset dictionary, then
561 we can tell zlib to skip the adler32 check. */
562 if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
563 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
564 !(((data_in[0]<<8) + data_in[1]) % 31)) {
565
566 wbits = -((data_in[0] >> 4) + 8);
567 workspace->inf_strm.next_in += 2;
568 workspace->inf_strm.avail_in -= 2;
569 }
570
571 if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
572 printk(KERN_WARNING "inflateInit failed\n");
573 ret = -1;
574 goto out;
575 }
576
577 while (bytes_left > 0) {
578 unsigned long buf_start;
579 unsigned long buf_offset;
580 unsigned long bytes;
581 unsigned long pg_offset = 0;
582
583 ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
584 if (ret != Z_OK && ret != Z_STREAM_END)
585 break;
586
587 buf_start = total_out;
588 total_out = workspace->inf_strm.total_out;
589
590 if (total_out == buf_start) {
591 ret = -1;
592 break;
593 }
594
595 if (total_out <= start_byte)
596 goto next;
597
598 if (total_out > start_byte && buf_start < start_byte)
599 buf_offset = start_byte - buf_start;
600 else
601 buf_offset = 0;
602
603 bytes = min(PAGE_CACHE_SIZE - pg_offset,
604 PAGE_CACHE_SIZE - buf_offset);
605 bytes = min(bytes, bytes_left);
606
607 kaddr = kmap_atomic(dest_page, KM_USER0);
608 memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
609 kunmap_atomic(kaddr, KM_USER0);
610
611 pg_offset += bytes;
612 bytes_left -= bytes;
613next:
614 workspace->inf_strm.next_out = workspace->buf;
615 workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
616 }
617
618 if (ret != Z_STREAM_END && bytes_left != 0)
619 ret = -1;
620 else
621 ret = 0;
622
623 zlib_inflateEnd(&workspace->inf_strm);
624out:
625 free_workspace(workspace);
626 return ret;
627}
628
629void btrfs_zlib_exit(void)
630{
631 free_workspaces();
632}
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
index 81b7771c6465..43c96ce29614 100644
--- a/fs/coda/sysctl.c
+++ b/fs/coda/sysctl.c
@@ -11,7 +11,9 @@
 
 #include "coda_int.h"
 
+#ifdef CONFIG_SYSCTL
 static struct ctl_table_header *fs_table_header;
+#endif
 
 static ctl_table coda_table[] = {
 	{
@@ -41,6 +43,7 @@ static ctl_table coda_table[] = {
 	{}
 };
 
+#ifdef CONFIG_SYSCTL
 static ctl_table fs_table[] = {
 	{
 		.ctl_name = CTL_UNNUMBERED,
@@ -50,7 +53,7 @@ static ctl_table fs_table[] = {
 	},
 	{}
 };
-
+#endif
 
 void coda_sysctl_init(void)
 {
diff --git a/fs/dcache.c b/fs/dcache.c
index e88c23b85a32..4547f66884a0 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1567,10 +1567,6 @@ void d_rehash(struct dentry * entry)
 	spin_unlock(&dcache_lock);
 }
 
-#define do_switch(x,y) do { \
-	__typeof__ (x) __tmp = x; \
-	x = y; y = __tmp; } while (0)
-
 /*
  * When switching names, the actual string doesn't strictly have to
  * be preserved in the target - because we're dropping the target
@@ -1589,7 +1585,7 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
 		/*
 		 * Both external: swap the pointers
 		 */
-		do_switch(target->d_name.name, dentry->d_name.name);
+		swap(target->d_name.name, dentry->d_name.name);
 	} else {
 		/*
 		 * dentry:internal, target:external. Steal target's
@@ -1620,7 +1616,7 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
 			return;
 		}
 	}
-	do_switch(dentry->d_name.len, target->d_name.len);
+	swap(dentry->d_name.len, target->d_name.len);
 }
 
 /*
@@ -1680,7 +1676,7 @@ already_unhashed:
 
 	/* Switch the names.. */
 	switch_names(dentry, target);
-	do_switch(dentry->d_name.hash, target->d_name.hash);
+	swap(dentry->d_name.hash, target->d_name.hash);
 
 	/* ... and switch the parents */
 	if (IS_ROOT(dentry)) {
@@ -1688,7 +1684,7 @@ already_unhashed:
 		target->d_parent = target;
 		INIT_LIST_HEAD(&target->d_u.d_child);
 	} else {
-		do_switch(dentry->d_parent, target->d_parent);
+		swap(dentry->d_parent, target->d_parent);
 
 		/* And add them back to the (new) parent lists */
 		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
@@ -1789,7 +1785,7 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
 	struct dentry *dparent, *aparent;
 
 	switch_names(dentry, anon);
-	do_switch(dentry->d_name.hash, anon->d_name.hash);
+	swap(dentry->d_name.hash, anon->d_name.hash);
 
 	dparent = dentry->d_parent;
 	aparent = anon->d_parent;
diff --git a/fs/dquot.c b/fs/dquot.c
index 61bfff64e5af..48c0571f831d 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -2090,10 +2090,12 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
2090 } 2090 }
2091 if (di->dqb_valid & QIF_BTIME) { 2091 if (di->dqb_valid & QIF_BTIME) {
2092 dm->dqb_btime = di->dqb_btime; 2092 dm->dqb_btime = di->dqb_btime;
2093 check_blim = 1;
2093 __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); 2094 __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
2094 } 2095 }
2095 if (di->dqb_valid & QIF_ITIME) { 2096 if (di->dqb_valid & QIF_ITIME) {
2096 dm->dqb_itime = di->dqb_itime; 2097 dm->dqb_itime = di->dqb_itime;
2098 check_ilim = 1;
2097 __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); 2099 __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
2098 } 2100 }
2099 2101
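
The two added check_blim/check_ilim assignments make do_set_dqblk() re-run its limit checks even when userspace only updated a grace time; otherwise a btime/itime set while usage is below the soft limit would never be cleared. What the recheck amounts to, as an illustrative reduction rather than the fs/dquot.c source:

	#include <stdio.h>

	struct demo_dq {
		long long curspace;	/* bytes in use */
		long long bsoftlimit;	/* soft limit, 0 = none */
		long long btime;	/* grace-period start, 0 = no grace running */
	};

	/* what check_blim triggers: drop a grace time that no longer applies */
	static void recheck_block_limit(struct demo_dq *dq)
	{
		if (!dq->bsoftlimit || dq->curspace < dq->bsoftlimit)
			dq->btime = 0;
	}

	int main(void)
	{
		struct demo_dq dq = { .curspace = 100, .bsoftlimit = 500, .btime = 42 };

		/* userspace just set btime via Q_SETQUOTA; usage is under the limit */
		recheck_block_limit(&dq);
		printf("btime=%lld\n", dq.btime);	/* 0: stale grace time cleared */
		return 0;
	}
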
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index c454d5db28a5..66321a877e74 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -565,12 +565,8 @@ got:
565 inode->i_blocks = 0; 565 inode->i_blocks = 0;
566 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; 566 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
567 memset(ei->i_data, 0, sizeof(ei->i_data)); 567 memset(ei->i_data, 0, sizeof(ei->i_data));
568 ei->i_flags = EXT2_I(dir)->i_flags & ~EXT2_BTREE_FL; 568 ei->i_flags =
569 if (S_ISLNK(mode)) 569 ext2_mask_flags(mode, EXT2_I(dir)->i_flags & EXT2_FL_INHERITED);
570 ei->i_flags &= ~(EXT2_IMMUTABLE_FL|EXT2_APPEND_FL);
571 /* dirsync is only applied to directories */
572 if (!S_ISDIR(mode))
573 ei->i_flags &= ~EXT2_DIRSYNC_FL;
574 ei->i_faddr = 0; 570 ei->i_faddr = 0;
575 ei->i_frag_no = 0; 571 ei->i_frag_no = 0;
576 ei->i_frag_size = 0; 572 ei->i_frag_size = 0;
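
Both the ialloc and ioctl paths now funnel through ext2_mask_flags(), centralizing the per-file-type flag filtering that used to be open-coded: strip DIRSYNC from non-directories, IMMUTABLE/APPEND from symlinks, and so on. A hedged sketch of the shape of such a helper, using stand-in flag values rather than the real EXT2_*_FL constants:

	#include <stdio.h>
	#include <sys/stat.h>

	/* illustrative flag values; the real ones are EXT2_*_FL in ext2_fs.h */
	#define FL_DIRSYNC	0x00010000
	#define FL_IMMUTABLE	0x00000010
	#define FL_APPEND	0x00000020

	/* plausible shape of ext2_mask_flags(): keep everything on directories,
	 * strip type-inappropriate flags elsewhere (a sketch, not kernel source) */
	static unsigned int mask_flags(mode_t mode, unsigned int flags)
	{
		if (S_ISDIR(mode))
			return flags;
		if (S_ISLNK(mode))
			return flags & ~(FL_IMMUTABLE | FL_APPEND | FL_DIRSYNC);
		return flags & ~FL_DIRSYNC;
	}

	int main(void)
	{
		printf("%#x\n", mask_flags(S_IFREG, FL_DIRSYNC | FL_APPEND)); /* 0x20 */
		return 0;
	}
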
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 02b39a5deb74..23fff2f87783 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -498,8 +498,6 @@ static int ext2_alloc_branch(struct inode *inode,
498 * ext2_splice_branch - splice the allocated branch onto inode. 498 * ext2_splice_branch - splice the allocated branch onto inode.
499 * @inode: owner 499 * @inode: owner
500 * @block: (logical) number of block we are adding 500 * @block: (logical) number of block we are adding
501 * @chain: chain of indirect blocks (with a missing link - see
502 * ext2_alloc_branch)
503 * @where: location of missing link 501 * @where: location of missing link
504 * @num: number of indirect blocks we are adding 502 * @num: number of indirect blocks we are adding
505 * @blks: number of direct blocks we are adding 503 * @blks: number of direct blocks we are adding
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index de876fa793e1..7cb4badef927 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -50,8 +50,7 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
50 goto setflags_out; 50 goto setflags_out;
51 } 51 }
52 52
53 if (!S_ISDIR(inode->i_mode)) 53 flags = ext2_mask_flags(inode->i_mode, flags);
54 flags &= ~EXT2_DIRSYNC_FL;
55 54
56 mutex_lock(&inode->i_mutex); 55 mutex_lock(&inode->i_mutex);
57 /* Is it quota file? Do not allow user to mess with it */ 56 /* Is it quota file? Do not allow user to mess with it */
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 647cd888ac87..da8bdeaa2e6d 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -132,6 +132,7 @@ static void ext2_put_super (struct super_block * sb)
132 percpu_counter_destroy(&sbi->s_dirs_counter); 132 percpu_counter_destroy(&sbi->s_dirs_counter);
133 brelse (sbi->s_sbh); 133 brelse (sbi->s_sbh);
134 sb->s_fs_info = NULL; 134 sb->s_fs_info = NULL;
135 kfree(sbi->s_blockgroup_lock);
135 kfree(sbi); 136 kfree(sbi);
136 137
137 return; 138 return;
@@ -756,6 +757,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
756 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 757 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
757 if (!sbi) 758 if (!sbi)
758 return -ENOMEM; 759 return -ENOMEM;
760
761 sbi->s_blockgroup_lock =
762 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
763 if (!sbi->s_blockgroup_lock) {
764 kfree(sbi);
765 return -ENOMEM;
766 }
759 sb->s_fs_info = sbi; 767 sb->s_fs_info = sbi;
760 sbi->s_sb_block = sb_block; 768 sbi->s_sb_block = sb_block;
761 769
@@ -983,7 +991,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
983 printk ("EXT2-fs: not enough memory\n"); 991 printk ("EXT2-fs: not enough memory\n");
984 goto failed_mount; 992 goto failed_mount;
985 } 993 }
986 bgl_lock_init(&sbi->s_blockgroup_lock); 994 bgl_lock_init(sbi->s_blockgroup_lock);
987 sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL); 995 sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
988 if (!sbi->s_debts) { 996 if (!sbi->s_debts) {
989 printk ("EXT2-fs: not enough memory\n"); 997 printk ("EXT2-fs: not enough memory\n");
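
ext2 (and ext3 below) now allocate struct blockgroup_lock separately instead of embedding the large structure in the sb_info, and the matching kfree() in put_super must run before the sbi itself is freed. The pattern, reduced to its essentials with userspace stand-ins for the kernel types:

	#include <stdlib.h>

	struct blockgroup_lock { int locks[128]; };	/* stand-in: it is large */
	struct sb_info {
		struct blockgroup_lock *s_blockgroup_lock;
		/* ... many smaller fields ... */
	};

	static struct sb_info *fill_super(void)
	{
		struct sb_info *sbi = calloc(1, sizeof(*sbi));

		if (!sbi)
			return NULL;
		sbi->s_blockgroup_lock = calloc(1, sizeof(*sbi->s_blockgroup_lock));
		if (!sbi->s_blockgroup_lock) {
			free(sbi);		/* unwind in reverse order */
			return NULL;
		}
		return sbi;
	}

	static void put_super(struct sb_info *sbi)
	{
		free(sbi->s_blockgroup_lock);	/* before freeing its owner */
		free(sbi);
	}

	int main(void)
	{
		struct sb_info *sbi = fill_super();

		if (sbi)
			put_super(sbi);
		return 0;
	}
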
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index c30e149fbd2e..7d215b4d4f2e 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -35,23 +35,71 @@ static void TEA_transform(__u32 buf[4], __u32 const in[])
35 35
36 36
37/* The old legacy hash */ 37/* The old legacy hash */
38static __u32 dx_hack_hash (const char *name, int len) 38static __u32 dx_hack_hash_unsigned(const char *name, int len)
39{ 39{
40 __u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; 40 __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
41 const unsigned char *ucp = (const unsigned char *) name;
42
43 while (len--) {
44 hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));
45
46 if (hash & 0x80000000)
47 hash -= 0x7fffffff;
48 hash1 = hash0;
49 hash0 = hash;
50 }
51 return hash0 << 1;
52}
53
54static __u32 dx_hack_hash_signed(const char *name, int len)
55{
56 __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
57 const signed char *scp = (const signed char *) name;
58
41 while (len--) { 59 while (len--) {
42 __u32 hash = hash1 + (hash0 ^ (*name++ * 7152373)); 60 hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));
43 61
44 if (hash & 0x80000000) hash -= 0x7fffffff; 62 if (hash & 0x80000000)
63 hash -= 0x7fffffff;
45 hash1 = hash0; 64 hash1 = hash0;
46 hash0 = hash; 65 hash0 = hash;
47 } 66 }
48 return (hash0 << 1); 67 return hash0 << 1;
49} 68}
50 69
51static void str2hashbuf(const char *msg, int len, __u32 *buf, int num) 70static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num)
52{ 71{
53 __u32 pad, val; 72 __u32 pad, val;
54 int i; 73 int i;
74 const signed char *scp = (const signed char *) msg;
75
76 pad = (__u32)len | ((__u32)len << 8);
77 pad |= pad << 16;
78
79 val = pad;
80 if (len > num*4)
81 len = num * 4;
82 for (i = 0; i < len; i++) {
83 if ((i % 4) == 0)
84 val = pad;
85 val = ((int) scp[i]) + (val << 8);
86 if ((i % 4) == 3) {
87 *buf++ = val;
88 val = pad;
89 num--;
90 }
91 }
92 if (--num >= 0)
93 *buf++ = val;
94 while (--num >= 0)
95 *buf++ = pad;
96}
97
98static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num)
99{
100 __u32 pad, val;
101 int i;
102 const unsigned char *ucp = (const unsigned char *) msg;
55 103
56 pad = (__u32)len | ((__u32)len << 8); 104 pad = (__u32)len | ((__u32)len << 8);
57 pad |= pad << 16; 105 pad |= pad << 16;
@@ -62,7 +110,7 @@ static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
62 for (i=0; i < len; i++) { 110 for (i=0; i < len; i++) {
63 if ((i % 4) == 0) 111 if ((i % 4) == 0)
64 val = pad; 112 val = pad;
65 val = msg[i] + (val << 8); 113 val = ((int) ucp[i]) + (val << 8);
66 if ((i % 4) == 3) { 114 if ((i % 4) == 3) {
67 *buf++ = val; 115 *buf++ = val;
68 val = pad; 116 val = pad;
@@ -95,6 +143,8 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
95 const char *p; 143 const char *p;
96 int i; 144 int i;
97 __u32 in[8], buf[4]; 145 __u32 in[8], buf[4];
146 void (*str2hashbuf)(const char *, int, __u32 *, int) =
147 str2hashbuf_signed;
98 148
99 /* Initialize the default seed for the hash checksum functions */ 149 /* Initialize the default seed for the hash checksum functions */
100 buf[0] = 0x67452301; 150 buf[0] = 0x67452301;
@@ -113,13 +163,18 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
113 } 163 }
114 164
115 switch (hinfo->hash_version) { 165 switch (hinfo->hash_version) {
166 case DX_HASH_LEGACY_UNSIGNED:
167 hash = dx_hack_hash_unsigned(name, len);
168 break;
116 case DX_HASH_LEGACY: 169 case DX_HASH_LEGACY:
117 hash = dx_hack_hash(name, len); 170 hash = dx_hack_hash_signed(name, len);
118 break; 171 break;
172 case DX_HASH_HALF_MD4_UNSIGNED:
173 str2hashbuf = str2hashbuf_unsigned;
119 case DX_HASH_HALF_MD4: 174 case DX_HASH_HALF_MD4:
120 p = name; 175 p = name;
121 while (len > 0) { 176 while (len > 0) {
122 str2hashbuf(p, len, in, 8); 177 (*str2hashbuf)(p, len, in, 8);
123 half_md4_transform(buf, in); 178 half_md4_transform(buf, in);
124 len -= 32; 179 len -= 32;
125 p += 32; 180 p += 32;
@@ -127,10 +182,12 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
127 minor_hash = buf[2]; 182 minor_hash = buf[2];
128 hash = buf[1]; 183 hash = buf[1];
129 break; 184 break;
185 case DX_HASH_TEA_UNSIGNED:
186 str2hashbuf = str2hashbuf_unsigned;
130 case DX_HASH_TEA: 187 case DX_HASH_TEA:
131 p = name; 188 p = name;
132 while (len > 0) { 189 while (len > 0) {
133 str2hashbuf(p, len, in, 4); 190 (*str2hashbuf)(p, len, in, 4);
134 TEA_transform(buf, in); 191 TEA_transform(buf, in);
135 len -= 16; 192 len -= 16;
136 p += 16; 193 p += 16;
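
The split into _signed and _unsigned variants exists because the old code hashed through plain char, whose signedness is architecture-dependent: any filename byte >= 0x80 hashes differently on x86 (signed char) than on ARM or PowerPC (unsigned char), so htree directories became unreadable when a disk moved between such machines. A small standalone demonstration of the divergence, using the same multiply-and-fold step as dx_hack_hash:

	#include <stdio.h>

	typedef unsigned int u32;

	static u32 hack_hash(const char *name, int len, int is_signed)
	{
		u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;

		while (len--) {
			int c = is_signed ? (int)(signed char)*name
					  : (int)(unsigned char)*name;
			name++;
			hash = hash1 + (hash0 ^ (c * 7152373));
			if (hash & 0x80000000)
				hash -= 0x7fffffff;
			hash1 = hash0;
			hash0 = hash;
		}
		return hash0 << 1;
	}

	int main(void)
	{
		const char name[] = "caf\xc3\xa9";	/* UTF-8 bytes >= 0x80 */

		/* same bytes, two different values => on-disk incompatibility */
		printf("signed:   %08x\n", hack_hash(name, 5, 1));
		printf("unsigned: %08x\n", hack_hash(name, 5, 0));
		return 0;
	}
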
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 5655fbcbd11f..8de6c720e510 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -559,12 +559,8 @@ got:
559 ei->i_dir_start_lookup = 0; 559 ei->i_dir_start_lookup = 0;
560 ei->i_disksize = 0; 560 ei->i_disksize = 0;
561 561
562 ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL; 562 ei->i_flags =
563 if (S_ISLNK(mode)) 563 ext3_mask_flags(mode, EXT3_I(dir)->i_flags & EXT3_FL_INHERITED);
564 ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
565 /* dirsync only applies to directories */
566 if (!S_ISDIR(mode))
567 ei->i_flags &= ~EXT3_DIRSYNC_FL;
568#ifdef EXT3_FRAGMENTS 564#ifdef EXT3_FRAGMENTS
569 ei->i_faddr = 0; 565 ei->i_faddr = 0;
570 ei->i_frag_no = 0; 566 ei->i_frag_no = 0;
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index b7394d05ee8e..5e86ce9a86e0 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -53,8 +53,7 @@ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
53 goto flags_out; 53 goto flags_out;
54 } 54 }
55 55
56 if (!S_ISDIR(inode->i_mode)) 56 flags = ext3_mask_flags(inode->i_mode, flags);
57 flags &= ~EXT3_DIRSYNC_FL;
58 57
59 mutex_lock(&inode->i_mutex); 58 mutex_lock(&inode->i_mutex);
60 /* Is it quota file? Do not allow user to mess with it */ 59 /* Is it quota file? Do not allow user to mess with it */
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 1dd2abe6313e..69a3d19ca9fd 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -74,10 +74,6 @@ static struct buffer_head *ext3_append(handle_t *handle,
74#define assert(test) J_ASSERT(test) 74#define assert(test) J_ASSERT(test)
75#endif 75#endif
76 76
77#ifndef swap
78#define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0)
79#endif
80
81#ifdef DX_DEBUG 77#ifdef DX_DEBUG
82#define dxtrace(command) command 78#define dxtrace(command) command
83#else 79#else
@@ -368,6 +364,8 @@ dx_probe(struct qstr *entry, struct inode *dir,
368 goto fail; 364 goto fail;
369 } 365 }
370 hinfo->hash_version = root->info.hash_version; 366 hinfo->hash_version = root->info.hash_version;
367 if (hinfo->hash_version <= DX_HASH_TEA)
368 hinfo->hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
371 hinfo->seed = EXT3_SB(dir->i_sb)->s_hash_seed; 369 hinfo->seed = EXT3_SB(dir->i_sb)->s_hash_seed;
372 if (entry) 370 if (entry)
373 ext3fs_dirhash(entry->name, entry->len, hinfo); 371 ext3fs_dirhash(entry->name, entry->len, hinfo);
@@ -636,6 +634,9 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
636 dir = dir_file->f_path.dentry->d_inode; 634 dir = dir_file->f_path.dentry->d_inode;
637 if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) { 635 if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) {
638 hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version; 636 hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
637 if (hinfo.hash_version <= DX_HASH_TEA)
638 hinfo.hash_version +=
639 EXT3_SB(dir->i_sb)->s_hash_unsigned;
639 hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed; 640 hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
640 count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo, 641 count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
641 start_hash, start_minor_hash); 642 start_hash, start_minor_hash);
@@ -1156,9 +1157,9 @@ static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1156 u32 hash2; 1157 u32 hash2;
1157 struct dx_map_entry *map; 1158 struct dx_map_entry *map;
1158 char *data1 = (*bh)->b_data, *data2; 1159 char *data1 = (*bh)->b_data, *data2;
1159 unsigned split, move, size, i; 1160 unsigned split, move, size;
1160 struct ext3_dir_entry_2 *de = NULL, *de2; 1161 struct ext3_dir_entry_2 *de = NULL, *de2;
1161 int err = 0; 1162 int err = 0, i;
1162 1163
1163 bh2 = ext3_append (handle, dir, &newblock, &err); 1164 bh2 = ext3_append (handle, dir, &newblock, &err);
1164 if (!(bh2)) { 1165 if (!(bh2)) {
@@ -1398,6 +1399,8 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1398 1399
1399 /* Initialize as for dx_probe */ 1400 /* Initialize as for dx_probe */
1400 hinfo.hash_version = root->info.hash_version; 1401 hinfo.hash_version = root->info.hash_version;
1402 if (hinfo.hash_version <= DX_HASH_TEA)
1403 hinfo.hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
1401 hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed; 1404 hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
1402 ext3fs_dirhash(name, namelen, &hinfo); 1405 ext3fs_dirhash(name, namelen, &hinfo);
1403 frame = frames; 1406 frame = frames;
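
The repeated `hash_version += s_hash_unsigned` lines rely on the constants being laid out so that each unsigned variant is exactly its signed counterpart plus 3 (see the DX_HASH_*_UNSIGNED definitions in the ext4 header further below); s_hash_unsigned is either 0 or 3, so the addition either leaves the version alone or bumps it into the unsigned range. The `<= DX_HASH_TEA` guard keeps other versions from being shifted. Illustratively:

	#include <assert.h>

	enum {
		DX_HASH_LEGACY = 0, DX_HASH_HALF_MD4 = 1, DX_HASH_TEA = 2,
		DX_HASH_LEGACY_UNSIGNED = 3, DX_HASH_HALF_MD4_UNSIGNED = 4,
		DX_HASH_TEA_UNSIGNED = 5,
	};

	int main(void)
	{
		int s_hash_unsigned = 3;  /* 3 on unsigned-char arches, else 0 */

		assert(DX_HASH_LEGACY   + s_hash_unsigned == DX_HASH_LEGACY_UNSIGNED);
		assert(DX_HASH_HALF_MD4 + s_hash_unsigned == DX_HASH_HALF_MD4_UNSIGNED);
		assert(DX_HASH_TEA      + s_hash_unsigned == DX_HASH_TEA_UNSIGNED);
		return 0;
	}
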
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index c22d01467bd1..5d047a030a73 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -439,6 +439,7 @@ static void ext3_put_super (struct super_block * sb)
439 ext3_blkdev_remove(sbi); 439 ext3_blkdev_remove(sbi);
440 } 440 }
441 sb->s_fs_info = NULL; 441 sb->s_fs_info = NULL;
442 kfree(sbi->s_blockgroup_lock);
442 kfree(sbi); 443 kfree(sbi);
443 return; 444 return;
444} 445}
@@ -682,6 +683,26 @@ static struct dentry *ext3_fh_to_parent(struct super_block *sb, struct fid *fid,
682 ext3_nfs_get_inode); 683 ext3_nfs_get_inode);
683} 684}
684 685
686/*
687 * Try to release metadata pages (indirect blocks, directories) which are
688 * mapped via the block device. Since these pages could have journal heads
689 * which would prevent try_to_free_buffers() from freeing them, we must use
690 * jbd layer's try_to_free_buffers() function to release them.
691 */
692static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
693 gfp_t wait)
694{
695 journal_t *journal = EXT3_SB(sb)->s_journal;
696
697 WARN_ON(PageChecked(page));
698 if (!page_has_buffers(page))
699 return 0;
700 if (journal)
701 return journal_try_to_free_buffers(journal, page,
702 wait & ~__GFP_WAIT);
703 return try_to_free_buffers(page);
704}
705
685#ifdef CONFIG_QUOTA 706#ifdef CONFIG_QUOTA
686#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") 707#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
687#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) 708#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
@@ -748,6 +769,7 @@ static const struct super_operations ext3_sops = {
748 .quota_read = ext3_quota_read, 769 .quota_read = ext3_quota_read,
749 .quota_write = ext3_quota_write, 770 .quota_write = ext3_quota_write,
750#endif 771#endif
772 .bdev_try_to_free_page = bdev_try_to_free_page,
751}; 773};
752 774
753static const struct export_operations ext3_export_ops = { 775static const struct export_operations ext3_export_ops = {
@@ -1546,6 +1568,13 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1546 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 1568 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
1547 if (!sbi) 1569 if (!sbi)
1548 return -ENOMEM; 1570 return -ENOMEM;
1571
1572 sbi->s_blockgroup_lock =
1573 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
1574 if (!sbi->s_blockgroup_lock) {
1575 kfree(sbi);
1576 return -ENOMEM;
1577 }
1549 sb->s_fs_info = sbi; 1578 sb->s_fs_info = sbi;
1550 sbi->s_mount_opt = 0; 1579 sbi->s_mount_opt = 0;
1551 sbi->s_resuid = EXT3_DEF_RESUID; 1580 sbi->s_resuid = EXT3_DEF_RESUID;
@@ -1742,6 +1771,18 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1742 for (i=0; i < 4; i++) 1771 for (i=0; i < 4; i++)
1743 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); 1772 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
1744 sbi->s_def_hash_version = es->s_def_hash_version; 1773 sbi->s_def_hash_version = es->s_def_hash_version;
1774 i = le32_to_cpu(es->s_flags);
1775 if (i & EXT2_FLAGS_UNSIGNED_HASH)
1776 sbi->s_hash_unsigned = 3;
1777 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
1778#ifdef __CHAR_UNSIGNED__
1779 es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
1780 sbi->s_hash_unsigned = 3;
1781#else
1782 es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
1783#endif
1784 sb->s_dirt = 1;
1785 }
1745 1786
1746 if (sbi->s_blocks_per_group > blocksize * 8) { 1787 if (sbi->s_blocks_per_group > blocksize * 8) {
1747 printk (KERN_ERR 1788 printk (KERN_ERR
@@ -1786,7 +1827,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1786 goto failed_mount; 1827 goto failed_mount;
1787 } 1828 }
1788 1829
1789 bgl_lock_init(&sbi->s_blockgroup_lock); 1830 bgl_lock_init(sbi->s_blockgroup_lock);
1790 1831
1791 for (i = 0; i < db_count; i++) { 1832 for (i = 0; i < db_count; i++) {
1792 block = descriptor_loc(sb, logic_sb_block, i); 1833 block = descriptor_loc(sb, logic_sb_block, i);
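
The new mount-time block records which char convention built the directory hashes: on first mount it stamps EXT2_FLAGS_UNSIGNED_HASH or EXT2_FLAGS_SIGNED_HASH into the superblock according to the compiler's __CHAR_UNSIGNED__, so the filesystem keeps hashing the same way when moved between architectures. Whether plain char is signed can be checked portably:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		/* CHAR_MIN is 0 exactly when plain char is an unsigned type;
		 * gcc also predefines __CHAR_UNSIGNED__ in that case */
	#ifdef __CHAR_UNSIGNED__
		printf("__CHAR_UNSIGNED__ is defined\n");
	#endif
		printf("plain char is %s\n", CHAR_MIN == 0 ? "unsigned" : "signed");
		return 0;
	}
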
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 38b3acf5683b..6bba06b09dd1 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -20,6 +20,7 @@
20#include "ext4.h" 20#include "ext4.h"
21#include "ext4_jbd2.h" 21#include "ext4_jbd2.h"
22#include "group.h" 22#include "group.h"
23#include "mballoc.h"
23 24
24/* 25/*
25 * balloc.c contains the blocks allocation and deallocation routines 26 * balloc.c contains the blocks allocation and deallocation routines
@@ -100,10 +101,10 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
100 * essentially implementing a per-group read-only flag. */ 101 * essentially implementing a per-group read-only flag. */
101 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { 102 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
102 ext4_error(sb, __func__, 103 ext4_error(sb, __func__,
103 "Checksum bad for group %lu\n", block_group); 104 "Checksum bad for group %u", block_group);
104 gdp->bg_free_blocks_count = 0; 105 ext4_free_blks_set(sb, gdp, 0);
105 gdp->bg_free_inodes_count = 0; 106 ext4_free_inodes_set(sb, gdp, 0);
106 gdp->bg_itable_unused = 0; 107 ext4_itable_unused_set(sb, gdp, 0);
107 memset(bh->b_data, 0xff, sb->s_blocksize); 108 memset(bh->b_data, 0xff, sb->s_blocksize);
108 return 0; 109 return 0;
109 } 110 }
@@ -205,15 +206,15 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
205 ext4_group_t block_group, 206 ext4_group_t block_group,
206 struct buffer_head **bh) 207 struct buffer_head **bh)
207{ 208{
208 unsigned long group_desc; 209 unsigned int group_desc;
209 unsigned long offset; 210 unsigned int offset;
210 struct ext4_group_desc *desc; 211 struct ext4_group_desc *desc;
211 struct ext4_sb_info *sbi = EXT4_SB(sb); 212 struct ext4_sb_info *sbi = EXT4_SB(sb);
212 213
213 if (block_group >= sbi->s_groups_count) { 214 if (block_group >= sbi->s_groups_count) {
214 ext4_error(sb, "ext4_get_group_desc", 215 ext4_error(sb, "ext4_get_group_desc",
215 "block_group >= groups_count - " 216 "block_group >= groups_count - "
216 "block_group = %lu, groups_count = %lu", 217 "block_group = %u, groups_count = %u",
217 block_group, sbi->s_groups_count); 218 block_group, sbi->s_groups_count);
218 219
219 return NULL; 220 return NULL;
@@ -225,7 +226,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
225 if (!sbi->s_group_desc[group_desc]) { 226 if (!sbi->s_group_desc[group_desc]) {
226 ext4_error(sb, "ext4_get_group_desc", 227 ext4_error(sb, "ext4_get_group_desc",
227 "Group descriptor not loaded - " 228 "Group descriptor not loaded - "
228 "block_group = %lu, group_desc = %lu, desc = %lu", 229 "block_group = %u, group_desc = %u, desc = %u",
229 block_group, group_desc, offset); 230 block_group, group_desc, offset);
230 return NULL; 231 return NULL;
231 } 232 }
@@ -315,29 +316,50 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
315 if (unlikely(!bh)) { 316 if (unlikely(!bh)) {
316 ext4_error(sb, __func__, 317 ext4_error(sb, __func__,
317 "Cannot read block bitmap - " 318 "Cannot read block bitmap - "
318 "block_group = %lu, block_bitmap = %llu", 319 "block_group = %u, block_bitmap = %llu",
319 block_group, bitmap_blk); 320 block_group, bitmap_blk);
320 return NULL; 321 return NULL;
321 } 322 }
322 if (buffer_uptodate(bh) && 323
323 !(desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) 324 if (bitmap_uptodate(bh))
324 return bh; 325 return bh;
325 326
326 lock_buffer(bh); 327 lock_buffer(bh);
328 if (bitmap_uptodate(bh)) {
329 unlock_buffer(bh);
330 return bh;
331 }
327 spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group)); 332 spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
328 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 333 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
329 ext4_init_block_bitmap(sb, bh, block_group, desc); 334 ext4_init_block_bitmap(sb, bh, block_group, desc);
335 set_bitmap_uptodate(bh);
330 set_buffer_uptodate(bh); 336 set_buffer_uptodate(bh);
331 unlock_buffer(bh);
332 spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group)); 337 spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
338 unlock_buffer(bh);
333 return bh; 339 return bh;
334 } 340 }
335 spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group)); 341 spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
342 if (buffer_uptodate(bh)) {
343 /*
 344 * if the group is not uninit and bh is uptodate,
 345 * the bitmap is also uptodate
346 */
347 set_bitmap_uptodate(bh);
348 unlock_buffer(bh);
349 return bh;
350 }
351 /*
352 * submit the buffer_head for read. We can
353 * safely mark the bitmap as uptodate now.
354 * We do it here so the bitmap uptodate bit
 355 * gets set with the buffer lock held.
356 */
357 set_bitmap_uptodate(bh);
336 if (bh_submit_read(bh) < 0) { 358 if (bh_submit_read(bh) < 0) {
337 put_bh(bh); 359 put_bh(bh);
338 ext4_error(sb, __func__, 360 ext4_error(sb, __func__,
339 "Cannot read block bitmap - " 361 "Cannot read block bitmap - "
340 "block_group = %lu, block_bitmap = %llu", 362 "block_group = %u, block_bitmap = %llu",
341 block_group, bitmap_blk); 363 block_group, bitmap_blk);
342 return NULL; 364 return NULL;
343 } 365 }
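
The reworked ext4_read_block_bitmap() above distinguishes "buffer contents valid" (buffer_uptodate) from "bitmap contents valid" (the new bitmap_uptodate state), and uses a check / lock / re-check sequence so only one task initializes an uninitialized bitmap. A userspace analogue of that control flow, with a mutex standing in for lock_buffer() (the kernel's unlocked fast-path read is racy by strict C rules but matches the buffer-head idiom):

	#include <pthread.h>
	#include <stdio.h>

	struct demo_bh {
		pthread_mutex_t lock;	/* stands in for lock_buffer() */
		int bitmap_uptodate;	/* stands in for the new state bit */
	};

	static void read_bitmap(struct demo_bh *bh)
	{
		if (bh->bitmap_uptodate)	/* fast path, no lock taken */
			return;
		pthread_mutex_lock(&bh->lock);
		if (bh->bitmap_uptodate) {	/* re-check: someone beat us here */
			pthread_mutex_unlock(&bh->lock);
			return;
		}
		/* initialize or read the bitmap while holding the lock ... */
		bh->bitmap_uptodate = 1;	/* set before dropping the lock */
		pthread_mutex_unlock(&bh->lock);
	}

	int main(void)
	{
		struct demo_bh bh = { .lock = PTHREAD_MUTEX_INITIALIZER };

		read_bitmap(&bh);
		read_bitmap(&bh);	/* second call takes the fast path */
		printf("uptodate=%d\n", bh.bitmap_uptodate);
		return 0;
	}
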
@@ -350,62 +372,44 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
350} 372}
351 373
352/** 374/**
353 * ext4_free_blocks_sb() -- Free given blocks and update quota 375 * ext4_add_groupblocks() -- Add given blocks to an existing group
354 * @handle: handle to this transaction 376 * @handle: handle to this transaction
355 * @sb: super block 377 * @sb: super block
 356 * @block: start physical block to free 378 * @block: start physical block to add to the block group
 357 * @count: number of blocks to free 379 * @count: number of blocks to add
358 * @pdquot_freed_blocks: pointer to quota
359 * 380 *
 360 * XXX This function is only used by the on-line resizing code, which 381 * This marks the blocks as free in the bitmap. We ask
 361 * should probably be fixed up to call the mballoc variant. There 382 * mballoc to reload the buddy after this by setting the group's
 362 * this needs to be cleaned up later; in fact, I'm not convinced this 383 * EXT4_GROUP_INFO_NEED_INIT_BIT flag.
363 * is 100% correct in the face of the mballoc code. The online resizing
364 * code needs to be fixed up to more tightly (and correctly) interlock
365 * with the mballoc code.
366 */ 384 */
367void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb, 385void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
368 ext4_fsblk_t block, unsigned long count, 386 ext4_fsblk_t block, unsigned long count)
369 unsigned long *pdquot_freed_blocks)
370{ 387{
371 struct buffer_head *bitmap_bh = NULL; 388 struct buffer_head *bitmap_bh = NULL;
372 struct buffer_head *gd_bh; 389 struct buffer_head *gd_bh;
373 ext4_group_t block_group; 390 ext4_group_t block_group;
374 ext4_grpblk_t bit; 391 ext4_grpblk_t bit;
375 unsigned long i; 392 unsigned int i;
376 unsigned long overflow;
377 struct ext4_group_desc *desc; 393 struct ext4_group_desc *desc;
378 struct ext4_super_block *es; 394 struct ext4_super_block *es;
379 struct ext4_sb_info *sbi; 395 struct ext4_sb_info *sbi;
380 int err = 0, ret; 396 int err = 0, ret, blk_free_count;
381 ext4_grpblk_t group_freed; 397 ext4_grpblk_t blocks_freed;
398 struct ext4_group_info *grp;
382 399
383 *pdquot_freed_blocks = 0;
384 sbi = EXT4_SB(sb); 400 sbi = EXT4_SB(sb);
385 es = sbi->s_es; 401 es = sbi->s_es;
386 if (block < le32_to_cpu(es->s_first_data_block) || 402 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
387 block + count < block ||
388 block + count > ext4_blocks_count(es)) {
389 ext4_error(sb, "ext4_free_blocks",
390 "Freeing blocks not in datazone - "
391 "block = %llu, count = %lu", block, count);
392 goto error_return;
393 }
394
395 ext4_debug("freeing block(s) %llu-%llu\n", block, block + count - 1);
396 403
397do_more:
398 overflow = 0;
399 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 404 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
405 grp = ext4_get_group_info(sb, block_group);
400 /* 406 /*
401 * Check to see if we are freeing blocks across a group 407 * Check to see if we are freeing blocks across a group
402 * boundary. 408 * boundary.
403 */ 409 */
404 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { 410 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
405 overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb); 411 goto error_return;
406 count -= overflow;
407 } 412 }
408 brelse(bitmap_bh);
409 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 413 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
410 if (!bitmap_bh) 414 if (!bitmap_bh)
411 goto error_return; 415 goto error_return;
@@ -418,18 +422,17 @@ do_more:
418 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || 422 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
419 in_range(block + count - 1, ext4_inode_table(sb, desc), 423 in_range(block + count - 1, ext4_inode_table(sb, desc),
420 sbi->s_itb_per_group)) { 424 sbi->s_itb_per_group)) {
421 ext4_error(sb, "ext4_free_blocks", 425 ext4_error(sb, __func__,
422 "Freeing blocks in system zones - " 426 "Adding blocks in system zones - "
423 "Block = %llu, count = %lu", 427 "Block = %llu, count = %lu",
424 block, count); 428 block, count);
425 goto error_return; 429 goto error_return;
426 } 430 }
427 431
428 /* 432 /*
429 * We are about to start releasing blocks in the bitmap, 433 * We are about to add blocks to the bitmap,
430 * so we need undo access. 434 * so we need undo access.
431 */ 435 */
432 /* @@@ check errors */
433 BUFFER_TRACE(bitmap_bh, "getting undo access"); 436 BUFFER_TRACE(bitmap_bh, "getting undo access");
434 err = ext4_journal_get_undo_access(handle, bitmap_bh); 437 err = ext4_journal_get_undo_access(handle, bitmap_bh);
435 if (err) 438 if (err)
@@ -444,107 +447,55 @@ do_more:
444 err = ext4_journal_get_write_access(handle, gd_bh); 447 err = ext4_journal_get_write_access(handle, gd_bh);
445 if (err) 448 if (err)
446 goto error_return; 449 goto error_return;
447 450 /*
448 jbd_lock_bh_state(bitmap_bh); 451 * make sure we don't allow a parallel init on other groups in the
449 452 * same buddy cache
450 for (i = 0, group_freed = 0; i < count; i++) { 453 */
451 /* 454 down_write(&grp->alloc_sem);
452 * An HJ special. This is expensive... 455 for (i = 0, blocks_freed = 0; i < count; i++) {
453 */
454#ifdef CONFIG_JBD2_DEBUG
455 jbd_unlock_bh_state(bitmap_bh);
456 {
457 struct buffer_head *debug_bh;
458 debug_bh = sb_find_get_block(sb, block + i);
459 if (debug_bh) {
460 BUFFER_TRACE(debug_bh, "Deleted!");
461 if (!bh2jh(bitmap_bh)->b_committed_data)
462 BUFFER_TRACE(debug_bh,
463 "No commited data in bitmap");
464 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
465 __brelse(debug_bh);
466 }
467 }
468 jbd_lock_bh_state(bitmap_bh);
469#endif
470 if (need_resched()) {
471 jbd_unlock_bh_state(bitmap_bh);
472 cond_resched();
473 jbd_lock_bh_state(bitmap_bh);
474 }
475 /* @@@ This prevents newly-allocated data from being
476 * freed and then reallocated within the same
477 * transaction.
478 *
479 * Ideally we would want to allow that to happen, but to
480 * do so requires making jbd2_journal_forget() capable of
481 * revoking the queued write of a data block, which
482 * implies blocking on the journal lock. *forget()
483 * cannot block due to truncate races.
484 *
485 * Eventually we can fix this by making jbd2_journal_forget()
486 * return a status indicating whether or not it was able
487 * to revoke the buffer. On successful revoke, it is
488 * safe not to set the allocation bit in the committed
489 * bitmap, because we know that there is no outstanding
490 * activity on the buffer any more and so it is safe to
491 * reallocate it.
492 */
493 BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
494 J_ASSERT_BH(bitmap_bh,
495 bh2jh(bitmap_bh)->b_committed_data != NULL);
496 ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
497 bh2jh(bitmap_bh)->b_committed_data);
498
499 /*
500 * We clear the bit in the bitmap after setting the committed
501 * data bit, because this is the reverse order to that which
502 * the allocator uses.
503 */
504 BUFFER_TRACE(bitmap_bh, "clear bit"); 456 BUFFER_TRACE(bitmap_bh, "clear bit");
505 if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group), 457 if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
506 bit + i, bitmap_bh->b_data)) { 458 bit + i, bitmap_bh->b_data)) {
507 jbd_unlock_bh_state(bitmap_bh);
508 ext4_error(sb, __func__, 459 ext4_error(sb, __func__,
509 "bit already cleared for block %llu", 460 "bit already cleared for block %llu",
510 (ext4_fsblk_t)(block + i)); 461 (ext4_fsblk_t)(block + i));
511 jbd_lock_bh_state(bitmap_bh);
512 BUFFER_TRACE(bitmap_bh, "bit already cleared"); 462 BUFFER_TRACE(bitmap_bh, "bit already cleared");
513 } else { 463 } else {
514 group_freed++; 464 blocks_freed++;
515 } 465 }
516 } 466 }
517 jbd_unlock_bh_state(bitmap_bh);
518
519 spin_lock(sb_bgl_lock(sbi, block_group)); 467 spin_lock(sb_bgl_lock(sbi, block_group));
520 le16_add_cpu(&desc->bg_free_blocks_count, group_freed); 468 blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
469 ext4_free_blks_set(sb, desc, blk_free_count);
521 desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc); 470 desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
522 spin_unlock(sb_bgl_lock(sbi, block_group)); 471 spin_unlock(sb_bgl_lock(sbi, block_group));
523 percpu_counter_add(&sbi->s_freeblocks_counter, count); 472 percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
524 473
525 if (sbi->s_log_groups_per_flex) { 474 if (sbi->s_log_groups_per_flex) {
526 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 475 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
527 spin_lock(sb_bgl_lock(sbi, flex_group)); 476 spin_lock(sb_bgl_lock(sbi, flex_group));
528 sbi->s_flex_groups[flex_group].free_blocks += count; 477 sbi->s_flex_groups[flex_group].free_blocks += blocks_freed;
529 spin_unlock(sb_bgl_lock(sbi, flex_group)); 478 spin_unlock(sb_bgl_lock(sbi, flex_group));
530 } 479 }
480 /*
481 * request to reload the buddy with the
482 * new bitmap information
483 */
484 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
485 ext4_mb_update_group_info(grp, blocks_freed);
486 up_write(&grp->alloc_sem);
531 487
532 /* We dirtied the bitmap block */ 488 /* We dirtied the bitmap block */
533 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 489 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
534 err = ext4_journal_dirty_metadata(handle, bitmap_bh); 490 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
535 491
536 /* And the group descriptor block */ 492 /* And the group descriptor block */
537 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 493 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
538 ret = ext4_journal_dirty_metadata(handle, gd_bh); 494 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
539 if (!err) err = ret; 495 if (!err)
540 *pdquot_freed_blocks += group_freed; 496 err = ret;
541
542 if (overflow && !err) {
543 block += count;
544 count = overflow;
545 goto do_more;
546 }
547 sb->s_dirt = 1; 497 sb->s_dirt = 1;
498
548error_return: 499error_return:
549 brelse(bitmap_bh); 500 brelse(bitmap_bh);
550 ext4_std_error(sb, err); 501 ext4_std_error(sb, err);
@@ -614,7 +565,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
614 if (dirty_blocks < 0) { 565 if (dirty_blocks < 0) {
615 printk(KERN_CRIT "Dirty block accounting " 566 printk(KERN_CRIT "Dirty block accounting "
616 "went wrong %lld\n", 567 "went wrong %lld\n",
617 dirty_blocks); 568 (long long)dirty_blocks);
618 } 569 }
619 } 570 }
620 /* Check whether we have space after 571 /* Check whether we have space after
@@ -666,101 +617,45 @@ int ext4_should_retry_alloc(struct super_block *sb, int *retries)
666 return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal); 617 return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
667} 618}
668 619
669#define EXT4_META_BLOCK 0x1
670
671static ext4_fsblk_t do_blk_alloc(handle_t *handle, struct inode *inode,
672 ext4_lblk_t iblock, ext4_fsblk_t goal,
673 unsigned long *count, int *errp, int flags)
674{
675 struct ext4_allocation_request ar;
676 ext4_fsblk_t ret;
677
678 memset(&ar, 0, sizeof(ar));
679 /* Fill with neighbour allocated blocks */
680
681 ar.inode = inode;
682 ar.goal = goal;
683 ar.len = *count;
684 ar.logical = iblock;
685
686 if (S_ISREG(inode->i_mode) && !(flags & EXT4_META_BLOCK))
687 /* enable in-core preallocation for data block allocation */
688 ar.flags = EXT4_MB_HINT_DATA;
689 else
690 /* disable in-core preallocation for non-regular files */
691 ar.flags = 0;
692
693 ret = ext4_mb_new_blocks(handle, &ar, errp);
694 *count = ar.len;
695 return ret;
696}
697
698/* 620/*
699 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks 621 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
700 * 622 *
701 * @handle: handle to this transaction 623 * @handle: handle to this transaction
702 * @inode: file inode 624 * @inode: file inode
703 * @goal: given target block(filesystem wide) 625 * @goal: given target block(filesystem wide)
704 * @count: total number of blocks need 626 * @count: pointer to total number of blocks needed
705 * @errp: error code 627 * @errp: error code
706 * 628 *
707 * Return 1st allocated block numberon success, *count stores total account 629 * Return 1st allocated block number on success, *count stores total account
708 * error stores in errp pointer 630 * error stores in errp pointer
709 */ 631 */
710ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, 632ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
711 ext4_fsblk_t goal, unsigned long *count, int *errp) 633 ext4_fsblk_t goal, unsigned long *count, int *errp)
712{ 634{
635 struct ext4_allocation_request ar;
713 ext4_fsblk_t ret; 636 ext4_fsblk_t ret;
714 ret = do_blk_alloc(handle, inode, 0, goal, 637
715 count, errp, EXT4_META_BLOCK); 638 memset(&ar, 0, sizeof(ar));
639 /* Fill with neighbour allocated blocks */
640 ar.inode = inode;
641 ar.goal = goal;
642 ar.len = count ? *count : 1;
643
644 ret = ext4_mb_new_blocks(handle, &ar, errp);
645 if (count)
646 *count = ar.len;
647
716 /* 648 /*
717 * Account for the allocated meta blocks 649 * Account for the allocated meta blocks
718 */ 650 */
719 if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) { 651 if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
720 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 652 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
721 EXT4_I(inode)->i_allocated_meta_blocks += *count; 653 EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
722 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 654 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
723 } 655 }
724 return ret; 656 return ret;
725} 657}
726 658
727/*
728 * ext4_new_meta_block() -- allocate block for meta data (indexing) blocks
729 *
730 * @handle: handle to this transaction
731 * @inode: file inode
732 * @goal: given target block(filesystem wide)
733 * @errp: error code
734 *
735 * Return allocated block number on success
736 */
737ext4_fsblk_t ext4_new_meta_block(handle_t *handle, struct inode *inode,
738 ext4_fsblk_t goal, int *errp)
739{
740 unsigned long count = 1;
741 return ext4_new_meta_blocks(handle, inode, goal, &count, errp);
742}
743
744/*
745 * ext4_new_blocks() -- allocate data blocks
746 *
747 * @handle: handle to this transaction
748 * @inode: file inode
749 * @goal: given target block(filesystem wide)
 750 * @count: total number of blocks needed
751 * @errp: error code
752 *
 753 * Return 1st allocated block number on success, *count stores total account
754 * error stores in errp pointer
755 */
756
757ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
758 ext4_lblk_t iblock, ext4_fsblk_t goal,
759 unsigned long *count, int *errp)
760{
761 return do_blk_alloc(handle, inode, iblock, goal, count, errp, 0);
762}
763
764/** 659/**
765 * ext4_count_free_blocks() -- count filesystem free blocks 660 * ext4_count_free_blocks() -- count filesystem free blocks
766 * @sb: superblock 661 * @sb: superblock
@@ -776,7 +671,7 @@ ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
776#ifdef EXT4FS_DEBUG 671#ifdef EXT4FS_DEBUG
777 struct ext4_super_block *es; 672 struct ext4_super_block *es;
778 ext4_fsblk_t bitmap_count; 673 ext4_fsblk_t bitmap_count;
779 unsigned long x; 674 unsigned int x;
780 struct buffer_head *bitmap_bh = NULL; 675 struct buffer_head *bitmap_bh = NULL;
781 676
782 es = EXT4_SB(sb)->s_es; 677 es = EXT4_SB(sb)->s_es;
@@ -796,7 +691,7 @@ ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
796 continue; 691 continue;
797 692
798 x = ext4_count_free(bitmap_bh, sb->s_blocksize); 693 x = ext4_count_free(bitmap_bh, sb->s_blocksize);
799 printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n", 694 printk(KERN_DEBUG "group %lu: stored = %d, counted = %u\n",
800 i, le16_to_cpu(gdp->bg_free_blocks_count), x); 695 i, le16_to_cpu(gdp->bg_free_blocks_count), x);
801 bitmap_count += x; 696 bitmap_count += x;
802 } 697 }
@@ -812,7 +707,7 @@ ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
812 gdp = ext4_get_group_desc(sb, i, NULL); 707 gdp = ext4_get_group_desc(sb, i, NULL);
813 if (!gdp) 708 if (!gdp)
814 continue; 709 continue;
815 desc_count += le16_to_cpu(gdp->bg_free_blocks_count); 710 desc_count += ext4_free_blks_count(sb, gdp);
816 } 711 }
817 712
818 return desc_count; 713 return desc_count;
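
After the consolidation above, ext4_new_meta_blocks() is the single entry point for metadata allocation and builds the ext4_allocation_request itself. Note that it now tolerates a NULL count, treated as a request for one block, which is what let the ext4_new_meta_block() wrapper be deleted. The NULL-count handling in isolation, as a userspace sketch:

	#include <stdio.h>

	/* sketch of the new `ar.len = count ? *count : 1` convention */
	static unsigned long alloc_blocks(unsigned long *count)
	{
		unsigned long len = count ? *count : 1;	/* NULL means one block */

		/* ... allocation would happen here; pretend it succeeded ... */
		if (count)
			*count = len;	/* report how many we actually got */
		return 42;		/* fake first block number */
	}

	int main(void)
	{
		unsigned long n = 8;

		printf("first=%lu n=%lu\n", alloc_blocks(&n), n);
		printf("first=%lu (count=NULL)\n", alloc_blocks(NULL));
		return 0;
	}
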
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index 0a7a6663c190..fa3af81ac565 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -15,10 +15,9 @@
15 15
16static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0}; 16static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
17 17
18unsigned long ext4_count_free(struct buffer_head *map, unsigned int numchars) 18unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
19{ 19{
20 unsigned int i; 20 unsigned int i, sum = 0;
21 unsigned long sum = 0;
22 21
23 if (!map) 22 if (!map)
24 return 0; 23 return 0;
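
ext4_count_free() counts free blocks or inodes by table lookup: nibblemap[n] is the number of zero bits in the 4-bit value n, so each bitmap byte contributes two lookups. The scheme is easy to verify in isolation:

	#include <stdio.h>

	static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1,
					3, 2, 2, 1, 2, 1, 1, 0};

	/* count zero bits (free entries) in a bitmap, one nibble at a time */
	static unsigned int count_free(const unsigned char *map, unsigned int n)
	{
		unsigned int i, sum = 0;

		for (i = 0; i < n; i++)
			sum += nibblemap[map[i] & 0xf] +
			       nibblemap[(map[i] >> 4) & 0xf];
		return sum;
	}

	int main(void)
	{
		unsigned char map[] = { 0x00, 0xff, 0xf0 };

		printf("%u\n", count_free(map, sizeof(map)));	/* 8 + 0 + 4 = 12 */
		return 0;
	}
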
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index fed5b610df5a..2df2e40b01af 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -64,7 +64,7 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
64int ext4_check_dir_entry(const char *function, struct inode *dir, 64int ext4_check_dir_entry(const char *function, struct inode *dir,
65 struct ext4_dir_entry_2 *de, 65 struct ext4_dir_entry_2 *de,
66 struct buffer_head *bh, 66 struct buffer_head *bh,
67 unsigned long offset) 67 unsigned int offset)
68{ 68{
69 const char *error_msg = NULL; 69 const char *error_msg = NULL;
70 const int rlen = ext4_rec_len_from_disk(de->rec_len); 70 const int rlen = ext4_rec_len_from_disk(de->rec_len);
@@ -84,9 +84,9 @@ int ext4_check_dir_entry(const char *function, struct inode *dir,
84 if (error_msg != NULL) 84 if (error_msg != NULL)
85 ext4_error(dir->i_sb, function, 85 ext4_error(dir->i_sb, function,
86 "bad entry in directory #%lu: %s - " 86 "bad entry in directory #%lu: %s - "
87 "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", 87 "offset=%u, inode=%u, rec_len=%d, name_len=%d",
88 dir->i_ino, error_msg, offset, 88 dir->i_ino, error_msg, offset,
89 (unsigned long) le32_to_cpu(de->inode), 89 le32_to_cpu(de->inode),
90 rlen, de->name_len); 90 rlen, de->name_len);
91 return error_msg == NULL ? 1 : 0; 91 return error_msg == NULL ? 1 : 0;
92} 92}
@@ -95,7 +95,7 @@ static int ext4_readdir(struct file *filp,
95 void *dirent, filldir_t filldir) 95 void *dirent, filldir_t filldir)
96{ 96{
97 int error = 0; 97 int error = 0;
98 unsigned long offset; 98 unsigned int offset;
99 int i, stored; 99 int i, stored;
100 struct ext4_dir_entry_2 *de; 100 struct ext4_dir_entry_2 *de;
101 struct super_block *sb; 101 struct super_block *sb;
@@ -405,7 +405,7 @@ static int call_filldir(struct file *filp, void *dirent,
405 sb = inode->i_sb; 405 sb = inode->i_sb;
406 406
407 if (!fname) { 407 if (!fname) {
408 printk(KERN_ERR "ext4: call_filldir: called with " 408 printk(KERN_ERR "EXT4-fs: call_filldir: called with "
409 "null fname?!?\n"); 409 "null fname?!?\n");
410 return 0; 410 return 0;
411 } 411 }
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 6c46c648430d..c668e4377d76 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -19,6 +19,7 @@
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/blkdev.h> 20#include <linux/blkdev.h>
21#include <linux/magic.h> 21#include <linux/magic.h>
22#include <linux/jbd2.h>
22#include "ext4_i.h" 23#include "ext4_i.h"
23 24
24/* 25/*
@@ -94,9 +95,9 @@ struct ext4_allocation_request {
94 /* phys. block for ^^^ */ 95 /* phys. block for ^^^ */
95 ext4_fsblk_t pright; 96 ext4_fsblk_t pright;
96 /* how many blocks we want to allocate */ 97 /* how many blocks we want to allocate */
97 unsigned long len; 98 unsigned int len;
98 /* flags. see above EXT4_MB_HINT_* */ 99 /* flags. see above EXT4_MB_HINT_* */
99 unsigned long flags; 100 unsigned int flags;
100}; 101};
101 102
102/* 103/*
@@ -156,12 +157,12 @@ struct ext4_group_desc
156 __le32 bg_block_bitmap_lo; /* Blocks bitmap block */ 157 __le32 bg_block_bitmap_lo; /* Blocks bitmap block */
157 __le32 bg_inode_bitmap_lo; /* Inodes bitmap block */ 158 __le32 bg_inode_bitmap_lo; /* Inodes bitmap block */
158 __le32 bg_inode_table_lo; /* Inodes table block */ 159 __le32 bg_inode_table_lo; /* Inodes table block */
159 __le16 bg_free_blocks_count; /* Free blocks count */ 160 __le16 bg_free_blocks_count_lo;/* Free blocks count */
160 __le16 bg_free_inodes_count; /* Free inodes count */ 161 __le16 bg_free_inodes_count_lo;/* Free inodes count */
161 __le16 bg_used_dirs_count; /* Directories count */ 162 __le16 bg_used_dirs_count_lo; /* Directories count */
162 __le16 bg_flags; /* EXT4_BG_flags (INODE_UNINIT, etc) */ 163 __le16 bg_flags; /* EXT4_BG_flags (INODE_UNINIT, etc) */
163 __u32 bg_reserved[2]; /* Likely block/inode bitmap checksum */ 164 __u32 bg_reserved[2]; /* Likely block/inode bitmap checksum */
164 __le16 bg_itable_unused; /* Unused inodes count */ 165 __le16 bg_itable_unused_lo; /* Unused inodes count */
165 __le16 bg_checksum; /* crc16(sb_uuid+group+desc) */ 166 __le16 bg_checksum; /* crc16(sb_uuid+group+desc) */
166 __le32 bg_block_bitmap_hi; /* Blocks bitmap block MSB */ 167 __le32 bg_block_bitmap_hi; /* Blocks bitmap block MSB */
167 __le32 bg_inode_bitmap_hi; /* Inodes bitmap block MSB */ 168 __le32 bg_inode_bitmap_hi; /* Inodes bitmap block MSB */
@@ -169,7 +170,7 @@ struct ext4_group_desc
169 __le16 bg_free_blocks_count_hi;/* Free blocks count MSB */ 170 __le16 bg_free_blocks_count_hi;/* Free blocks count MSB */
170 __le16 bg_free_inodes_count_hi;/* Free inodes count MSB */ 171 __le16 bg_free_inodes_count_hi;/* Free inodes count MSB */
171 __le16 bg_used_dirs_count_hi; /* Directories count MSB */ 172 __le16 bg_used_dirs_count_hi; /* Directories count MSB */
172 __le16 bg_itable_unused_hi; /* Unused inodes count MSB */ 173 __le16 bg_itable_unused_hi; /* Unused inodes count MSB */
173 __u32 bg_reserved2[3]; 174 __u32 bg_reserved2[3];
174}; 175};
175 176
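
Renaming bg_free_blocks_count to bg_free_blocks_count_lo makes the split explicit: with 64-bit group descriptors each counter is a 16-bit _lo half plus a 16-bit _hi half, and the new ext4_free_blks_count()/ext4_free_blks_set() accessors declared further down hide the recombination. The arithmetic is presumably along these lines (a sketch that ignores the le16 byte-order conversion and the descriptor-size check the real accessors need):

	#include <stdio.h>
	#include <stdint.h>

	/* sketch: combine the _lo/_hi halves of an on-disk 32-bit counter */
	static uint32_t blks_count(uint16_t lo, uint16_t hi)
	{
		return (uint32_t)lo | ((uint32_t)hi << 16);
	}

	static void blks_set(uint16_t *lo, uint16_t *hi, uint32_t count)
	{
		*lo = (uint16_t)(count & 0xffff);
		*hi = (uint16_t)(count >> 16);
	}

	int main(void)
	{
		uint16_t lo, hi;

		blks_set(&lo, &hi, 70000);	/* does not fit in 16 bits */
		printf("%u\n", blks_count(lo, hi));	/* 70000 */
		return 0;
	}
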
@@ -328,6 +329,7 @@ struct ext4_mount_options {
328 uid_t s_resuid; 329 uid_t s_resuid;
329 gid_t s_resgid; 330 gid_t s_resgid;
330 unsigned long s_commit_interval; 331 unsigned long s_commit_interval;
332 u32 s_min_batch_time, s_max_batch_time;
331#ifdef CONFIG_QUOTA 333#ifdef CONFIG_QUOTA
332 int s_jquota_fmt; 334 int s_jquota_fmt;
333 char *s_qf_names[MAXQUOTAS]; 335 char *s_qf_names[MAXQUOTAS];
@@ -534,7 +536,6 @@ do { \
534#define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ 536#define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */
535#define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ 537#define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
536#define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ 538#define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
537#define EXT4_MOUNT_EXTENTS 0x400000 /* Extents support */
538#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ 539#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */
539#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ 540#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
540#define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */ 541#define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */
@@ -726,11 +727,11 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
726 */ 727 */
727 728
728#define EXT4_HAS_COMPAT_FEATURE(sb,mask) \ 729#define EXT4_HAS_COMPAT_FEATURE(sb,mask) \
729 (EXT4_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask)) 730 ((EXT4_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask)) != 0)
730#define EXT4_HAS_RO_COMPAT_FEATURE(sb,mask) \ 731#define EXT4_HAS_RO_COMPAT_FEATURE(sb,mask) \
731 (EXT4_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask)) 732 ((EXT4_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask)) != 0)
732#define EXT4_HAS_INCOMPAT_FEATURE(sb,mask) \ 733#define EXT4_HAS_INCOMPAT_FEATURE(sb,mask) \
733 (EXT4_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask)) 734 ((EXT4_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask)) != 0)
734#define EXT4_SET_COMPAT_FEATURE(sb,mask) \ 735#define EXT4_SET_COMPAT_FEATURE(sb,mask) \
735 EXT4_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask) 736 EXT4_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
736#define EXT4_SET_RO_COMPAT_FEATURE(sb,mask) \ 737#define EXT4_SET_RO_COMPAT_FEATURE(sb,mask) \
@@ -806,6 +807,12 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
806#define EXT4_DEFM_JMODE_WBACK 0x0060 807#define EXT4_DEFM_JMODE_WBACK 0x0060
807 808
808/* 809/*
810 * Default journal batch times
811 */
812#define EXT4_DEF_MIN_BATCH_TIME 0
813#define EXT4_DEF_MAX_BATCH_TIME 15000 /* 15ms */
814
815/*
809 * Structure of a directory entry 816 * Structure of a directory entry
810 */ 817 */
811#define EXT4_NAME_LEN 255 818#define EXT4_NAME_LEN 255
@@ -891,6 +898,9 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len)
891#define DX_HASH_LEGACY 0 898#define DX_HASH_LEGACY 0
892#define DX_HASH_HALF_MD4 1 899#define DX_HASH_HALF_MD4 1
893#define DX_HASH_TEA 2 900#define DX_HASH_TEA 2
901#define DX_HASH_LEGACY_UNSIGNED 3
902#define DX_HASH_HALF_MD4_UNSIGNED 4
903#define DX_HASH_TEA_UNSIGNED 5
894 904
895#ifdef __KERNEL__ 905#ifdef __KERNEL__
896 906
@@ -955,7 +965,7 @@ ext4_group_first_block_no(struct super_block *sb, ext4_group_t group_no)
955#define ERR_BAD_DX_DIR -75000 965#define ERR_BAD_DX_DIR -75000
956 966
957void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr, 967void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
958 unsigned long *blockgrpp, ext4_grpblk_t *offsetp); 968 ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
959 969
960extern struct proc_dir_entry *ext4_proc_root; 970extern struct proc_dir_entry *ext4_proc_root;
961 971
@@ -987,6 +997,9 @@ do { \
987# define ATTRIB_NORET __attribute__((noreturn)) 997# define ATTRIB_NORET __attribute__((noreturn))
988# define NORET_AND noreturn, 998# define NORET_AND noreturn,
989 999
1000/* bitmap.c */
1001extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
1002
990/* balloc.c */ 1003/* balloc.c */
991extern unsigned int ext4_block_group(struct super_block *sb, 1004extern unsigned int ext4_block_group(struct super_block *sb,
992 ext4_fsblk_t blocknr); 1005 ext4_fsblk_t blocknr);
@@ -995,20 +1008,14 @@ extern ext4_grpblk_t ext4_block_group_offset(struct super_block *sb,
995extern int ext4_bg_has_super(struct super_block *sb, ext4_group_t group); 1008extern int ext4_bg_has_super(struct super_block *sb, ext4_group_t group);
996extern unsigned long ext4_bg_num_gdb(struct super_block *sb, 1009extern unsigned long ext4_bg_num_gdb(struct super_block *sb,
997 ext4_group_t group); 1010 ext4_group_t group);
998extern ext4_fsblk_t ext4_new_meta_block(handle_t *handle, struct inode *inode,
999 ext4_fsblk_t goal, int *errp);
1000extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, 1011extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
1001 ext4_fsblk_t goal, unsigned long *count, int *errp); 1012 ext4_fsblk_t goal, unsigned long *count, int *errp);
1002extern ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
1003 ext4_lblk_t iblock, ext4_fsblk_t goal,
1004 unsigned long *count, int *errp);
1005extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, s64 nblocks); 1013extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
1006extern int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks); 1014extern int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
1007extern void ext4_free_blocks(handle_t *handle, struct inode *inode, 1015extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
1008 ext4_fsblk_t block, unsigned long count, int metadata); 1016 ext4_fsblk_t block, unsigned long count, int metadata);
1009extern void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb, 1017extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
1010 ext4_fsblk_t block, unsigned long count, 1018 ext4_fsblk_t block, unsigned long count);
1011 unsigned long *pdquot_freed_blocks);
1012extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *); 1019extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *);
1013extern void ext4_check_blocks_bitmap(struct super_block *); 1020extern void ext4_check_blocks_bitmap(struct super_block *);
1014extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb, 1021extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
@@ -1019,7 +1026,7 @@ extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);
1019/* dir.c */ 1026/* dir.c */
1020extern int ext4_check_dir_entry(const char *, struct inode *, 1027extern int ext4_check_dir_entry(const char *, struct inode *,
1021 struct ext4_dir_entry_2 *, 1028 struct ext4_dir_entry_2 *,
1022 struct buffer_head *, unsigned long); 1029 struct buffer_head *, unsigned int);
1023extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, 1030extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
1024 __u32 minor_hash, 1031 __u32 minor_hash,
1025 struct ext4_dir_entry_2 *dirent); 1032 struct ext4_dir_entry_2 *dirent);
@@ -1039,7 +1046,6 @@ extern struct inode * ext4_orphan_get(struct super_block *, unsigned long);
1039extern unsigned long ext4_count_free_inodes(struct super_block *); 1046extern unsigned long ext4_count_free_inodes(struct super_block *);
1040extern unsigned long ext4_count_dirs(struct super_block *); 1047extern unsigned long ext4_count_dirs(struct super_block *);
1041extern void ext4_check_inodes_bitmap(struct super_block *); 1048extern void ext4_check_inodes_bitmap(struct super_block *);
1042extern unsigned long ext4_count_free(struct buffer_head *, unsigned);
1043 1049
1044/* mballoc.c */ 1050/* mballoc.c */
1045extern long ext4_mb_stats; 1051extern long ext4_mb_stats;
@@ -1054,12 +1060,13 @@ extern int __init init_ext4_mballoc(void);
1054extern void exit_ext4_mballoc(void); 1060extern void exit_ext4_mballoc(void);
1055extern void ext4_mb_free_blocks(handle_t *, struct inode *, 1061extern void ext4_mb_free_blocks(handle_t *, struct inode *,
1056 unsigned long, unsigned long, int, unsigned long *); 1062 unsigned long, unsigned long, int, unsigned long *);
1057extern int ext4_mb_add_more_groupinfo(struct super_block *sb, 1063extern int ext4_mb_add_groupinfo(struct super_block *sb,
1058 ext4_group_t i, struct ext4_group_desc *desc); 1064 ext4_group_t i, struct ext4_group_desc *desc);
1059extern void ext4_mb_update_group_info(struct ext4_group_info *grp, 1065extern void ext4_mb_update_group_info(struct ext4_group_info *grp,
1060 ext4_grpblk_t add); 1066 ext4_grpblk_t add);
1061 1067extern int ext4_mb_get_buddy_cache_lock(struct super_block *, ext4_group_t);
1062 1068extern void ext4_mb_put_buddy_cache_lock(struct super_block *,
1069 ext4_group_t, int);
1063/* inode.c */ 1070/* inode.c */
1064int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode, 1071int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
1065 struct buffer_head *bh, ext4_fsblk_t blocknr); 1072 struct buffer_head *bh, ext4_fsblk_t blocknr);
@@ -1069,10 +1076,6 @@ struct buffer_head *ext4_bread(handle_t *, struct inode *,
1069 ext4_lblk_t, int, int *); 1076 ext4_lblk_t, int, int *);
1070int ext4_get_block(struct inode *inode, sector_t iblock, 1077int ext4_get_block(struct inode *inode, sector_t iblock,
1071 struct buffer_head *bh_result, int create); 1078 struct buffer_head *bh_result, int create);
1072int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
1073 ext4_lblk_t iblock, unsigned long maxblocks,
1074 struct buffer_head *bh_result,
1075 int create, int extend_disksize);
1076 1079
1077extern struct inode *ext4_iget(struct super_block *, unsigned long); 1080extern struct inode *ext4_iget(struct super_block *, unsigned long);
1078extern int ext4_write_inode(struct inode *, int); 1081extern int ext4_write_inode(struct inode *, int);
@@ -1123,6 +1126,9 @@ extern void ext4_abort(struct super_block *, const char *, const char *, ...)
1123 __attribute__ ((format (printf, 3, 4))); 1126 __attribute__ ((format (printf, 3, 4)));
1124extern void ext4_warning(struct super_block *, const char *, const char *, ...) 1127extern void ext4_warning(struct super_block *, const char *, const char *, ...)
1125 __attribute__ ((format (printf, 3, 4))); 1128 __attribute__ ((format (printf, 3, 4)));
1129extern void ext4_grp_locked_error(struct super_block *, ext4_group_t,
1130 const char *, const char *, ...)
1131 __attribute__ ((format (printf, 4, 5)));
1126extern void ext4_update_dynamic_rev(struct super_block *sb); 1132extern void ext4_update_dynamic_rev(struct super_block *sb);
1127extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb, 1133extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
1128 __u32 compat); 1134 __u32 compat);
@@ -1136,12 +1142,28 @@ extern ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
1136 struct ext4_group_desc *bg); 1142 struct ext4_group_desc *bg);
1137extern ext4_fsblk_t ext4_inode_table(struct super_block *sb, 1143extern ext4_fsblk_t ext4_inode_table(struct super_block *sb,
1138 struct ext4_group_desc *bg); 1144 struct ext4_group_desc *bg);
1145extern __u32 ext4_free_blks_count(struct super_block *sb,
1146 struct ext4_group_desc *bg);
1147extern __u32 ext4_free_inodes_count(struct super_block *sb,
1148 struct ext4_group_desc *bg);
1149extern __u32 ext4_used_dirs_count(struct super_block *sb,
1150 struct ext4_group_desc *bg);
1151extern __u32 ext4_itable_unused_count(struct super_block *sb,
1152 struct ext4_group_desc *bg);
1139extern void ext4_block_bitmap_set(struct super_block *sb, 1153extern void ext4_block_bitmap_set(struct super_block *sb,
1140 struct ext4_group_desc *bg, ext4_fsblk_t blk); 1154 struct ext4_group_desc *bg, ext4_fsblk_t blk);
1141extern void ext4_inode_bitmap_set(struct super_block *sb, 1155extern void ext4_inode_bitmap_set(struct super_block *sb,
1142 struct ext4_group_desc *bg, ext4_fsblk_t blk); 1156 struct ext4_group_desc *bg, ext4_fsblk_t blk);
1143extern void ext4_inode_table_set(struct super_block *sb, 1157extern void ext4_inode_table_set(struct super_block *sb,
1144 struct ext4_group_desc *bg, ext4_fsblk_t blk); 1158 struct ext4_group_desc *bg, ext4_fsblk_t blk);
1159extern void ext4_free_blks_set(struct super_block *sb,
1160 struct ext4_group_desc *bg, __u32 count);
1161extern void ext4_free_inodes_set(struct super_block *sb,
1162 struct ext4_group_desc *bg, __u32 count);
1163extern void ext4_used_dirs_set(struct super_block *sb,
1164 struct ext4_group_desc *bg, __u32 count);
1165extern void ext4_itable_unused_set(struct super_block *sb,
1166 struct ext4_group_desc *bg, __u32 count);
1145 1167
1146static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es) 1168static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
1147{ 1169{
@@ -1246,6 +1268,50 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
1246 return ; 1268 return ;
1247} 1269}
1248 1270
1271struct ext4_group_info {
1272 unsigned long bb_state;
1273 struct rb_root bb_free_root;
1274 unsigned short bb_first_free;
1275 unsigned short bb_free;
1276 unsigned short bb_fragments;
1277 struct list_head bb_prealloc_list;
1278#ifdef DOUBLE_CHECK
1279 void *bb_bitmap;
1280#endif
1281 struct rw_semaphore alloc_sem;
1282 unsigned short bb_counters[];
1283};
1284
1285#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
1286#define EXT4_GROUP_INFO_LOCKED_BIT 1
1287
1288#define EXT4_MB_GRP_NEED_INIT(grp) \
1289 (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
1290
1291static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
1292{
1293 struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
1294
1295 bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
1296}
1297
1298static inline void ext4_unlock_group(struct super_block *sb,
1299 ext4_group_t group)
1300{
1301 struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
1302
1303 bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
1304}
1305
1306static inline int ext4_is_group_locked(struct super_block *sb,
1307 ext4_group_t group)
1308{
1309 struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
1310
1311 return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
1312 &(grinfo->bb_state));
1313}
1314
1249/* 1315/*
1250 * Inodes and files operations 1316 * Inodes and files operations
1251 */ 1317 */
@@ -1271,18 +1337,38 @@ extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
1271extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, 1337extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
1272 int chunk); 1338 int chunk);
1273extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, 1339extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
1274 ext4_lblk_t iblock, 1340 ext4_lblk_t iblock, unsigned int max_blocks,
1275 unsigned long max_blocks, struct buffer_head *bh_result, 1341 struct buffer_head *bh_result,
1276 int create, int extend_disksize); 1342 int create, int extend_disksize);
1277extern void ext4_ext_truncate(struct inode *); 1343extern void ext4_ext_truncate(struct inode *);
1278extern void ext4_ext_init(struct super_block *); 1344extern void ext4_ext_init(struct super_block *);
1279extern void ext4_ext_release(struct super_block *); 1345extern void ext4_ext_release(struct super_block *);
1280extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, 1346extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
1281 loff_t len); 1347 loff_t len);
1282extern int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, 1348extern int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode,
1283 sector_t block, unsigned long max_blocks, 1349 sector_t block, unsigned int max_blocks,
1284 struct buffer_head *bh, int create, 1350 struct buffer_head *bh, int create,
1285 int extend_disksize, int flag); 1351 int extend_disksize, int flag);
1352extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1353 __u64 start, __u64 len);
1354
1355/*
1356 * Add new method to test whether block and inode bitmaps are properly
1357 * initialized. With uninit_bg, reading the block from disk is not enough
1358 * to mark the bitmap uptodate; we also need to zero out the bitmap.
1359 */
1360#define BH_BITMAP_UPTODATE BH_JBDPrivateStart
1361
1362static inline int bitmap_uptodate(struct buffer_head *bh)
1363{
1364 return (buffer_uptodate(bh) &&
1365 test_bit(BH_BITMAP_UPTODATE, &(bh)->b_state));
1366}
1367static inline void set_bitmap_uptodate(struct buffer_head *bh)
1368{
1369 set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
1370}
1371
1286#endif /* __KERNEL__ */ 1372#endif /* __KERNEL__ */
1287 1373
1288#endif /* _EXT4_H */ 1374#endif /* _EXT4_H */
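The new per-group helpers above serialize group state through a bit spinlock stored in bb_state. A minimal caller sketch (illustrative only; the helpers are the ones declared in the hunk above, while the caller and its update are hypothetical):

/*
 * Hedged sketch, assuming kernel context: bb_* fields of
 * struct ext4_group_info are only touched under the group lock.
 */
static void example_group_update(struct super_block *sb, ext4_group_t group,
				 unsigned short freed)
{
	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);

	ext4_lock_group(sb, group);	/* bit_spin_lock on bb_state */
	grinfo->bb_free += freed;	/* protected by the group lock */
	ext4_unlock_group(sb, group);	/* pairs with the lock above */
}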
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index bec7ce59fc0d..18cb67b2cbbc 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -194,11 +194,6 @@ static inline unsigned short ext_depth(struct inode *inode)
194 return le16_to_cpu(ext_inode_hdr(inode)->eh_depth); 194 return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
195} 195}
196 196
197static inline void ext4_ext_tree_changed(struct inode *inode)
198{
199 EXT4_I(inode)->i_ext_generation++;
200}
201
202static inline void 197static inline void
203ext4_ext_invalidate_cache(struct inode *inode) 198ext4_ext_invalidate_cache(struct inode *inode)
204{ 199{
diff --git a/fs/ext4/ext4_i.h b/fs/ext4/ext4_i.h
index 5c124c0ac6d3..e69acc16f5c4 100644
--- a/fs/ext4/ext4_i.h
+++ b/fs/ext4/ext4_i.h
@@ -31,7 +31,7 @@ typedef unsigned long long ext4_fsblk_t;
31typedef __u32 ext4_lblk_t; 31typedef __u32 ext4_lblk_t;
32 32
33/* data type for block group number */ 33/* data type for block group number */
34typedef unsigned long ext4_group_t; 34typedef unsigned int ext4_group_t;
35 35
36#define rsv_start rsv_window._rsv_start 36#define rsv_start rsv_window._rsv_start
37#define rsv_end rsv_window._rsv_end 37#define rsv_end rsv_window._rsv_end
@@ -100,9 +100,6 @@ struct ext4_inode_info {
100 */ 100 */
101 loff_t i_disksize; 101 loff_t i_disksize;
102 102
103 /* on-disk additional length */
104 __u16 i_extra_isize;
105
106 /* 103 /*
107 * i_data_sem is for serialising ext4_truncate() against 104 * i_data_sem is for serialising ext4_truncate() against
108 * ext4_getblock(). In the 2.4 ext2 design, great chunks of inode's 105 * ext4_getblock(). In the 2.4 ext2 design, great chunks of inode's
@@ -117,7 +114,6 @@ struct ext4_inode_info {
117 struct inode vfs_inode; 114 struct inode vfs_inode;
118 struct jbd2_inode jinode; 115 struct jbd2_inode jinode;
119 116
120 unsigned long i_ext_generation;
121 struct ext4_ext_cache i_cached_extent; 117 struct ext4_ext_cache i_cached_extent;
122 /* 118 /*
123 * File creation time. Its function is same as that of 119 * File creation time. Its function is same as that of
@@ -130,10 +126,14 @@ struct ext4_inode_info {
130 spinlock_t i_prealloc_lock; 126 spinlock_t i_prealloc_lock;
131 127
132 /* allocation reservation info for delalloc */ 128 /* allocation reservation info for delalloc */
133 unsigned long i_reserved_data_blocks; 129 unsigned int i_reserved_data_blocks;
134 unsigned long i_reserved_meta_blocks; 130 unsigned int i_reserved_meta_blocks;
135 unsigned long i_allocated_meta_blocks; 131 unsigned int i_allocated_meta_blocks;
136 unsigned short i_delalloc_reserved_flag; 132 unsigned short i_delalloc_reserved_flag;
133
134 /* on-disk additional length */
135 __u16 i_extra_isize;
136
137 spinlock_t i_block_reservation_lock; 137 spinlock_t i_block_reservation_lock;
138}; 138};
139 139
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index c75384b34f2c..ad13a84644e1 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -7,53 +7,96 @@
7int __ext4_journal_get_undo_access(const char *where, handle_t *handle, 7int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
8 struct buffer_head *bh) 8 struct buffer_head *bh)
9{ 9{
10 int err = jbd2_journal_get_undo_access(handle, bh); 10 int err = 0;
11 if (err) 11
12 ext4_journal_abort_handle(where, __func__, bh, handle, err); 12 if (ext4_handle_valid(handle)) {
13 err = jbd2_journal_get_undo_access(handle, bh);
14 if (err)
15 ext4_journal_abort_handle(where, __func__, bh,
16 handle, err);
17 }
13 return err; 18 return err;
14} 19}
15 20
16int __ext4_journal_get_write_access(const char *where, handle_t *handle, 21int __ext4_journal_get_write_access(const char *where, handle_t *handle,
17 struct buffer_head *bh) 22 struct buffer_head *bh)
18{ 23{
19 int err = jbd2_journal_get_write_access(handle, bh); 24 int err = 0;
20 if (err) 25
21 ext4_journal_abort_handle(where, __func__, bh, handle, err); 26 if (ext4_handle_valid(handle)) {
27 err = jbd2_journal_get_write_access(handle, bh);
28 if (err)
29 ext4_journal_abort_handle(where, __func__, bh,
30 handle, err);
31 }
22 return err; 32 return err;
23} 33}
24 34
25int __ext4_journal_forget(const char *where, handle_t *handle, 35int __ext4_journal_forget(const char *where, handle_t *handle,
26 struct buffer_head *bh) 36 struct buffer_head *bh)
27{ 37{
28 int err = jbd2_journal_forget(handle, bh); 38 int err = 0;
29 if (err) 39
30 ext4_journal_abort_handle(where, __func__, bh, handle, err); 40 if (ext4_handle_valid(handle)) {
41 err = jbd2_journal_forget(handle, bh);
42 if (err)
43 ext4_journal_abort_handle(where, __func__, bh,
44 handle, err);
45 }
31 return err; 46 return err;
32} 47}
33 48
34int __ext4_journal_revoke(const char *where, handle_t *handle, 49int __ext4_journal_revoke(const char *where, handle_t *handle,
35 ext4_fsblk_t blocknr, struct buffer_head *bh) 50 ext4_fsblk_t blocknr, struct buffer_head *bh)
36{ 51{
37 int err = jbd2_journal_revoke(handle, blocknr, bh); 52 int err = 0;
38 if (err) 53
39 ext4_journal_abort_handle(where, __func__, bh, handle, err); 54 if (ext4_handle_valid(handle)) {
55 err = jbd2_journal_revoke(handle, blocknr, bh);
56 if (err)
57 ext4_journal_abort_handle(where, __func__, bh,
58 handle, err);
59 }
40 return err; 60 return err;
41} 61}
42 62
43int __ext4_journal_get_create_access(const char *where, 63int __ext4_journal_get_create_access(const char *where,
44 handle_t *handle, struct buffer_head *bh) 64 handle_t *handle, struct buffer_head *bh)
45{ 65{
46 int err = jbd2_journal_get_create_access(handle, bh); 66 int err = 0;
47 if (err) 67
48 ext4_journal_abort_handle(where, __func__, bh, handle, err); 68 if (ext4_handle_valid(handle)) {
69 err = jbd2_journal_get_create_access(handle, bh);
70 if (err)
71 ext4_journal_abort_handle(where, __func__, bh,
72 handle, err);
73 }
49 return err; 74 return err;
50} 75}
51 76
52int __ext4_journal_dirty_metadata(const char *where, 77int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
53 handle_t *handle, struct buffer_head *bh) 78 struct inode *inode, struct buffer_head *bh)
54{ 79{
55 int err = jbd2_journal_dirty_metadata(handle, bh); 80 int err = 0;
56 if (err) 81
57 ext4_journal_abort_handle(where, __func__, bh, handle, err); 82 if (ext4_handle_valid(handle)) {
83 err = jbd2_journal_dirty_metadata(handle, bh);
84 if (err)
85 ext4_journal_abort_handle(where, __func__, bh,
86 handle, err);
87 } else {
88 mark_buffer_dirty(bh);
89 if (inode && inode_needs_sync(inode)) {
90 sync_dirty_buffer(bh);
91 if (buffer_req(bh) && !buffer_uptodate(bh)) {
92 ext4_error(inode->i_sb, __func__,
93 "IO error syncing inode, "
94 "inode=%lu, block=%llu",
95 inode->i_ino,
96 (unsigned long long) bh->b_blocknr);
97 err = -EIO;
98 }
99 }
100 }
58 return err; 101 return err;
59} 102}
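Every wrapper in this file now follows one idiom: gate the jbd2 call on ext4_handle_valid() so the same code paths run with or without a journal, with __ext4_handle_dirty_metadata() additionally falling back to a plain (optionally synchronous) buffer write. A condensed sketch of the idiom, not itself part of the patch:

/*
 * Hedged sketch of the no-journal pattern used above: with a real
 * handle the jbd2 call runs and a failure aborts the handle; with
 * the no-journal sentinel the wrapper degrades to a no-op.
 */
static int example_journalled_op(const char *where, handle_t *handle,
				 struct buffer_head *bh)
{
	int err = 0;

	if (ext4_handle_valid(handle)) {
		err = jbd2_journal_get_write_access(handle, bh);
		if (err)
			ext4_journal_abort_handle(where, __func__, bh,
						  handle, err);
	}
	return err;
}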
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index b455c685a98b..be2f426f6805 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -32,8 +32,8 @@
32 * 5 levels of tree + root which are stored in the inode. */ 32 * 5 levels of tree + root which are stored in the inode. */
33 33
34#define EXT4_SINGLEDATA_TRANS_BLOCKS(sb) \ 34#define EXT4_SINGLEDATA_TRANS_BLOCKS(sb) \
35 (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS) \ 35 (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS) \
36 || test_opt(sb, EXTENTS) ? 27U : 8U) 36 ? 27U : 8U)
37 37
38/* Extended attribute operations touch at most two data buffers, 38/* Extended attribute operations touch at most two data buffers,
39 * two bitmap buffers, and two group summaries, in addition to the inode 39 * two bitmap buffers, and two group summaries, in addition to the inode
@@ -122,12 +122,6 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode);
122 * been done yet. 122 * been done yet.
123 */ 123 */
124 124
125static inline void ext4_journal_release_buffer(handle_t *handle,
126 struct buffer_head *bh)
127{
128 jbd2_journal_release_buffer(handle, bh);
129}
130
131void ext4_journal_abort_handle(const char *caller, const char *err_fn, 125void ext4_journal_abort_handle(const char *caller, const char *err_fn,
132 struct buffer_head *bh, handle_t *handle, int err); 126 struct buffer_head *bh, handle_t *handle, int err);
133 127
@@ -146,8 +140,8 @@ int __ext4_journal_revoke(const char *where, handle_t *handle,
146int __ext4_journal_get_create_access(const char *where, 140int __ext4_journal_get_create_access(const char *where,
147 handle_t *handle, struct buffer_head *bh); 141 handle_t *handle, struct buffer_head *bh);
148 142
149int __ext4_journal_dirty_metadata(const char *where, 143int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
150 handle_t *handle, struct buffer_head *bh); 144 struct inode *inode, struct buffer_head *bh);
151 145
152#define ext4_journal_get_undo_access(handle, bh) \ 146#define ext4_journal_get_undo_access(handle, bh) \
153 __ext4_journal_get_undo_access(__func__, (handle), (bh)) 147 __ext4_journal_get_undo_access(__func__, (handle), (bh))
@@ -157,14 +151,57 @@ int __ext4_journal_dirty_metadata(const char *where,
157 __ext4_journal_revoke(__func__, (handle), (blocknr), (bh)) 151 __ext4_journal_revoke(__func__, (handle), (blocknr), (bh))
158#define ext4_journal_get_create_access(handle, bh) \ 152#define ext4_journal_get_create_access(handle, bh) \
159 __ext4_journal_get_create_access(__func__, (handle), (bh)) 153 __ext4_journal_get_create_access(__func__, (handle), (bh))
160#define ext4_journal_dirty_metadata(handle, bh) \
161 __ext4_journal_dirty_metadata(__func__, (handle), (bh))
162#define ext4_journal_forget(handle, bh) \ 154#define ext4_journal_forget(handle, bh) \
163 __ext4_journal_forget(__func__, (handle), (bh)) 155 __ext4_journal_forget(__func__, (handle), (bh))
156#define ext4_handle_dirty_metadata(handle, inode, bh) \
157 __ext4_handle_dirty_metadata(__func__, (handle), (inode), (bh))
164 158
165handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks); 159handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
166int __ext4_journal_stop(const char *where, handle_t *handle); 160int __ext4_journal_stop(const char *where, handle_t *handle);
167 161
162#define EXT4_NOJOURNAL_HANDLE ((handle_t *) 0x1)
163
164static inline int ext4_handle_valid(handle_t *handle)
165{
166 if (handle == EXT4_NOJOURNAL_HANDLE)
167 return 0;
168 return 1;
169}
170
171static inline void ext4_handle_sync(handle_t *handle)
172{
173 if (ext4_handle_valid(handle))
174 handle->h_sync = 1;
175}
176
177static inline void ext4_handle_release_buffer(handle_t *handle,
178 struct buffer_head *bh)
179{
180 if (ext4_handle_valid(handle))
181 jbd2_journal_release_buffer(handle, bh);
182}
183
184static inline int ext4_handle_is_aborted(handle_t *handle)
185{
186 if (ext4_handle_valid(handle))
187 return is_handle_aborted(handle);
188 return 0;
189}
190
191static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
192{
193 if (ext4_handle_valid(handle) && handle->h_buffer_credits < needed)
194 return 0;
195 return 1;
196}
197
198static inline void ext4_journal_release_buffer(handle_t *handle,
199 struct buffer_head *bh)
200{
201 if (ext4_handle_valid(handle))
202 jbd2_journal_release_buffer(handle, bh);
203}
204
168static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks) 205static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks)
169{ 206{
170 return ext4_journal_start_sb(inode->i_sb, nblocks); 207 return ext4_journal_start_sb(inode->i_sb, nblocks);
@@ -180,27 +217,37 @@ static inline handle_t *ext4_journal_current_handle(void)
180 217
181static inline int ext4_journal_extend(handle_t *handle, int nblocks) 218static inline int ext4_journal_extend(handle_t *handle, int nblocks)
182{ 219{
183 return jbd2_journal_extend(handle, nblocks); 220 if (ext4_handle_valid(handle))
221 return jbd2_journal_extend(handle, nblocks);
222 return 0;
184} 223}
185 224
186static inline int ext4_journal_restart(handle_t *handle, int nblocks) 225static inline int ext4_journal_restart(handle_t *handle, int nblocks)
187{ 226{
188 return jbd2_journal_restart(handle, nblocks); 227 if (ext4_handle_valid(handle))
228 return jbd2_journal_restart(handle, nblocks);
229 return 0;
189} 230}
190 231
191static inline int ext4_journal_blocks_per_page(struct inode *inode) 232static inline int ext4_journal_blocks_per_page(struct inode *inode)
192{ 233{
193 return jbd2_journal_blocks_per_page(inode); 234 if (EXT4_JOURNAL(inode) != NULL)
235 return jbd2_journal_blocks_per_page(inode);
236 return 0;
194} 237}
195 238
196static inline int ext4_journal_force_commit(journal_t *journal) 239static inline int ext4_journal_force_commit(journal_t *journal)
197{ 240{
198 return jbd2_journal_force_commit(journal); 241 if (journal)
242 return jbd2_journal_force_commit(journal);
243 return 0;
199} 244}
200 245
201static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode) 246static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode)
202{ 247{
203 return jbd2_journal_file_inode(handle, &EXT4_I(inode)->jinode); 248 if (ext4_handle_valid(handle))
249 return jbd2_journal_file_inode(handle, &EXT4_I(inode)->jinode);
250 return 0;
204} 251}
205 252
206/* super.c */ 253/* super.c */
@@ -208,6 +255,8 @@ int ext4_force_commit(struct super_block *sb);
208 255
209static inline int ext4_should_journal_data(struct inode *inode) 256static inline int ext4_should_journal_data(struct inode *inode)
210{ 257{
258 if (EXT4_JOURNAL(inode) == NULL)
259 return 0;
211 if (!S_ISREG(inode->i_mode)) 260 if (!S_ISREG(inode->i_mode))
212 return 1; 261 return 1;
213 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) 262 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
@@ -219,6 +268,8 @@ static inline int ext4_should_journal_data(struct inode *inode)
219 268
220static inline int ext4_should_order_data(struct inode *inode) 269static inline int ext4_should_order_data(struct inode *inode)
221{ 270{
271 if (EXT4_JOURNAL(inode) == NULL)
272 return 0;
222 if (!S_ISREG(inode->i_mode)) 273 if (!S_ISREG(inode->i_mode))
223 return 0; 274 return 0;
224 if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) 275 if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
@@ -230,6 +281,8 @@ static inline int ext4_should_order_data(struct inode *inode)
230 281
231static inline int ext4_should_writeback_data(struct inode *inode) 282static inline int ext4_should_writeback_data(struct inode *inode)
232{ 283{
284 if (EXT4_JOURNAL(inode) == NULL)
285 return 0;
233 if (!S_ISREG(inode->i_mode)) 286 if (!S_ISREG(inode->i_mode))
234 return 0; 287 return 0;
235 if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) 288 if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
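With the sentinel, handle validity is a single pointer comparison, and the credit and transaction-management helpers all short-circuit in no-journal mode. A small illustration of the resulting semantics (derived from the inlines above; the checks themselves are hypothetical):

static void example_nojournal_semantics(void)
{
	handle_t *h = EXT4_NOJOURNAL_HANDLE;	/* (handle_t *) 0x1 */

	BUG_ON(ext4_handle_valid(h));			/* sentinel, not jbd2 */
	BUG_ON(!ext4_handle_has_enough_credits(h, 100));/* always enough */
	BUG_ON(ext4_journal_extend(h, 10) != 0);	/* no-op, succeeds */
	BUG_ON(ext4_handle_is_aborted(h));		/* never aborted */
}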
diff --git a/fs/ext4/ext4_sb.h b/fs/ext4/ext4_sb.h
index b21f16713db0..039b6ea1a042 100644
--- a/fs/ext4/ext4_sb.h
+++ b/fs/ext4/ext4_sb.h
@@ -57,6 +57,7 @@ struct ext4_sb_info {
57 u32 s_next_generation; 57 u32 s_next_generation;
58 u32 s_hash_seed[4]; 58 u32 s_hash_seed[4];
59 int s_def_hash_version; 59 int s_def_hash_version;
60 int s_hash_unsigned; /* 3 if hash should be unsigned, 0 if not */
60 struct percpu_counter s_freeblocks_counter; 61 struct percpu_counter s_freeblocks_counter;
61 struct percpu_counter s_freeinodes_counter; 62 struct percpu_counter s_freeinodes_counter;
62 struct percpu_counter s_dirs_counter; 63 struct percpu_counter s_dirs_counter;
@@ -73,6 +74,8 @@ struct ext4_sb_info {
73 struct journal_s *s_journal; 74 struct journal_s *s_journal;
74 struct list_head s_orphan; 75 struct list_head s_orphan;
75 unsigned long s_commit_interval; 76 unsigned long s_commit_interval;
77 u32 s_max_batch_time;
78 u32 s_min_batch_time;
76 struct block_device *journal_bdev; 79 struct block_device *journal_bdev;
77#ifdef CONFIG_JBD2_DEBUG 80#ifdef CONFIG_JBD2_DEBUG
78 struct timer_list turn_ro_timer; /* For turning read-only (crash simulation) */ 81 struct timer_list turn_ro_timer; /* For turning read-only (crash simulation) */
@@ -101,7 +104,8 @@ struct ext4_sb_info {
101 spinlock_t s_reserve_lock; 104 spinlock_t s_reserve_lock;
102 spinlock_t s_md_lock; 105 spinlock_t s_md_lock;
103 tid_t s_last_transaction; 106 tid_t s_last_transaction;
104 unsigned short *s_mb_offsets, *s_mb_maxs; 107 unsigned short *s_mb_offsets;
108 unsigned int *s_mb_maxs;
105 109
106 /* tunables */ 110 /* tunables */
107 unsigned long s_stripe; 111 unsigned long s_stripe;
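Widening s_mb_maxs from unsigned short to unsigned int matters because the order-0 entry holds the number of bits in one block bitmap, which outgrows 16 bits for large block sizes. A quick worked check (values assumed from mballoc's s_mb_maxs[0] = blocksize << 3 initialization):

/*
 *   4KB blocks:   4096 << 3 =  32768   fits in unsigned short
 *  16KB blocks:  16384 << 3 = 131072   overflows unsigned short
 *  64KB blocks:  65536 << 3 = 524288   overflows unsigned short
 *
 * s_mb_offsets values stay below the block size, so unsigned short
 * remains sufficient there.
 */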
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index ea2ce3c0ae66..54bf0623a9ae 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -97,6 +97,8 @@ static int ext4_ext_journal_restart(handle_t *handle, int needed)
97{ 97{
98 int err; 98 int err;
99 99
100 if (!ext4_handle_valid(handle))
101 return 0;
100 if (handle->h_buffer_credits > needed) 102 if (handle->h_buffer_credits > needed)
101 return 0; 103 return 0;
102 err = ext4_journal_extend(handle, needed); 104 err = ext4_journal_extend(handle, needed);
@@ -134,7 +136,7 @@ static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
134 int err; 136 int err;
135 if (path->p_bh) { 137 if (path->p_bh) {
136 /* path points to block */ 138 /* path points to block */
137 err = ext4_journal_dirty_metadata(handle, path->p_bh); 139 err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
138 } else { 140 } else {
139 /* path points to leaf/index in inode body */ 141 /* path points to leaf/index in inode body */
140 err = ext4_mark_inode_dirty(handle, inode); 142 err = ext4_mark_inode_dirty(handle, inode);
@@ -191,7 +193,7 @@ ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
191 ext4_fsblk_t goal, newblock; 193 ext4_fsblk_t goal, newblock;
192 194
193 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); 195 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
194 newblock = ext4_new_meta_block(handle, inode, goal, err); 196 newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
195 return newblock; 197 return newblock;
196} 198}
197 199
@@ -780,7 +782,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
780 set_buffer_uptodate(bh); 782 set_buffer_uptodate(bh);
781 unlock_buffer(bh); 783 unlock_buffer(bh);
782 784
783 err = ext4_journal_dirty_metadata(handle, bh); 785 err = ext4_handle_dirty_metadata(handle, inode, bh);
784 if (err) 786 if (err)
785 goto cleanup; 787 goto cleanup;
786 brelse(bh); 788 brelse(bh);
@@ -859,7 +861,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
859 set_buffer_uptodate(bh); 861 set_buffer_uptodate(bh);
860 unlock_buffer(bh); 862 unlock_buffer(bh);
861 863
862 err = ext4_journal_dirty_metadata(handle, bh); 864 err = ext4_handle_dirty_metadata(handle, inode, bh);
863 if (err) 865 if (err)
864 goto cleanup; 866 goto cleanup;
865 brelse(bh); 867 brelse(bh);
@@ -955,7 +957,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
955 set_buffer_uptodate(bh); 957 set_buffer_uptodate(bh);
956 unlock_buffer(bh); 958 unlock_buffer(bh);
957 959
958 err = ext4_journal_dirty_metadata(handle, bh); 960 err = ext4_handle_dirty_metadata(handle, inode, bh);
959 if (err) 961 if (err)
960 goto out; 962 goto out;
961 963
@@ -1160,15 +1162,13 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
1160 while (--depth >= 0) { 1162 while (--depth >= 0) {
1161 ix = path[depth].p_idx; 1163 ix = path[depth].p_idx;
1162 if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 1164 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1163 break; 1165 goto got_index;
1164 } 1166 }
1165 1167
1166 if (depth < 0) { 1168 /* we've gone up to the root and found no index to the right */
1167 /* we've gone up to the root and 1169 return 0;
1168 * found no index to the right */
1169 return 0;
1170 }
1171 1170
1171got_index:
1172 /* we've found index to the right, let's 1172 /* we've found index to the right, let's
1173 * follow it and find the closest allocated 1173 * follow it and find the closest allocated
1174 * block to the right */ 1174 * block to the right */
@@ -1201,7 +1201,6 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
1201 *phys = ext_pblock(ex); 1201 *phys = ext_pblock(ex);
1202 put_bh(bh); 1202 put_bh(bh);
1203 return 0; 1203 return 0;
1204
1205} 1204}
1206 1205
1207/* 1206/*
@@ -1622,7 +1621,6 @@ cleanup:
1622 ext4_ext_drop_refs(npath); 1621 ext4_ext_drop_refs(npath);
1623 kfree(npath); 1622 kfree(npath);
1624 } 1623 }
1625 ext4_ext_tree_changed(inode);
1626 ext4_ext_invalidate_cache(inode); 1624 ext4_ext_invalidate_cache(inode);
1627 return err; 1625 return err;
1628} 1626}
@@ -2233,7 +2231,6 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
2233 } 2231 }
2234 } 2232 }
2235out: 2233out:
2236 ext4_ext_tree_changed(inode);
2237 ext4_ext_drop_refs(path); 2234 ext4_ext_drop_refs(path);
2238 kfree(path); 2235 kfree(path);
2239 ext4_journal_stop(handle); 2236 ext4_journal_stop(handle);
@@ -2250,7 +2247,7 @@ void ext4_ext_init(struct super_block *sb)
2250 * possible initialization would be here 2247 * possible initialization would be here
2251 */ 2248 */
2252 2249
2253 if (test_opt(sb, EXTENTS)) { 2250 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2254 printk(KERN_INFO "EXT4-fs: file extents enabled"); 2251 printk(KERN_INFO "EXT4-fs: file extents enabled");
2255#ifdef AGGRESSIVE_TEST 2252#ifdef AGGRESSIVE_TEST
2256 printk(", aggressive tests"); 2253 printk(", aggressive tests");
@@ -2275,7 +2272,7 @@ void ext4_ext_init(struct super_block *sb)
2275 */ 2272 */
2276void ext4_ext_release(struct super_block *sb) 2273void ext4_ext_release(struct super_block *sb)
2277{ 2274{
2278 if (!test_opt(sb, EXTENTS)) 2275 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2279 return; 2276 return;
2280 2277
2281#ifdef EXTENTS_STATS 2278#ifdef EXTENTS_STATS
@@ -2380,7 +2377,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2380 struct inode *inode, 2377 struct inode *inode,
2381 struct ext4_ext_path *path, 2378 struct ext4_ext_path *path,
2382 ext4_lblk_t iblock, 2379 ext4_lblk_t iblock,
2383 unsigned long max_blocks) 2380 unsigned int max_blocks)
2384{ 2381{
2385 struct ext4_extent *ex, newex, orig_ex; 2382 struct ext4_extent *ex, newex, orig_ex;
2386 struct ext4_extent *ex1 = NULL; 2383 struct ext4_extent *ex1 = NULL;
@@ -2536,7 +2533,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2536 */ 2533 */
2537 newdepth = ext_depth(inode); 2534 newdepth = ext_depth(inode);
2538 /* 2535 /*
2539 * update the extent length after successfull insert of the 2536 * update the extent length after successful insert of the
2540 * split extent 2537 * split extent
2541 */ 2538 */
2542 orig_ex.ee_len = cpu_to_le16(ee_len - 2539 orig_ex.ee_len = cpu_to_le16(ee_len -
@@ -2678,26 +2675,26 @@ fix_extent_len:
2678 */ 2675 */
2679int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, 2676int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2680 ext4_lblk_t iblock, 2677 ext4_lblk_t iblock,
2681 unsigned long max_blocks, struct buffer_head *bh_result, 2678 unsigned int max_blocks, struct buffer_head *bh_result,
2682 int create, int extend_disksize) 2679 int create, int extend_disksize)
2683{ 2680{
2684 struct ext4_ext_path *path = NULL; 2681 struct ext4_ext_path *path = NULL;
2685 struct ext4_extent_header *eh; 2682 struct ext4_extent_header *eh;
2686 struct ext4_extent newex, *ex; 2683 struct ext4_extent newex, *ex;
2687 ext4_fsblk_t goal, newblock; 2684 ext4_fsblk_t newblock;
2688 int err = 0, depth, ret; 2685 int err = 0, depth, ret, cache_type;
2689 unsigned long allocated = 0; 2686 unsigned int allocated = 0;
2690 struct ext4_allocation_request ar; 2687 struct ext4_allocation_request ar;
2691 loff_t disksize; 2688 loff_t disksize;
2692 2689
2693 __clear_bit(BH_New, &bh_result->b_state); 2690 __clear_bit(BH_New, &bh_result->b_state);
2694 ext_debug("blocks %u/%lu requested for inode %u\n", 2691 ext_debug("blocks %u/%u requested for inode %u\n",
2695 iblock, max_blocks, inode->i_ino); 2692 iblock, max_blocks, inode->i_ino);
2696 2693
2697 /* check in cache */ 2694 /* check in cache */
2698 goal = ext4_ext_in_cache(inode, iblock, &newex); 2695 cache_type = ext4_ext_in_cache(inode, iblock, &newex);
2699 if (goal) { 2696 if (cache_type) {
2700 if (goal == EXT4_EXT_CACHE_GAP) { 2697 if (cache_type == EXT4_EXT_CACHE_GAP) {
2701 if (!create) { 2698 if (!create) {
2702 /* 2699 /*
2703 * block isn't allocated yet and 2700 * block isn't allocated yet and
@@ -2706,7 +2703,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2706 goto out2; 2703 goto out2;
2707 } 2704 }
2708 /* we should allocate requested block */ 2705 /* we should allocate requested block */
2709 } else if (goal == EXT4_EXT_CACHE_EXTENT) { 2706 } else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
2710 /* block is already allocated */ 2707 /* block is already allocated */
2711 newblock = iblock 2708 newblock = iblock
2712 - le32_to_cpu(newex.ee_block) 2709 - le32_to_cpu(newex.ee_block)
@@ -2854,7 +2851,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2854 if (!newblock) 2851 if (!newblock)
2855 goto out2; 2852 goto out2;
2856 ext_debug("allocate new block: goal %llu, found %llu/%lu\n", 2853 ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
2857 goal, newblock, allocated); 2854 ar.goal, newblock, allocated);
2858 2855
2859 /* try to insert new extent into found leaf and return */ 2856 /* try to insert new extent into found leaf and return */
2860 ext4_ext_store_pblock(&newex, newblock); 2857 ext4_ext_store_pblock(&newex, newblock);
@@ -2950,7 +2947,7 @@ void ext4_ext_truncate(struct inode *inode)
2950 * transaction synchronous. 2947 * transaction synchronous.
2951 */ 2948 */
2952 if (IS_SYNC(inode)) 2949 if (IS_SYNC(inode))
2953 handle->h_sync = 1; 2950 ext4_handle_sync(handle);
2954 2951
2955out_stop: 2952out_stop:
2956 up_write(&EXT4_I(inode)->i_data_sem); 2953 up_write(&EXT4_I(inode)->i_data_sem);
@@ -3004,7 +3001,7 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
3004 handle_t *handle; 3001 handle_t *handle;
3005 ext4_lblk_t block; 3002 ext4_lblk_t block;
3006 loff_t new_size; 3003 loff_t new_size;
3007 unsigned long max_blocks; 3004 unsigned int max_blocks;
3008 int ret = 0; 3005 int ret = 0;
3009 int ret2 = 0; 3006 int ret2 = 0;
3010 int retries = 0; 3007 int retries = 0;
@@ -3083,7 +3080,7 @@ retry:
3083/* 3080/*
3084 * Callback function called for each extent to gather FIEMAP information. 3081 * Callback function called for each extent to gather FIEMAP information.
3085 */ 3082 */
3086int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, 3083static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
3087 struct ext4_ext_cache *newex, struct ext4_extent *ex, 3084 struct ext4_ext_cache *newex, struct ext4_extent *ex,
3088 void *data) 3085 void *data)
3089{ 3086{
@@ -3152,7 +3149,8 @@ int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
3152/* fiemap flags we can handle specified here */ 3149/* fiemap flags we can handle specified here */
3153#define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) 3150#define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
3154 3151
3155int ext4_xattr_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo) 3152static int ext4_xattr_fiemap(struct inode *inode,
3153 struct fiemap_extent_info *fieinfo)
3156{ 3154{
3157 __u64 physical = 0; 3155 __u64 physical = 0;
3158 __u64 length; 3156 __u64 length;
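One clarification worth calling out in ext4_ext_get_blocks(): the variable renamed from goal to cache_type carries the classification returned by ext4_ext_in_cache(), not an allocation goal. Restated as a sketch, with the constants used in the hunks above:

/* Hedged sketch of the extent-cache branch after the rename. */
cache_type = ext4_ext_in_cache(inode, iblock, &newex);
if (cache_type == EXT4_EXT_CACHE_GAP) {
	if (!create)
		goto out2;	/* known hole and caller won't allocate */
	/* otherwise fall through and allocate the block */
} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
	/* already mapped: translate logical block to physical */
	newblock = iblock - le32_to_cpu(newex.ee_block)
			+ ext_pblock(&newex);
}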
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6bd11fba71f7..f731cb545a03 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -140,9 +140,6 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
140 return 0; 140 return 0;
141} 141}
142 142
143extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
144 __u64 start, __u64 len);
145
146const struct file_operations ext4_file_operations = { 143const struct file_operations ext4_file_operations = {
147 .llseek = generic_file_llseek, 144 .llseek = generic_file_llseek,
148 .read = do_sync_read, 145 .read = do_sync_read,
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index 556ca8eba3db..ac8f168c8ab4 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -35,23 +35,71 @@ static void TEA_transform(__u32 buf[4], __u32 const in[])
35 35
36 36
37/* The old legacy hash */ 37/* The old legacy hash */
38static __u32 dx_hack_hash(const char *name, int len) 38static __u32 dx_hack_hash_unsigned(const char *name, int len)
39{ 39{
40 __u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; 40 __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
41 const unsigned char *ucp = (const unsigned char *) name;
42
43 while (len--) {
44 hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));
45
46 if (hash & 0x80000000)
47 hash -= 0x7fffffff;
48 hash1 = hash0;
49 hash0 = hash;
50 }
51 return hash0 << 1;
52}
53
54static __u32 dx_hack_hash_signed(const char *name, int len)
55{
56 __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
57 const signed char *scp = (const signed char *) name;
58
41 while (len--) { 59 while (len--) {
42 __u32 hash = hash1 + (hash0 ^ (*name++ * 7152373)); 60 hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));
43 61
44 if (hash & 0x80000000) hash -= 0x7fffffff; 62 if (hash & 0x80000000)
63 hash -= 0x7fffffff;
45 hash1 = hash0; 64 hash1 = hash0;
46 hash0 = hash; 65 hash0 = hash;
47 } 66 }
48 return (hash0 << 1); 67 return hash0 << 1;
68}
69
70static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num)
71{
72 __u32 pad, val;
73 int i;
74 const signed char *scp = (const signed char *) msg;
75
76 pad = (__u32)len | ((__u32)len << 8);
77 pad |= pad << 16;
78
79 val = pad;
80 if (len > num*4)
81 len = num * 4;
82 for (i = 0; i < len; i++) {
83 if ((i % 4) == 0)
84 val = pad;
85 val = ((int) scp[i]) + (val << 8);
86 if ((i % 4) == 3) {
87 *buf++ = val;
88 val = pad;
89 num--;
90 }
91 }
92 if (--num >= 0)
93 *buf++ = val;
94 while (--num >= 0)
95 *buf++ = pad;
49} 96}
50 97
51static void str2hashbuf(const char *msg, int len, __u32 *buf, int num) 98static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num)
52{ 99{
53 __u32 pad, val; 100 __u32 pad, val;
54 int i; 101 int i;
102 const unsigned char *ucp = (const unsigned char *) msg;
55 103
56 pad = (__u32)len | ((__u32)len << 8); 104 pad = (__u32)len | ((__u32)len << 8);
57 pad |= pad << 16; 105 pad |= pad << 16;
@@ -62,7 +110,7 @@ static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
62 for (i = 0; i < len; i++) { 110 for (i = 0; i < len; i++) {
63 if ((i % 4) == 0) 111 if ((i % 4) == 0)
64 val = pad; 112 val = pad;
65 val = msg[i] + (val << 8); 113 val = ((int) ucp[i]) + (val << 8);
66 if ((i % 4) == 3) { 114 if ((i % 4) == 3) {
67 *buf++ = val; 115 *buf++ = val;
68 val = pad; 116 val = pad;
@@ -95,6 +143,8 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
95 const char *p; 143 const char *p;
96 int i; 144 int i;
97 __u32 in[8], buf[4]; 145 __u32 in[8], buf[4];
146 void (*str2hashbuf)(const char *, int, __u32 *, int) =
147 str2hashbuf_signed;
98 148
99 /* Initialize the default seed for the hash checksum functions */ 149 /* Initialize the default seed for the hash checksum functions */
100 buf[0] = 0x67452301; 150 buf[0] = 0x67452301;
@@ -113,13 +163,18 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
113 } 163 }
114 164
115 switch (hinfo->hash_version) { 165 switch (hinfo->hash_version) {
166 case DX_HASH_LEGACY_UNSIGNED:
167 hash = dx_hack_hash_unsigned(name, len);
168 break;
116 case DX_HASH_LEGACY: 169 case DX_HASH_LEGACY:
117 hash = dx_hack_hash(name, len); 170 hash = dx_hack_hash_signed(name, len);
118 break; 171 break;
172 case DX_HASH_HALF_MD4_UNSIGNED:
173 str2hashbuf = str2hashbuf_unsigned;
119 case DX_HASH_HALF_MD4: 174 case DX_HASH_HALF_MD4:
120 p = name; 175 p = name;
121 while (len > 0) { 176 while (len > 0) {
122 str2hashbuf(p, len, in, 8); 177 (*str2hashbuf)(p, len, in, 8);
123 half_md4_transform(buf, in); 178 half_md4_transform(buf, in);
124 len -= 32; 179 len -= 32;
125 p += 32; 180 p += 32;
@@ -127,10 +182,12 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
127 minor_hash = buf[2]; 182 minor_hash = buf[2];
128 hash = buf[1]; 183 hash = buf[1];
129 break; 184 break;
185 case DX_HASH_TEA_UNSIGNED:
186 str2hashbuf = str2hashbuf_unsigned;
130 case DX_HASH_TEA: 187 case DX_HASH_TEA:
131 p = name; 188 p = name;
132 while (len > 0) { 189 while (len > 0) {
133 str2hashbuf(p, len, in, 4); 190 (*str2hashbuf)(p, len, in, 4);
134 TEA_transform(buf, in); 191 TEA_transform(buf, in);
135 len -= 16; 192 len -= 16;
136 p += 16; 193 p += 16;
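The signed/unsigned split only changes results for names containing bytes >= 0x80, where a signed char sign-extends before the multiply; pure-ASCII names hash identically either way. A standalone userspace sketch (hypothetical harness; the two hash functions are copied verbatim from the hunk above):

#include <stdio.h>

typedef unsigned int __u32;

static __u32 dx_hack_hash_unsigned(const char *name, int len)
{
	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
	const unsigned char *ucp = (const unsigned char *) name;

	while (len--) {
		hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));
		if (hash & 0x80000000)
			hash -= 0x7fffffff;
		hash1 = hash0;
		hash0 = hash;
	}
	return hash0 << 1;
}

static __u32 dx_hack_hash_signed(const char *name, int len)
{
	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
	const signed char *scp = (const signed char *) name;

	while (len--) {
		hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));
		if (hash & 0x80000000)
			hash -= 0x7fffffff;
		hash1 = hash0;
		hash0 = hash;
	}
	return hash0 << 1;
}

int main(void)
{
	const char ascii[] = "foo";	/* no high-bit bytes */
	const char high[] = "f\xe9o";	/* 0xe9 sign-extends as signed char */

	printf("ascii: signed %#x unsigned %#x\n",
	       dx_hack_hash_signed(ascii, 3), dx_hack_hash_unsigned(ascii, 3));
	printf("high:  signed %#x unsigned %#x\n",
	       dx_hack_hash_signed(high, 3), dx_hack_hash_unsigned(high, 3));
	return 0;
}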
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 6e6052879aa2..4fb86a0061d0 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -74,17 +74,17 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
74 /* If checksum is bad mark all blocks and inodes in use to prevent 74 /* If checksum is bad mark all blocks and inodes in use to prevent
75 * allocation, essentially implementing a per-group read-only flag. */ 75 * allocation, essentially implementing a per-group read-only flag. */
76 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { 76 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
77 ext4_error(sb, __func__, "Checksum bad for group %lu\n", 77 ext4_error(sb, __func__, "Checksum bad for group %u",
78 block_group); 78 block_group);
79 gdp->bg_free_blocks_count = 0; 79 ext4_free_blks_set(sb, gdp, 0);
80 gdp->bg_free_inodes_count = 0; 80 ext4_free_inodes_set(sb, gdp, 0);
81 gdp->bg_itable_unused = 0; 81 ext4_itable_unused_set(sb, gdp, 0);
82 memset(bh->b_data, 0xff, sb->s_blocksize); 82 memset(bh->b_data, 0xff, sb->s_blocksize);
83 return 0; 83 return 0;
84 } 84 }
85 85
86 memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); 86 memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
87 mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb), 87 mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
88 bh->b_data); 88 bh->b_data);
89 89
90 return EXT4_INODES_PER_GROUP(sb); 90 return EXT4_INODES_PER_GROUP(sb);
@@ -111,29 +111,49 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
111 if (unlikely(!bh)) { 111 if (unlikely(!bh)) {
112 ext4_error(sb, __func__, 112 ext4_error(sb, __func__,
113 "Cannot read inode bitmap - " 113 "Cannot read inode bitmap - "
114 "block_group = %lu, inode_bitmap = %llu", 114 "block_group = %u, inode_bitmap = %llu",
115 block_group, bitmap_blk); 115 block_group, bitmap_blk);
116 return NULL; 116 return NULL;
117 } 117 }
118 if (buffer_uptodate(bh) && 118 if (bitmap_uptodate(bh))
119 !(desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
120 return bh; 119 return bh;
121 120
122 lock_buffer(bh); 121 lock_buffer(bh);
122 if (bitmap_uptodate(bh)) {
123 unlock_buffer(bh);
124 return bh;
125 }
123 spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group)); 126 spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
124 if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { 127 if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
125 ext4_init_inode_bitmap(sb, bh, block_group, desc); 128 ext4_init_inode_bitmap(sb, bh, block_group, desc);
129 set_bitmap_uptodate(bh);
126 set_buffer_uptodate(bh); 130 set_buffer_uptodate(bh);
127 unlock_buffer(bh);
128 spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group)); 131 spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
132 unlock_buffer(bh);
129 return bh; 133 return bh;
130 } 134 }
131 spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group)); 135 spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
136 if (buffer_uptodate(bh)) {
137 /*
138 * if not uninit if bh is uptodate,
139 * bitmap is also uptodate
140 */
141 set_bitmap_uptodate(bh);
142 unlock_buffer(bh);
143 return bh;
144 }
145 /*
146 * submit the buffer_head for read. We can
147 * safely mark the bitmap as uptodate now.
148 * We do it here so the bitmap uptodate bit
149 * gets set with the buffer lock held.
150 */
151 set_bitmap_uptodate(bh);
132 if (bh_submit_read(bh) < 0) { 152 if (bh_submit_read(bh) < 0) {
133 put_bh(bh); 153 put_bh(bh);
134 ext4_error(sb, __func__, 154 ext4_error(sb, __func__,
135 "Cannot read inode bitmap - " 155 "Cannot read inode bitmap - "
136 "block_group = %lu, inode_bitmap = %llu", 156 "block_group = %u, inode_bitmap = %llu",
137 block_group, bitmap_blk); 157 block_group, bitmap_blk);
138 return NULL; 158 return NULL;
139 } 159 }
@@ -168,7 +188,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
168 struct ext4_group_desc *gdp; 188 struct ext4_group_desc *gdp;
169 struct ext4_super_block *es; 189 struct ext4_super_block *es;
170 struct ext4_sb_info *sbi; 190 struct ext4_sb_info *sbi;
171 int fatal = 0, err; 191 int fatal = 0, err, count;
172 ext4_group_t flex_group; 192 ext4_group_t flex_group;
173 193
174 if (atomic_read(&inode->i_count) > 1) { 194 if (atomic_read(&inode->i_count) > 1) {
@@ -190,6 +210,11 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
190 210
191 ino = inode->i_ino; 211 ino = inode->i_ino;
192 ext4_debug("freeing inode %lu\n", ino); 212 ext4_debug("freeing inode %lu\n", ino);
213 trace_mark(ext4_free_inode,
214 "dev %s ino %lu mode %d uid %lu gid %lu bocks %llu",
215 sb->s_id, inode->i_ino, inode->i_mode,
216 (unsigned long) inode->i_uid, (unsigned long) inode->i_gid,
217 (unsigned long long) inode->i_blocks);
193 218
194 /* 219 /*
195 * Note: we must free any quota before locking the superblock, 220 * Note: we must free any quota before locking the superblock,
@@ -236,9 +261,12 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
236 261
237 if (gdp) { 262 if (gdp) {
238 spin_lock(sb_bgl_lock(sbi, block_group)); 263 spin_lock(sb_bgl_lock(sbi, block_group));
239 le16_add_cpu(&gdp->bg_free_inodes_count, 1); 264 count = ext4_free_inodes_count(sb, gdp) + 1;
240 if (is_directory) 265 ext4_free_inodes_set(sb, gdp, count);
241 le16_add_cpu(&gdp->bg_used_dirs_count, -1); 266 if (is_directory) {
267 count = ext4_used_dirs_count(sb, gdp) - 1;
268 ext4_used_dirs_set(sb, gdp, count);
269 }
242 gdp->bg_checksum = ext4_group_desc_csum(sbi, 270 gdp->bg_checksum = ext4_group_desc_csum(sbi,
243 block_group, gdp); 271 block_group, gdp);
244 spin_unlock(sb_bgl_lock(sbi, block_group)); 272 spin_unlock(sb_bgl_lock(sbi, block_group));
@@ -253,12 +281,12 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
253 spin_unlock(sb_bgl_lock(sbi, flex_group)); 281 spin_unlock(sb_bgl_lock(sbi, flex_group));
254 } 282 }
255 } 283 }
256 BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata"); 284 BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
257 err = ext4_journal_dirty_metadata(handle, bh2); 285 err = ext4_handle_dirty_metadata(handle, NULL, bh2);
258 if (!fatal) fatal = err; 286 if (!fatal) fatal = err;
259 } 287 }
260 BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata"); 288 BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
261 err = ext4_journal_dirty_metadata(handle, bitmap_bh); 289 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
262 if (!fatal) 290 if (!fatal)
263 fatal = err; 291 fatal = err;
264 sb->s_dirt = 1; 292 sb->s_dirt = 1;
@@ -291,13 +319,13 @@ static int find_group_dir(struct super_block *sb, struct inode *parent,
291 319
292 for (group = 0; group < ngroups; group++) { 320 for (group = 0; group < ngroups; group++) {
293 desc = ext4_get_group_desc(sb, group, NULL); 321 desc = ext4_get_group_desc(sb, group, NULL);
294 if (!desc || !desc->bg_free_inodes_count) 322 if (!desc || !ext4_free_inodes_count(sb, desc))
295 continue; 323 continue;
296 if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) 324 if (ext4_free_inodes_count(sb, desc) < avefreei)
297 continue; 325 continue;
298 if (!best_desc || 326 if (!best_desc ||
299 (le16_to_cpu(desc->bg_free_blocks_count) > 327 (ext4_free_blks_count(sb, desc) >
300 le16_to_cpu(best_desc->bg_free_blocks_count))) { 328 ext4_free_blks_count(sb, best_desc))) {
301 *best_group = group; 329 *best_group = group;
302 best_desc = desc; 330 best_desc = desc;
303 ret = 0; 331 ret = 0;
@@ -369,7 +397,7 @@ found_flexbg:
369 for (i = best_flex * flex_size; i < ngroups && 397 for (i = best_flex * flex_size; i < ngroups &&
370 i < (best_flex + 1) * flex_size; i++) { 398 i < (best_flex + 1) * flex_size; i++) {
371 desc = ext4_get_group_desc(sb, i, &bh); 399 desc = ext4_get_group_desc(sb, i, &bh);
372 if (le16_to_cpu(desc->bg_free_inodes_count)) { 400 if (ext4_free_inodes_count(sb, desc)) {
373 *best_group = i; 401 *best_group = i;
374 goto out; 402 goto out;
375 } 403 }
@@ -443,17 +471,17 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
443 for (i = 0; i < ngroups; i++) { 471 for (i = 0; i < ngroups; i++) {
444 grp = (parent_group + i) % ngroups; 472 grp = (parent_group + i) % ngroups;
445 desc = ext4_get_group_desc(sb, grp, NULL); 473 desc = ext4_get_group_desc(sb, grp, NULL);
446 if (!desc || !desc->bg_free_inodes_count) 474 if (!desc || !ext4_free_inodes_count(sb, desc))
447 continue; 475 continue;
448 if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir) 476 if (ext4_used_dirs_count(sb, desc) >= best_ndir)
449 continue; 477 continue;
450 if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) 478 if (ext4_free_inodes_count(sb, desc) < avefreei)
451 continue; 479 continue;
452 if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb) 480 if (ext4_free_blks_count(sb, desc) < avefreeb)
453 continue; 481 continue;
454 *group = grp; 482 *group = grp;
455 ret = 0; 483 ret = 0;
456 best_ndir = le16_to_cpu(desc->bg_used_dirs_count); 484 best_ndir = ext4_used_dirs_count(sb, desc);
457 } 485 }
458 if (ret == 0) 486 if (ret == 0)
459 return ret; 487 return ret;
@@ -479,13 +507,13 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
479 for (i = 0; i < ngroups; i++) { 507 for (i = 0; i < ngroups; i++) {
480 *group = (parent_group + i) % ngroups; 508 *group = (parent_group + i) % ngroups;
481 desc = ext4_get_group_desc(sb, *group, NULL); 509 desc = ext4_get_group_desc(sb, *group, NULL);
482 if (!desc || !desc->bg_free_inodes_count) 510 if (!desc || !ext4_free_inodes_count(sb, desc))
483 continue; 511 continue;
484 if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs) 512 if (ext4_used_dirs_count(sb, desc) >= max_dirs)
485 continue; 513 continue;
486 if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes) 514 if (ext4_free_inodes_count(sb, desc) < min_inodes)
487 continue; 515 continue;
488 if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks) 516 if (ext4_free_blks_count(sb, desc) < min_blocks)
489 continue; 517 continue;
490 return 0; 518 return 0;
491 } 519 }
@@ -494,8 +522,8 @@ fallback:
494 for (i = 0; i < ngroups; i++) { 522 for (i = 0; i < ngroups; i++) {
495 *group = (parent_group + i) % ngroups; 523 *group = (parent_group + i) % ngroups;
496 desc = ext4_get_group_desc(sb, *group, NULL); 524 desc = ext4_get_group_desc(sb, *group, NULL);
497 if (desc && desc->bg_free_inodes_count && 525 if (desc && ext4_free_inodes_count(sb, desc) &&
498 le16_to_cpu(desc->bg_free_inodes_count) >= avefreei) 526 ext4_free_inodes_count(sb, desc) >= avefreei)
499 return 0; 527 return 0;
500 } 528 }
501 529
@@ -524,8 +552,8 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
524 */ 552 */
525 *group = parent_group; 553 *group = parent_group;
526 desc = ext4_get_group_desc(sb, *group, NULL); 554 desc = ext4_get_group_desc(sb, *group, NULL);
527 if (desc && le16_to_cpu(desc->bg_free_inodes_count) && 555 if (desc && ext4_free_inodes_count(sb, desc) &&
528 le16_to_cpu(desc->bg_free_blocks_count)) 556 ext4_free_blks_count(sb, desc))
529 return 0; 557 return 0;
530 558
531 /* 559 /*
@@ -548,8 +576,8 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
548 if (*group >= ngroups) 576 if (*group >= ngroups)
549 *group -= ngroups; 577 *group -= ngroups;
550 desc = ext4_get_group_desc(sb, *group, NULL); 578 desc = ext4_get_group_desc(sb, *group, NULL);
551 if (desc && le16_to_cpu(desc->bg_free_inodes_count) && 579 if (desc && ext4_free_inodes_count(sb, desc) &&
552 le16_to_cpu(desc->bg_free_blocks_count)) 580 ext4_free_blks_count(sb, desc))
553 return 0; 581 return 0;
554 } 582 }
555 583
@@ -562,7 +590,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
562 if (++*group >= ngroups) 590 if (++*group >= ngroups)
563 *group = 0; 591 *group = 0;
564 desc = ext4_get_group_desc(sb, *group, NULL); 592 desc = ext4_get_group_desc(sb, *group, NULL);
565 if (desc && le16_to_cpu(desc->bg_free_inodes_count)) 593 if (desc && ext4_free_inodes_count(sb, desc))
566 return 0; 594 return 0;
567 } 595 }
568 596
@@ -570,6 +598,79 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
570} 598}
571 599
572/* 600/*
601 * Claim the inode from the inode bitmap. If the group
602 * is uninit we need to take the group's sb_bgl_lock
603 * and clear the uninit flag. The inode bitmap update
604 * and the group desc uninit flag clear should be done
605 * while holding sb_bgl_lock so that ext4_read_inode_bitmap()
606 * doesn't race with ext4_claim_inode()
607 */
608static int ext4_claim_inode(struct super_block *sb,
609 struct buffer_head *inode_bitmap_bh,
610 unsigned long ino, ext4_group_t group, int mode)
611{
612 int free = 0, retval = 0, count;
613 struct ext4_sb_info *sbi = EXT4_SB(sb);
614 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
615
616 spin_lock(sb_bgl_lock(sbi, group));
617 if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
618 /* not a free inode */
619 retval = 1;
620 goto err_ret;
621 }
622 ino++;
623 if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
624 ino > EXT4_INODES_PER_GROUP(sb)) {
625 spin_unlock(sb_bgl_lock(sbi, group));
626 ext4_error(sb, __func__,
627 "reserved inode or inode > inodes count - "
628 "block_group = %u, inode=%lu", group,
629 ino + group * EXT4_INODES_PER_GROUP(sb));
630 return 1;
631 }
632 /* If we didn't allocate from within the initialized part of the inode
633 * table then we need to initialize up to this inode. */
634 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
635
636 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
637 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
638 /* When marking the block group with
639 * ~EXT4_BG_INODE_UNINIT we don't want to depend
640 * on the value of bg_itable_unused even though
641 * mke2fs could have initialized the same for us.
642 * Instead we calculate the value below.
643 */
644
645 free = 0;
646 } else {
647 free = EXT4_INODES_PER_GROUP(sb) -
648 ext4_itable_unused_count(sb, gdp);
649 }
650
651 /*
652 * Check the relative inode number against the last used
653 * relative inode number in this group. If it is greater,
654 * we need to update the bg_itable_unused count.
655 *
656 */
657 if (ino > free)
658 ext4_itable_unused_set(sb, gdp,
659 (EXT4_INODES_PER_GROUP(sb) - ino));
660 }
661 count = ext4_free_inodes_count(sb, gdp) - 1;
662 ext4_free_inodes_set(sb, gdp, count);
663 if (S_ISDIR(mode)) {
664 count = ext4_used_dirs_count(sb, gdp) + 1;
665 ext4_used_dirs_set(sb, gdp, count);
666 }
667 gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
668err_ret:
669 spin_unlock(sb_bgl_lock(sbi, group));
670 return retval;
671}
672
673/*
573 * There are two policies for allocating an inode. If the new inode is 674 * There are two policies for allocating an inode. If the new inode is
574 * a directory, then a forward search is made for a block group with both 675 * a directory, then a forward search is made for a block group with both
575 * free space and a low directory-to-inode ratio; if that fails, then of 676 * free space and a low directory-to-inode ratio; if that fails, then of
@@ -582,8 +683,8 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
582struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode) 683struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
583{ 684{
584 struct super_block *sb; 685 struct super_block *sb;
585 struct buffer_head *bitmap_bh = NULL; 686 struct buffer_head *inode_bitmap_bh = NULL;
586 struct buffer_head *bh2; 687 struct buffer_head *group_desc_bh;
587 ext4_group_t group = 0; 688 ext4_group_t group = 0;
588 unsigned long ino = 0; 689 unsigned long ino = 0;
589 struct inode *inode; 690 struct inode *inode;
@@ -602,6 +703,8 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
602 return ERR_PTR(-EPERM); 703 return ERR_PTR(-EPERM);
603 704
604 sb = dir->i_sb; 705 sb = dir->i_sb;
706 trace_mark(ext4_request_inode, "dev %s dir %lu mode %d", sb->s_id,
707 dir->i_ino, mode);
605 inode = new_inode(sb); 708 inode = new_inode(sb);
606 if (!inode) 709 if (!inode)
607 return ERR_PTR(-ENOMEM); 710 return ERR_PTR(-ENOMEM);
@@ -631,40 +734,52 @@ got_group:
631 for (i = 0; i < sbi->s_groups_count; i++) { 734 for (i = 0; i < sbi->s_groups_count; i++) {
632 err = -EIO; 735 err = -EIO;
633 736
634 gdp = ext4_get_group_desc(sb, group, &bh2); 737 gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
635 if (!gdp) 738 if (!gdp)
636 goto fail; 739 goto fail;
637 740
638 brelse(bitmap_bh); 741 brelse(inode_bitmap_bh);
639 bitmap_bh = ext4_read_inode_bitmap(sb, group); 742 inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
640 if (!bitmap_bh) 743 if (!inode_bitmap_bh)
641 goto fail; 744 goto fail;
642 745
643 ino = 0; 746 ino = 0;
644 747
645repeat_in_this_group: 748repeat_in_this_group:
646 ino = ext4_find_next_zero_bit((unsigned long *) 749 ino = ext4_find_next_zero_bit((unsigned long *)
647 bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino); 750 inode_bitmap_bh->b_data,
751 EXT4_INODES_PER_GROUP(sb), ino);
752
648 if (ino < EXT4_INODES_PER_GROUP(sb)) { 753 if (ino < EXT4_INODES_PER_GROUP(sb)) {
649 754
650 BUFFER_TRACE(bitmap_bh, "get_write_access"); 755 BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
651 err = ext4_journal_get_write_access(handle, bitmap_bh); 756 err = ext4_journal_get_write_access(handle,
757 inode_bitmap_bh);
652 if (err) 758 if (err)
653 goto fail; 759 goto fail;
654 760
655 if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group), 761 BUFFER_TRACE(group_desc_bh, "get_write_access");
656 ino, bitmap_bh->b_data)) { 762 err = ext4_journal_get_write_access(handle,
763 group_desc_bh);
764 if (err)
765 goto fail;
766 if (!ext4_claim_inode(sb, inode_bitmap_bh,
767 ino, group, mode)) {
657 /* we won it */ 768 /* we won it */
658 BUFFER_TRACE(bitmap_bh, 769 BUFFER_TRACE(inode_bitmap_bh,
659 "call ext4_journal_dirty_metadata"); 770 "call ext4_handle_dirty_metadata");
660 err = ext4_journal_dirty_metadata(handle, 771 err = ext4_handle_dirty_metadata(handle,
661 bitmap_bh); 772 inode,
773 inode_bitmap_bh);
662 if (err) 774 if (err)
663 goto fail; 775 goto fail;
776 /* bit 0 is inode number 1 */
777 ino++;
664 goto got; 778 goto got;
665 } 779 }
666 /* we lost it */ 780 /* we lost it */
667 jbd2_journal_release_buffer(handle, bitmap_bh); 781 ext4_handle_release_buffer(handle, inode_bitmap_bh);
782 ext4_handle_release_buffer(handle, group_desc_bh);
668 783
669 if (++ino < EXT4_INODES_PER_GROUP(sb)) 784 if (++ino < EXT4_INODES_PER_GROUP(sb))
670 goto repeat_in_this_group; 785 goto repeat_in_this_group;
@@ -684,30 +799,16 @@ repeat_in_this_group:
684 goto out; 799 goto out;
685 800
686got: 801got:
687 ino++;
688 if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
689 ino > EXT4_INODES_PER_GROUP(sb)) {
690 ext4_error(sb, __func__,
691 "reserved inode or inode > inodes count - "
692 "block_group = %lu, inode=%lu", group,
693 ino + group * EXT4_INODES_PER_GROUP(sb));
694 err = -EIO;
695 goto fail;
696 }
697
698 BUFFER_TRACE(bh2, "get_write_access");
699 err = ext4_journal_get_write_access(handle, bh2);
700 if (err) goto fail;
701
702 /* We may have to initialize the block bitmap if it isn't already */ 802 /* We may have to initialize the block bitmap if it isn't already */
703 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) && 803 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
704 gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 804 gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
705 struct buffer_head *block_bh = ext4_read_block_bitmap(sb, group); 805 struct buffer_head *block_bitmap_bh;
706 806
707 BUFFER_TRACE(block_bh, "get block bitmap access"); 807 block_bitmap_bh = ext4_read_block_bitmap(sb, group);
708 err = ext4_journal_get_write_access(handle, block_bh); 808 BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
809 err = ext4_journal_get_write_access(handle, block_bitmap_bh);
709 if (err) { 810 if (err) {
710 brelse(block_bh); 811 brelse(block_bitmap_bh);
711 goto fail; 812 goto fail;
712 } 813 }
713 814
@@ -715,9 +816,9 @@ got:
715 spin_lock(sb_bgl_lock(sbi, group)); 816 spin_lock(sb_bgl_lock(sbi, group));
716 /* recheck and clear flag under lock if we still need to */ 817 /* recheck and clear flag under lock if we still need to */
717 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 818 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
718 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
719 free = ext4_free_blocks_after_init(sb, group, gdp); 819 free = ext4_free_blocks_after_init(sb, group, gdp);
720 gdp->bg_free_blocks_count = cpu_to_le16(free); 820 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
821 ext4_free_blks_set(sb, gdp, free);
721 gdp->bg_checksum = ext4_group_desc_csum(sbi, group, 822 gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
722 gdp); 823 gdp);
723 } 824 }
@@ -725,55 +826,19 @@ got:
725 826
726 /* Don't need to dirty bitmap block if we didn't change it */ 827 /* Don't need to dirty bitmap block if we didn't change it */
727 if (free) { 828 if (free) {
728 BUFFER_TRACE(block_bh, "dirty block bitmap"); 829 BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
729 err = ext4_journal_dirty_metadata(handle, block_bh); 830 err = ext4_handle_dirty_metadata(handle,
831 NULL, block_bitmap_bh);
730 } 832 }
731 833
732 brelse(block_bh); 834 brelse(block_bitmap_bh);
733 if (err) 835 if (err)
734 goto fail; 836 goto fail;
735 } 837 }
736 838 BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
737 spin_lock(sb_bgl_lock(sbi, group)); 839 err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
738 /* If we didn't allocate from within the initialized part of the inode 840 if (err)
739 * table then we need to initialize up to this inode. */ 841 goto fail;
740 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
741 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
742 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
743
744 /* When marking the block group with
745 * ~EXT4_BG_INODE_UNINIT we don't want to depend
746 * on the value of bg_itable_unused even though
747 * mke2fs could have initialized the same for us.
748 * Instead we calculated the value below
749 */
750
751 free = 0;
752 } else {
753 free = EXT4_INODES_PER_GROUP(sb) -
754 le16_to_cpu(gdp->bg_itable_unused);
755 }
756
757 /*
758 * Check the relative inode number against the last used
759 * relative inode number in this group. if it is greater
760 * we need to update the bg_itable_unused count
761 *
762 */
763 if (ino > free)
764 gdp->bg_itable_unused =
765 cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
766 }
767
768 le16_add_cpu(&gdp->bg_free_inodes_count, -1);
769 if (S_ISDIR(mode)) {
770 le16_add_cpu(&gdp->bg_used_dirs_count, 1);
771 }
772 gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
773 spin_unlock(sb_bgl_lock(sbi, group));
774 BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
775 err = ext4_journal_dirty_metadata(handle, bh2);
776 if (err) goto fail;
777 842
778 percpu_counter_dec(&sbi->s_freeinodes_counter); 843 percpu_counter_dec(&sbi->s_freeinodes_counter);
779 if (S_ISDIR(mode)) 844 if (S_ISDIR(mode))
@@ -825,7 +890,7 @@ got:
825 890
826 ext4_set_inode_flags(inode); 891 ext4_set_inode_flags(inode);
827 if (IS_DIRSYNC(inode)) 892 if (IS_DIRSYNC(inode))
828 handle->h_sync = 1; 893 ext4_handle_sync(handle);
829 if (insert_inode_locked(inode) < 0) { 894 if (insert_inode_locked(inode) < 0) {
830 err = -EINVAL; 895 err = -EINVAL;
831 goto fail_drop; 896 goto fail_drop;
@@ -852,7 +917,7 @@ got:
852 if (err) 917 if (err)
853 goto fail_free_drop; 918 goto fail_free_drop;
854 919
855 if (test_opt(sb, EXTENTS)) { 920 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
856 /* set extent flag only for directory, file and normal symlink*/ 921 /* set extent flag only for directory, file and normal symlink*/
857 if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) { 922 if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
858 EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL; 923 EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
@@ -867,6 +932,8 @@ got:
867 } 932 }
868 933
869 ext4_debug("allocating inode %lu\n", inode->i_ino); 934 ext4_debug("allocating inode %lu\n", inode->i_ino);
935 trace_mark(ext4_allocate_inode, "dev %s ino %lu dir %lu mode %d",
936 sb->s_id, inode->i_ino, dir->i_ino, mode);
870 goto really_out; 937 goto really_out;
871fail: 938fail:
872 ext4_std_error(sb, err); 939 ext4_std_error(sb, err);
@@ -874,7 +941,7 @@ out:
874 iput(inode); 941 iput(inode);
875 ret = ERR_PTR(err); 942 ret = ERR_PTR(err);
876really_out: 943really_out:
877 brelse(bitmap_bh); 944 brelse(inode_bitmap_bh);
878 return ret; 945 return ret;
879 946
880fail_free_drop: 947fail_free_drop:
@@ -886,7 +953,7 @@ fail_drop:
886 inode->i_nlink = 0; 953 inode->i_nlink = 0;
887 unlock_new_inode(inode); 954 unlock_new_inode(inode);
888 iput(inode); 955 iput(inode);
889 brelse(bitmap_bh); 956 brelse(inode_bitmap_bh);
890 return ERR_PTR(err); 957 return ERR_PTR(err);
891} 958}
892 959
@@ -985,7 +1052,7 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
985 gdp = ext4_get_group_desc(sb, i, NULL); 1052 gdp = ext4_get_group_desc(sb, i, NULL);
986 if (!gdp) 1053 if (!gdp)
987 continue; 1054 continue;
988 desc_count += le16_to_cpu(gdp->bg_free_inodes_count); 1055 desc_count += ext4_free_inodes_count(sb, gdp);
989 brelse(bitmap_bh); 1056 brelse(bitmap_bh);
990 bitmap_bh = ext4_read_inode_bitmap(sb, i); 1057 bitmap_bh = ext4_read_inode_bitmap(sb, i);
991 if (!bitmap_bh) 1058 if (!bitmap_bh)
@@ -993,7 +1060,7 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
993 1060
994 x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8); 1061 x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
995 printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n", 1062 printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
996 i, le16_to_cpu(gdp->bg_free_inodes_count), x); 1063 i, ext4_free_inodes_count(sb, gdp), x);
997 bitmap_count += x; 1064 bitmap_count += x;
998 } 1065 }
999 brelse(bitmap_bh); 1066 brelse(bitmap_bh);
@@ -1007,7 +1074,7 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
1007 gdp = ext4_get_group_desc(sb, i, NULL); 1074 gdp = ext4_get_group_desc(sb, i, NULL);
1008 if (!gdp) 1075 if (!gdp)
1009 continue; 1076 continue;
1010 desc_count += le16_to_cpu(gdp->bg_free_inodes_count); 1077 desc_count += ext4_free_inodes_count(sb, gdp);
1011 cond_resched(); 1078 cond_resched();
1012 } 1079 }
1013 return desc_count; 1080 return desc_count;
@@ -1024,8 +1091,7 @@ unsigned long ext4_count_dirs(struct super_block * sb)
1024 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); 1091 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1025 if (!gdp) 1092 if (!gdp)
1026 continue; 1093 continue;
1027 count += le16_to_cpu(gdp->bg_used_dirs_count); 1094 count += ext4_used_dirs_count(sb, gdp);
1028 } 1095 }
1029 return count; 1096 return count;
1030} 1097}
1031
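The ialloc.c hunks above restructure inode allocation: the caller now takes journal write access on both the inode bitmap and the group descriptor buffer before claiming a bit, claims it through ext4_claim_inode() (which also adjusts the descriptor counts under the group lock), and only then bumps ino, since bit zero is inode number 1. A minimal, self-contained sketch of that find/claim/retry pattern; this is a userspace model, not kernel code, and the bitmap, lock, and helper names below are illustrative stand-ins:

#include <pthread.h>
#include <stdio.h>

#define NBITS 64                       /* "inodes per group" in this model */

static unsigned char bitmap[NBITS / 8];
static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;

/* scan for a clear bit without holding the lock, like ext4_find_next_zero_bit() */
static int find_next_zero_bit(const unsigned char *map, int size, int from)
{
	int i;

	for (i = from; i < size; i++)
		if (!(map[i / 8] & (1 << (i % 8))))
			return i;
	return size;
}

/* claim the bit under the per-group lock; returns nonzero if we lost the race.
 * ext4_claim_inode() additionally updates the free-inode and used-dirs counts
 * in the group descriptor, which this sketch leaves out. */
static int claim_bit(unsigned char *map, int bit)
{
	int was_set;

	pthread_mutex_lock(&group_lock);
	was_set = map[bit / 8] & (1 << (bit % 8));
	map[bit / 8] |= 1 << (bit % 8);
	pthread_mutex_unlock(&group_lock);
	return was_set;
}

int main(void)
{
	int ino = 0;

	while ((ino = find_next_zero_bit(bitmap, NBITS, ino)) < NBITS) {
		if (!claim_bit(bitmap, ino)) {
			printf("allocated inode %d\n", ino + 1); /* zero bit is inode 1 */
			return 0;
		}
		ino++;                 /* we lost it: repeat in this group */
	}
	return 1;                      /* group is full */
}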
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 98d3fe7057ef..a6444cee0c7e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -72,12 +72,17 @@ static int ext4_inode_is_fast_symlink(struct inode *inode)
72 * "bh" may be NULL: a metadata block may have been freed from memory 72 * "bh" may be NULL: a metadata block may have been freed from memory
73 * but there may still be a record of it in the journal, and that record 73 * but there may still be a record of it in the journal, and that record
74 * still needs to be revoked. 74 * still needs to be revoked.
75 *
 76 * If the handle isn't valid, we're not journaling, so there's nothing to do.
75 */ 77 */
76int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode, 78int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
77 struct buffer_head *bh, ext4_fsblk_t blocknr) 79 struct buffer_head *bh, ext4_fsblk_t blocknr)
78{ 80{
79 int err; 81 int err;
80 82
83 if (!ext4_handle_valid(handle))
84 return 0;
85
81 might_sleep(); 86 might_sleep();
82 87
83 BUFFER_TRACE(bh, "enter"); 88 BUFFER_TRACE(bh, "enter");
@@ -170,7 +175,9 @@ static handle_t *start_transaction(struct inode *inode)
170 */ 175 */
171static int try_to_extend_transaction(handle_t *handle, struct inode *inode) 176static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
172{ 177{
173 if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS) 178 if (!ext4_handle_valid(handle))
179 return 0;
180 if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
174 return 0; 181 return 0;
175 if (!ext4_journal_extend(handle, blocks_for_truncate(inode))) 182 if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
176 return 0; 183 return 0;
@@ -184,6 +191,7 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
184 */ 191 */
185static int ext4_journal_test_restart(handle_t *handle, struct inode *inode) 192static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
186{ 193{
194 BUG_ON(EXT4_JOURNAL(inode) == NULL);
187 jbd_debug(2, "restarting handle %p\n", handle); 195 jbd_debug(2, "restarting handle %p\n", handle);
188 return ext4_journal_restart(handle, blocks_for_truncate(inode)); 196 return ext4_journal_restart(handle, blocks_for_truncate(inode));
189} 197}
@@ -216,7 +224,7 @@ void ext4_delete_inode(struct inode *inode)
216 } 224 }
217 225
218 if (IS_SYNC(inode)) 226 if (IS_SYNC(inode))
219 handle->h_sync = 1; 227 ext4_handle_sync(handle);
220 inode->i_size = 0; 228 inode->i_size = 0;
221 err = ext4_mark_inode_dirty(handle, inode); 229 err = ext4_mark_inode_dirty(handle, inode);
222 if (err) { 230 if (err) {
@@ -233,7 +241,7 @@ void ext4_delete_inode(struct inode *inode)
233 * enough credits left in the handle to remove the inode from 241 * enough credits left in the handle to remove the inode from
234 * the orphan list and set the dtime field. 242 * the orphan list and set the dtime field.
235 */ 243 */
236 if (handle->h_buffer_credits < 3) { 244 if (!ext4_handle_has_enough_credits(handle, 3)) {
237 err = ext4_journal_extend(handle, 3); 245 err = ext4_journal_extend(handle, 3);
238 if (err > 0) 246 if (err > 0)
239 err = ext4_journal_restart(handle, 3); 247 err = ext4_journal_restart(handle, 3);
@@ -506,10 +514,10 @@ static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
506 * return the total number of blocks to be allocate, including the 514 * return the total number of blocks to be allocate, including the
507 * direct and indirect blocks. 515 * direct and indirect blocks.
508 */ 516 */
509static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks, 517static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
510 int blocks_to_boundary) 518 int blocks_to_boundary)
511{ 519{
512 unsigned long count = 0; 520 unsigned int count = 0;
513 521
514 /* 522 /*
515 * Simple case, [t,d]Indirect block(s) has not allocated yet 523 * Simple case, [t,d]Indirect block(s) has not allocated yet
@@ -547,6 +555,7 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
547 int indirect_blks, int blks, 555 int indirect_blks, int blks,
548 ext4_fsblk_t new_blocks[4], int *err) 556 ext4_fsblk_t new_blocks[4], int *err)
549{ 557{
558 struct ext4_allocation_request ar;
550 int target, i; 559 int target, i;
551 unsigned long count = 0, blk_allocated = 0; 560 unsigned long count = 0, blk_allocated = 0;
552 int index = 0; 561 int index = 0;
@@ -595,10 +604,17 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
595 if (!target) 604 if (!target)
596 goto allocated; 605 goto allocated;
597 /* Now allocate data blocks */ 606 /* Now allocate data blocks */
598 count = target; 607 memset(&ar, 0, sizeof(ar));
599 /* allocating blocks for data blocks */ 608 ar.inode = inode;
600 current_block = ext4_new_blocks(handle, inode, iblock, 609 ar.goal = goal;
601 goal, &count, err); 610 ar.len = target;
611 ar.logical = iblock;
612 if (S_ISREG(inode->i_mode))
613 /* enable in-core preallocation only for regular files */
614 ar.flags = EXT4_MB_HINT_DATA;
615
616 current_block = ext4_mb_new_blocks(handle, &ar, err);
617
602 if (*err && (target == blks)) { 618 if (*err && (target == blks)) {
603 /* 619 /*
604 * if the allocation failed and we didn't allocate 620 * if the allocation failed and we didn't allocate
@@ -614,7 +630,7 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
614 */ 630 */
615 new_blocks[index] = current_block; 631 new_blocks[index] = current_block;
616 } 632 }
617 blk_allocated += count; 633 blk_allocated += ar.len;
618 } 634 }
619allocated: 635allocated:
620 /* total number of blocks allocated for direct blocks */ 636 /* total number of blocks allocated for direct blocks */
@@ -709,8 +725,8 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
709 set_buffer_uptodate(bh); 725 set_buffer_uptodate(bh);
710 unlock_buffer(bh); 726 unlock_buffer(bh);
711 727
712 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 728 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
713 err = ext4_journal_dirty_metadata(handle, bh); 729 err = ext4_handle_dirty_metadata(handle, inode, bh);
714 if (err) 730 if (err)
715 goto failed; 731 goto failed;
716 } 732 }
@@ -792,8 +808,8 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
792 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode. 808 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
793 */ 809 */
794 jbd_debug(5, "splicing indirect only\n"); 810 jbd_debug(5, "splicing indirect only\n");
795 BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata"); 811 BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
796 err = ext4_journal_dirty_metadata(handle, where->bh); 812 err = ext4_handle_dirty_metadata(handle, inode, where->bh);
797 if (err) 813 if (err)
798 goto err_out; 814 goto err_out;
799 } else { 815 } else {
@@ -840,10 +856,10 @@ err_out:
840 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 856 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
841 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 857 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
842 */ 858 */
843int ext4_get_blocks_handle(handle_t *handle, struct inode *inode, 859static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
844 ext4_lblk_t iblock, unsigned long maxblocks, 860 ext4_lblk_t iblock, unsigned int maxblocks,
845 struct buffer_head *bh_result, 861 struct buffer_head *bh_result,
846 int create, int extend_disksize) 862 int create, int extend_disksize)
847{ 863{
848 int err = -EIO; 864 int err = -EIO;
849 ext4_lblk_t offsets[4]; 865 ext4_lblk_t offsets[4];
@@ -1045,7 +1061,7 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
1045 * It returns the error in case of allocation failure. 1061 * It returns the error in case of allocation failure.
1046 */ 1062 */
1047int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block, 1063int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
1048 unsigned long max_blocks, struct buffer_head *bh, 1064 unsigned int max_blocks, struct buffer_head *bh,
1049 int create, int extend_disksize, int flag) 1065 int create, int extend_disksize, int flag)
1050{ 1066{
1051 int retval; 1067 int retval;
@@ -1221,8 +1237,8 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1221 set_buffer_uptodate(bh); 1237 set_buffer_uptodate(bh);
1222 } 1238 }
1223 unlock_buffer(bh); 1239 unlock_buffer(bh);
1224 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 1240 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1225 err = ext4_journal_dirty_metadata(handle, bh); 1241 err = ext4_handle_dirty_metadata(handle, inode, bh);
1226 if (!fatal) 1242 if (!fatal)
1227 fatal = err; 1243 fatal = err;
1228 } else { 1244 } else {
@@ -1335,6 +1351,10 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
1335 pgoff_t index; 1351 pgoff_t index;
1336 unsigned from, to; 1352 unsigned from, to;
1337 1353
1354 trace_mark(ext4_write_begin,
1355 "dev %s ino %lu pos %llu len %u flags %u",
1356 inode->i_sb->s_id, inode->i_ino,
1357 (unsigned long long) pos, len, flags);
1338 index = pos >> PAGE_CACHE_SHIFT; 1358 index = pos >> PAGE_CACHE_SHIFT;
1339 from = pos & (PAGE_CACHE_SIZE - 1); 1359 from = pos & (PAGE_CACHE_SIZE - 1);
1340 to = from + len; 1360 to = from + len;
@@ -1387,7 +1407,7 @@ static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1387 if (!buffer_mapped(bh) || buffer_freed(bh)) 1407 if (!buffer_mapped(bh) || buffer_freed(bh))
1388 return 0; 1408 return 0;
1389 set_buffer_uptodate(bh); 1409 set_buffer_uptodate(bh);
1390 return ext4_journal_dirty_metadata(handle, bh); 1410 return ext4_handle_dirty_metadata(handle, NULL, bh);
1391} 1411}
1392 1412
1393/* 1413/*
@@ -1406,6 +1426,10 @@ static int ext4_ordered_write_end(struct file *file,
1406 struct inode *inode = mapping->host; 1426 struct inode *inode = mapping->host;
1407 int ret = 0, ret2; 1427 int ret = 0, ret2;
1408 1428
1429 trace_mark(ext4_ordered_write_end,
1430 "dev %s ino %lu pos %llu len %u copied %u",
1431 inode->i_sb->s_id, inode->i_ino,
1432 (unsigned long long) pos, len, copied);
1409 ret = ext4_jbd2_file_inode(handle, inode); 1433 ret = ext4_jbd2_file_inode(handle, inode);
1410 1434
1411 if (ret == 0) { 1435 if (ret == 0) {
@@ -1444,6 +1468,10 @@ static int ext4_writeback_write_end(struct file *file,
1444 int ret = 0, ret2; 1468 int ret = 0, ret2;
1445 loff_t new_i_size; 1469 loff_t new_i_size;
1446 1470
1471 trace_mark(ext4_writeback_write_end,
1472 "dev %s ino %lu pos %llu len %u copied %u",
1473 inode->i_sb->s_id, inode->i_ino,
1474 (unsigned long long) pos, len, copied);
1447 new_i_size = pos + copied; 1475 new_i_size = pos + copied;
1448 if (new_i_size > EXT4_I(inode)->i_disksize) { 1476 if (new_i_size > EXT4_I(inode)->i_disksize) {
1449 ext4_update_i_disksize(inode, new_i_size); 1477 ext4_update_i_disksize(inode, new_i_size);
@@ -1479,6 +1507,10 @@ static int ext4_journalled_write_end(struct file *file,
1479 unsigned from, to; 1507 unsigned from, to;
1480 loff_t new_i_size; 1508 loff_t new_i_size;
1481 1509
1510 trace_mark(ext4_journalled_write_end,
1511 "dev %s ino %lu pos %llu len %u copied %u",
1512 inode->i_sb->s_id, inode->i_ino,
1513 (unsigned long long) pos, len, copied);
1482 from = pos & (PAGE_CACHE_SIZE - 1); 1514 from = pos & (PAGE_CACHE_SIZE - 1);
1483 to = from + len; 1515 to = from + len;
1484 1516
@@ -1625,7 +1657,7 @@ struct mpage_da_data {
1625 get_block_t *get_block; 1657 get_block_t *get_block;
1626 struct writeback_control *wbc; 1658 struct writeback_control *wbc;
1627 int io_done; 1659 int io_done;
1628 long pages_written; 1660 int pages_written;
1629 int retval; 1661 int retval;
1630}; 1662};
1631 1663
@@ -1645,35 +1677,39 @@ struct mpage_da_data {
1645 */ 1677 */
1646static int mpage_da_submit_io(struct mpage_da_data *mpd) 1678static int mpage_da_submit_io(struct mpage_da_data *mpd)
1647{ 1679{
1648 struct address_space *mapping = mpd->inode->i_mapping;
1649 int ret = 0, err, nr_pages, i;
1650 unsigned long index, end;
1651 struct pagevec pvec;
1652 long pages_skipped; 1680 long pages_skipped;
1681 struct pagevec pvec;
1682 unsigned long index, end;
1683 int ret = 0, err, nr_pages, i;
1684 struct inode *inode = mpd->inode;
1685 struct address_space *mapping = inode->i_mapping;
1653 1686
1654 BUG_ON(mpd->next_page <= mpd->first_page); 1687 BUG_ON(mpd->next_page <= mpd->first_page);
1655 pagevec_init(&pvec, 0); 1688 /*
1689 * We need to start from the first_page to the next_page - 1
1690 * to make sure we also write the mapped dirty buffer_heads.
1691 * If we look at mpd->lbh.b_blocknr we would only be looking
1692 * at the currently mapped buffer_heads.
1693 */
1656 index = mpd->first_page; 1694 index = mpd->first_page;
1657 end = mpd->next_page - 1; 1695 end = mpd->next_page - 1;
1658 1696
1697 pagevec_init(&pvec, 0);
1659 while (index <= end) { 1698 while (index <= end) {
1660 /* 1699 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1661 * We can use PAGECACHE_TAG_DIRTY lookup here because
1662 * even though we have cleared the dirty flag on the page
1663 * We still keep the page in the radix tree with tag
1664 * PAGECACHE_TAG_DIRTY. See clear_page_dirty_for_io.
1665 * The PAGECACHE_TAG_DIRTY is cleared in set_page_writeback
1666 * which is called via the below writepage callback.
1667 */
1668 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1669 PAGECACHE_TAG_DIRTY,
1670 min(end - index,
1671 (pgoff_t)PAGEVEC_SIZE-1) + 1);
1672 if (nr_pages == 0) 1700 if (nr_pages == 0)
1673 break; 1701 break;
1674 for (i = 0; i < nr_pages; i++) { 1702 for (i = 0; i < nr_pages; i++) {
1675 struct page *page = pvec.pages[i]; 1703 struct page *page = pvec.pages[i];
1676 1704
1705 index = page->index;
1706 if (index > end)
1707 break;
1708 index++;
1709
1710 BUG_ON(!PageLocked(page));
1711 BUG_ON(PageWriteback(page));
1712
1677 pages_skipped = mpd->wbc->pages_skipped; 1713 pages_skipped = mpd->wbc->pages_skipped;
1678 err = mapping->a_ops->writepage(page, mpd->wbc); 1714 err = mapping->a_ops->writepage(page, mpd->wbc);
1679 if (!err && (pages_skipped == mpd->wbc->pages_skipped)) 1715 if (!err && (pages_skipped == mpd->wbc->pages_skipped))
@@ -1831,13 +1867,13 @@ static void ext4_print_free_blocks(struct inode *inode)
1831 ext4_count_free_blocks(inode->i_sb)); 1867 ext4_count_free_blocks(inode->i_sb));
1832 printk(KERN_EMERG "Free/Dirty block details\n"); 1868 printk(KERN_EMERG "Free/Dirty block details\n");
1833 printk(KERN_EMERG "free_blocks=%lld\n", 1869 printk(KERN_EMERG "free_blocks=%lld\n",
1834 percpu_counter_sum(&sbi->s_freeblocks_counter)); 1870 (long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
1835 printk(KERN_EMERG "dirty_blocks=%lld\n", 1871 printk(KERN_EMERG "dirty_blocks=%lld\n",
1836 percpu_counter_sum(&sbi->s_dirtyblocks_counter)); 1872 (long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
1837 printk(KERN_EMERG "Block reservation details\n"); 1873 printk(KERN_EMERG "Block reservation details\n");
1838 printk(KERN_EMERG "i_reserved_data_blocks=%lu\n", 1874 printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
1839 EXT4_I(inode)->i_reserved_data_blocks); 1875 EXT4_I(inode)->i_reserved_data_blocks);
1840 printk(KERN_EMERG "i_reserved_meta_blocks=%lu\n", 1876 printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
1841 EXT4_I(inode)->i_reserved_meta_blocks); 1877 EXT4_I(inode)->i_reserved_meta_blocks);
1842 return; 1878 return;
1843} 1879}
@@ -2087,11 +2123,29 @@ static int __mpage_da_writepage(struct page *page,
2087 bh = head; 2123 bh = head;
2088 do { 2124 do {
2089 BUG_ON(buffer_locked(bh)); 2125 BUG_ON(buffer_locked(bh));
2126 /*
2127 * We need to try to allocate
2128 * unmapped blocks in the same page.
2129 * Otherwise we won't make progress
2130 * with the page in ext4_da_writepage
2131 */
2090 if (buffer_dirty(bh) && 2132 if (buffer_dirty(bh) &&
2091 (!buffer_mapped(bh) || buffer_delay(bh))) { 2133 (!buffer_mapped(bh) || buffer_delay(bh))) {
2092 mpage_add_bh_to_extent(mpd, logical, bh); 2134 mpage_add_bh_to_extent(mpd, logical, bh);
2093 if (mpd->io_done) 2135 if (mpd->io_done)
2094 return MPAGE_DA_EXTENT_TAIL; 2136 return MPAGE_DA_EXTENT_TAIL;
2137 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2138 /*
2139 * mapped dirty buffer. We need to update
2140 * the b_state because we look at
2141 * b_state in mpage_da_map_blocks. We don't
2142 * update b_size because if we find an
2143 * unmapped buffer_head later we need to
2144 * use the b_state flag of that buffer_head.
2145 */
2146 if (mpd->lbh.b_size == 0)
2147 mpd->lbh.b_state =
2148 bh->b_state & BH_FLAGS;
2095 } 2149 }
2096 logical++; 2150 logical++;
2097 } while ((bh = bh->b_this_page) != head); 2151 } while ((bh = bh->b_this_page) != head);
@@ -2269,10 +2323,13 @@ static int ext4_da_writepage(struct page *page,
2269{ 2323{
2270 int ret = 0; 2324 int ret = 0;
2271 loff_t size; 2325 loff_t size;
2272 unsigned long len; 2326 unsigned int len;
2273 struct buffer_head *page_bufs; 2327 struct buffer_head *page_bufs;
2274 struct inode *inode = page->mapping->host; 2328 struct inode *inode = page->mapping->host;
2275 2329
2330 trace_mark(ext4_da_writepage,
2331 "dev %s ino %lu page_index %lu",
2332 inode->i_sb->s_id, inode->i_ino, page->index);
2276 size = i_size_read(inode); 2333 size = i_size_read(inode);
2277 if (page->index == size >> PAGE_CACHE_SHIFT) 2334 if (page->index == size >> PAGE_CACHE_SHIFT)
2278 len = size & ~PAGE_CACHE_MASK; 2335 len = size & ~PAGE_CACHE_MASK;
@@ -2378,10 +2435,25 @@ static int ext4_da_writepages(struct address_space *mapping,
2378 struct mpage_da_data mpd; 2435 struct mpage_da_data mpd;
2379 struct inode *inode = mapping->host; 2436 struct inode *inode = mapping->host;
2380 int no_nrwrite_index_update; 2437 int no_nrwrite_index_update;
2381 long pages_written = 0, pages_skipped; 2438 int pages_written = 0;
2439 long pages_skipped;
2382 int needed_blocks, ret = 0, nr_to_writebump = 0; 2440 int needed_blocks, ret = 0, nr_to_writebump = 0;
2383 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2441 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2384 2442
2443 trace_mark(ext4_da_writepages,
2444 "dev %s ino %lu nr_t_write %ld "
2445 "pages_skipped %ld range_start %llu "
2446 "range_end %llu nonblocking %d "
2447 "for_kupdate %d for_reclaim %d "
2448 "for_writepages %d range_cyclic %d",
2449 inode->i_sb->s_id, inode->i_ino,
2450 wbc->nr_to_write, wbc->pages_skipped,
2451 (unsigned long long) wbc->range_start,
2452 (unsigned long long) wbc->range_end,
2453 wbc->nonblocking, wbc->for_kupdate,
2454 wbc->for_reclaim, wbc->for_writepages,
2455 wbc->range_cyclic);
2456
2385 /* 2457 /*
2386 * No pages to write? This is mainly a kludge to avoid starting 2458 * No pages to write? This is mainly a kludge to avoid starting
2387 * a transaction for special inodes like journal inode on last iput() 2459 * a transaction for special inodes like journal inode on last iput()
@@ -2389,6 +2461,20 @@ static int ext4_da_writepages(struct address_space *mapping,
2389 */ 2461 */
2390 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2462 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2391 return 0; 2463 return 0;
2464
2465 /*
2466 * If the filesystem has aborted, it is read-only, so return
2467 * right away instead of dumping stack traces later on that
2468 * will obscure the real source of the problem. We test
 2469 * EXT4_MOUNT_ABORT instead of sb->s_flags' MS_RDONLY because
2470 * the latter could be true if the filesystem is mounted
2471 * read-only, and in that case, ext4_da_writepages should
2472 * *never* be called, so if that ever happens, we would want
2473 * the stack trace.
2474 */
2475 if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT))
2476 return -EROFS;
2477
2392 /* 2478 /*
2393 * Make sure nr_to_write is >= sbi->s_mb_stream_request 2479 * Make sure nr_to_write is >= sbi->s_mb_stream_request
 2394 * This makes sure small file blocks are allocated in 2480 * This makes sure small file blocks are allocated in
@@ -2433,7 +2519,7 @@ static int ext4_da_writepages(struct address_space *mapping,
2433 handle = ext4_journal_start(inode, needed_blocks); 2519 handle = ext4_journal_start(inode, needed_blocks);
2434 if (IS_ERR(handle)) { 2520 if (IS_ERR(handle)) {
2435 ret = PTR_ERR(handle); 2521 ret = PTR_ERR(handle);
2436 printk(KERN_EMERG "%s: jbd2_start: " 2522 printk(KERN_CRIT "%s: jbd2_start: "
2437 "%ld pages, ino %lu; err %d\n", __func__, 2523 "%ld pages, ino %lu; err %d\n", __func__,
2438 wbc->nr_to_write, inode->i_ino, ret); 2524 wbc->nr_to_write, inode->i_ino, ret);
2439 dump_stack(); 2525 dump_stack();
@@ -2486,6 +2572,14 @@ out_writepages:
2486 if (!no_nrwrite_index_update) 2572 if (!no_nrwrite_index_update)
2487 wbc->no_nrwrite_index_update = 0; 2573 wbc->no_nrwrite_index_update = 0;
2488 wbc->nr_to_write -= nr_to_writebump; 2574 wbc->nr_to_write -= nr_to_writebump;
2575 trace_mark(ext4_da_writepage_result,
2576 "dev %s ino %lu ret %d pages_written %d "
2577 "pages_skipped %ld congestion %d "
2578 "more_io %d no_nrwrite_index_update %d",
2579 inode->i_sb->s_id, inode->i_ino, ret,
2580 pages_written, wbc->pages_skipped,
2581 wbc->encountered_congestion, wbc->more_io,
2582 wbc->no_nrwrite_index_update);
2489 return ret; 2583 return ret;
2490} 2584}
2491 2585
@@ -2537,6 +2631,11 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2537 len, flags, pagep, fsdata); 2631 len, flags, pagep, fsdata);
2538 } 2632 }
2539 *fsdata = (void *)0; 2633 *fsdata = (void *)0;
2634
2635 trace_mark(ext4_da_write_begin,
2636 "dev %s ino %lu pos %llu len %u flags %u",
2637 inode->i_sb->s_id, inode->i_ino,
2638 (unsigned long long) pos, len, flags);
2540retry: 2639retry:
2541 /* 2640 /*
2542 * With delayed allocation, we don't log the i_disksize update 2641 * With delayed allocation, we don't log the i_disksize update
@@ -2626,6 +2725,10 @@ static int ext4_da_write_end(struct file *file,
2626 } 2725 }
2627 } 2726 }
2628 2727
2728 trace_mark(ext4_da_write_end,
2729 "dev %s ino %lu pos %llu len %u copied %u",
2730 inode->i_sb->s_id, inode->i_ino,
2731 (unsigned long long) pos, len, copied);
2629 start = pos & (PAGE_CACHE_SIZE - 1); 2732 start = pos & (PAGE_CACHE_SIZE - 1);
2630 end = start + copied - 1; 2733 end = start + copied - 1;
2631 2734
@@ -2718,7 +2821,10 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2718 filemap_write_and_wait(mapping); 2821 filemap_write_and_wait(mapping);
2719 } 2822 }
2720 2823
2721 if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { 2824 BUG_ON(!EXT4_JOURNAL(inode) &&
2825 EXT4_I(inode)->i_state & EXT4_STATE_JDATA);
2826
2827 if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
2722 /* 2828 /*
2723 * This is a REALLY heavyweight approach, but the use of 2829 * This is a REALLY heavyweight approach, but the use of
2724 * bmap on dirty files is expected to be extremely rare: 2830 * bmap on dirty files is expected to be extremely rare:
@@ -2836,6 +2942,9 @@ static int ext4_normal_writepage(struct page *page,
2836 loff_t size = i_size_read(inode); 2942 loff_t size = i_size_read(inode);
2837 loff_t len; 2943 loff_t len;
2838 2944
2945 trace_mark(ext4_normal_writepage,
2946 "dev %s ino %lu page_index %lu",
2947 inode->i_sb->s_id, inode->i_ino, page->index);
2839 J_ASSERT(PageLocked(page)); 2948 J_ASSERT(PageLocked(page));
2840 if (page->index == size >> PAGE_CACHE_SHIFT) 2949 if (page->index == size >> PAGE_CACHE_SHIFT)
2841 len = size & ~PAGE_CACHE_MASK; 2950 len = size & ~PAGE_CACHE_MASK;
@@ -2921,6 +3030,9 @@ static int ext4_journalled_writepage(struct page *page,
2921 loff_t size = i_size_read(inode); 3030 loff_t size = i_size_read(inode);
2922 loff_t len; 3031 loff_t len;
2923 3032
3033 trace_mark(ext4_journalled_writepage,
3034 "dev %s ino %lu page_index %lu",
3035 inode->i_sb->s_id, inode->i_ino, page->index);
2924 J_ASSERT(PageLocked(page)); 3036 J_ASSERT(PageLocked(page));
2925 if (page->index == size >> PAGE_CACHE_SHIFT) 3037 if (page->index == size >> PAGE_CACHE_SHIFT)
2926 len = size & ~PAGE_CACHE_MASK; 3038 len = size & ~PAGE_CACHE_MASK;
@@ -2989,7 +3101,10 @@ static void ext4_invalidatepage(struct page *page, unsigned long offset)
2989 if (offset == 0) 3101 if (offset == 0)
2990 ClearPageChecked(page); 3102 ClearPageChecked(page);
2991 3103
2992 jbd2_journal_invalidatepage(journal, page, offset); 3104 if (journal)
3105 jbd2_journal_invalidatepage(journal, page, offset);
3106 else
3107 block_invalidatepage(page, offset);
2993} 3108}
2994 3109
2995static int ext4_releasepage(struct page *page, gfp_t wait) 3110static int ext4_releasepage(struct page *page, gfp_t wait)
@@ -2999,7 +3114,10 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
2999 WARN_ON(PageChecked(page)); 3114 WARN_ON(PageChecked(page));
3000 if (!page_has_buffers(page)) 3115 if (!page_has_buffers(page))
3001 return 0; 3116 return 0;
3002 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3117 if (journal)
3118 return jbd2_journal_try_to_free_buffers(journal, page, wait);
3119 else
3120 return try_to_free_buffers(page);
3003} 3121}
3004 3122
3005/* 3123/*
@@ -3271,7 +3389,7 @@ int ext4_block_truncate_page(handle_t *handle,
3271 3389
3272 err = 0; 3390 err = 0;
3273 if (ext4_should_journal_data(inode)) { 3391 if (ext4_should_journal_data(inode)) {
3274 err = ext4_journal_dirty_metadata(handle, bh); 3392 err = ext4_handle_dirty_metadata(handle, inode, bh);
3275 } else { 3393 } else {
3276 if (ext4_should_order_data(inode)) 3394 if (ext4_should_order_data(inode))
3277 err = ext4_jbd2_file_inode(handle, inode); 3395 err = ext4_jbd2_file_inode(handle, inode);
@@ -3395,8 +3513,8 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
3395 __le32 *p; 3513 __le32 *p;
3396 if (try_to_extend_transaction(handle, inode)) { 3514 if (try_to_extend_transaction(handle, inode)) {
3397 if (bh) { 3515 if (bh) {
3398 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 3516 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
3399 ext4_journal_dirty_metadata(handle, bh); 3517 ext4_handle_dirty_metadata(handle, inode, bh);
3400 } 3518 }
3401 ext4_mark_inode_dirty(handle, inode); 3519 ext4_mark_inode_dirty(handle, inode);
3402 ext4_journal_test_restart(handle, inode); 3520 ext4_journal_test_restart(handle, inode);
@@ -3496,7 +3614,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
3496 count, block_to_free_p, p); 3614 count, block_to_free_p, p);
3497 3615
3498 if (this_bh) { 3616 if (this_bh) {
3499 BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata"); 3617 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
3500 3618
3501 /* 3619 /*
3502 * The buffer head should have an attached journal head at this 3620 * The buffer head should have an attached journal head at this
@@ -3505,7 +3623,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
3505 * the block was cleared. Check for this instead of OOPSing. 3623 * the block was cleared. Check for this instead of OOPSing.
3506 */ 3624 */
3507 if (bh2jh(this_bh)) 3625 if (bh2jh(this_bh))
3508 ext4_journal_dirty_metadata(handle, this_bh); 3626 ext4_handle_dirty_metadata(handle, inode, this_bh);
3509 else 3627 else
3510 ext4_error(inode->i_sb, __func__, 3628 ext4_error(inode->i_sb, __func__,
3511 "circular indirect block detected, " 3629 "circular indirect block detected, "
@@ -3535,7 +3653,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
3535 ext4_fsblk_t nr; 3653 ext4_fsblk_t nr;
3536 __le32 *p; 3654 __le32 *p;
3537 3655
3538 if (is_handle_aborted(handle)) 3656 if (ext4_handle_is_aborted(handle))
3539 return; 3657 return;
3540 3658
3541 if (depth--) { 3659 if (depth--) {
@@ -3605,7 +3723,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
3605 * will merely complain about releasing a free block, 3723 * will merely complain about releasing a free block,
3606 * rather than leaking blocks. 3724 * rather than leaking blocks.
3607 */ 3725 */
3608 if (is_handle_aborted(handle)) 3726 if (ext4_handle_is_aborted(handle))
3609 return; 3727 return;
3610 if (try_to_extend_transaction(handle, inode)) { 3728 if (try_to_extend_transaction(handle, inode)) {
3611 ext4_mark_inode_dirty(handle, inode); 3729 ext4_mark_inode_dirty(handle, inode);
@@ -3624,9 +3742,10 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
3624 parent_bh)){ 3742 parent_bh)){
3625 *p = 0; 3743 *p = 0;
3626 BUFFER_TRACE(parent_bh, 3744 BUFFER_TRACE(parent_bh,
3627 "call ext4_journal_dirty_metadata"); 3745 "call ext4_handle_dirty_metadata");
3628 ext4_journal_dirty_metadata(handle, 3746 ext4_handle_dirty_metadata(handle,
3629 parent_bh); 3747 inode,
3748 parent_bh);
3630 } 3749 }
3631 } 3750 }
3632 } 3751 }
@@ -3814,7 +3933,7 @@ do_indirects:
3814 * synchronous 3933 * synchronous
3815 */ 3934 */
3816 if (IS_SYNC(inode)) 3935 if (IS_SYNC(inode))
3817 handle->h_sync = 1; 3936 ext4_handle_sync(handle);
3818out_stop: 3937out_stop:
3819 /* 3938 /*
3820 * If this was a simple ftruncate(), and the file will remain alive 3939 * If this was a simple ftruncate(), and the file will remain alive
@@ -3844,7 +3963,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
3844 ext4_fsblk_t block; 3963 ext4_fsblk_t block;
3845 int inodes_per_block, inode_offset; 3964 int inodes_per_block, inode_offset;
3846 3965
3847 iloc->bh = 0; 3966 iloc->bh = NULL;
3848 if (!ext4_valid_inum(sb, inode->i_ino)) 3967 if (!ext4_valid_inum(sb, inode->i_ino))
3849 return -EIO; 3968 return -EIO;
3850 3969
@@ -3951,7 +4070,7 @@ make_io:
3951 num = EXT4_INODES_PER_GROUP(sb); 4070 num = EXT4_INODES_PER_GROUP(sb);
3952 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 4071 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3953 EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) 4072 EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
3954 num -= le16_to_cpu(gdp->bg_itable_unused); 4073 num -= ext4_itable_unused_count(sb, gdp);
3955 table += num / inodes_per_block; 4074 table += num / inodes_per_block;
3956 if (end > table) 4075 if (end > table)
3957 end = table; 4076 end = table;
@@ -4313,8 +4432,8 @@ static int ext4_do_update_inode(handle_t *handle,
4313 EXT4_SET_RO_COMPAT_FEATURE(sb, 4432 EXT4_SET_RO_COMPAT_FEATURE(sb,
4314 EXT4_FEATURE_RO_COMPAT_LARGE_FILE); 4433 EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4315 sb->s_dirt = 1; 4434 sb->s_dirt = 1;
4316 handle->h_sync = 1; 4435 ext4_handle_sync(handle);
4317 err = ext4_journal_dirty_metadata(handle, 4436 err = ext4_handle_dirty_metadata(handle, inode,
4318 EXT4_SB(sb)->s_sbh); 4437 EXT4_SB(sb)->s_sbh);
4319 } 4438 }
4320 } 4439 }
@@ -4341,9 +4460,8 @@ static int ext4_do_update_inode(handle_t *handle,
4341 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 4460 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
4342 } 4461 }
4343 4462
4344 4463 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4345 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 4464 rc = ext4_handle_dirty_metadata(handle, inode, bh);
4346 rc = ext4_journal_dirty_metadata(handle, bh);
4347 if (!err) 4465 if (!err)
4348 err = rc; 4466 err = rc;
4349 ei->i_state &= ~EXT4_STATE_NEW; 4467 ei->i_state &= ~EXT4_STATE_NEW;
@@ -4406,6 +4524,25 @@ int ext4_write_inode(struct inode *inode, int wait)
4406 return ext4_force_commit(inode->i_sb); 4524 return ext4_force_commit(inode->i_sb);
4407} 4525}
4408 4526
4527int __ext4_write_dirty_metadata(struct inode *inode, struct buffer_head *bh)
4528{
4529 int err = 0;
4530
4531 mark_buffer_dirty(bh);
4532 if (inode && inode_needs_sync(inode)) {
4533 sync_dirty_buffer(bh);
4534 if (buffer_req(bh) && !buffer_uptodate(bh)) {
4535 ext4_error(inode->i_sb, __func__,
4536 "IO error syncing inode, "
4537 "inode=%lu, block=%llu",
4538 inode->i_ino,
4539 (unsigned long long)bh->b_blocknr);
4540 err = -EIO;
4541 }
4542 }
4543 return err;
4544}
4545
4409/* 4546/*
4410 * ext4_setattr() 4547 * ext4_setattr()
4411 * 4548 *
@@ -4710,16 +4847,15 @@ int
4710ext4_reserve_inode_write(handle_t *handle, struct inode *inode, 4847ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4711 struct ext4_iloc *iloc) 4848 struct ext4_iloc *iloc)
4712{ 4849{
4713 int err = 0; 4850 int err;
4714 if (handle) { 4851
4715 err = ext4_get_inode_loc(inode, iloc); 4852 err = ext4_get_inode_loc(inode, iloc);
4716 if (!err) { 4853 if (!err) {
4717 BUFFER_TRACE(iloc->bh, "get_write_access"); 4854 BUFFER_TRACE(iloc->bh, "get_write_access");
4718 err = ext4_journal_get_write_access(handle, iloc->bh); 4855 err = ext4_journal_get_write_access(handle, iloc->bh);
4719 if (err) { 4856 if (err) {
4720 brelse(iloc->bh); 4857 brelse(iloc->bh);
4721 iloc->bh = NULL; 4858 iloc->bh = NULL;
4722 }
4723 } 4859 }
4724 } 4860 }
4725 ext4_std_error(inode->i_sb, err); 4861 ext4_std_error(inode->i_sb, err);
@@ -4791,7 +4927,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4791 4927
4792 might_sleep(); 4928 might_sleep();
4793 err = ext4_reserve_inode_write(handle, inode, &iloc); 4929 err = ext4_reserve_inode_write(handle, inode, &iloc);
4794 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 4930 if (ext4_handle_valid(handle) &&
4931 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4795 !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) { 4932 !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
4796 /* 4933 /*
4797 * We need extra buffer credits since we may write into EA block 4934 * We need extra buffer credits since we may write into EA block
@@ -4843,6 +4980,11 @@ void ext4_dirty_inode(struct inode *inode)
4843 handle_t *current_handle = ext4_journal_current_handle(); 4980 handle_t *current_handle = ext4_journal_current_handle();
4844 handle_t *handle; 4981 handle_t *handle;
4845 4982
4983 if (!ext4_handle_valid(current_handle)) {
4984 ext4_mark_inode_dirty(current_handle, inode);
4985 return;
4986 }
4987
4846 handle = ext4_journal_start(inode, 2); 4988 handle = ext4_journal_start(inode, 2);
4847 if (IS_ERR(handle)) 4989 if (IS_ERR(handle))
4848 goto out; 4990 goto out;
@@ -4880,8 +5022,9 @@ static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4880 BUFFER_TRACE(iloc.bh, "get_write_access"); 5022 BUFFER_TRACE(iloc.bh, "get_write_access");
4881 err = jbd2_journal_get_write_access(handle, iloc.bh); 5023 err = jbd2_journal_get_write_access(handle, iloc.bh);
4882 if (!err) 5024 if (!err)
4883 err = ext4_journal_dirty_metadata(handle, 5025 err = ext4_handle_dirty_metadata(handle,
4884 iloc.bh); 5026 inode,
5027 iloc.bh);
4885 brelse(iloc.bh); 5028 brelse(iloc.bh);
4886 } 5029 }
4887 } 5030 }
@@ -4907,6 +5050,8 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
4907 */ 5050 */
4908 5051
4909 journal = EXT4_JOURNAL(inode); 5052 journal = EXT4_JOURNAL(inode);
5053 if (!journal)
5054 return 0;
4910 if (is_journal_aborted(journal)) 5055 if (is_journal_aborted(journal))
4911 return -EROFS; 5056 return -EROFS;
4912 5057
@@ -4936,7 +5081,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
4936 return PTR_ERR(handle); 5081 return PTR_ERR(handle);
4937 5082
4938 err = ext4_mark_inode_dirty(handle, inode); 5083 err = ext4_mark_inode_dirty(handle, inode);
4939 handle->h_sync = 1; 5084 ext4_handle_sync(handle);
4940 ext4_journal_stop(handle); 5085 ext4_journal_stop(handle);
4941 ext4_std_error(inode->i_sb, err); 5086 ext4_std_error(inode->i_sb, err);
4942 5087
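The inode.c hunks above consistently replace direct handle manipulation (handle->h_sync = 1, h_buffer_credits checks, raw jbd2 calls) with ext4_handle_*() wrappers so the same paths work on no-journal mounts: ext4_handle_valid() reports whether a real journal handle exists, and ext4_handle_dirty_metadata() presumably falls back to a direct buffer write such as __ext4_write_dirty_metadata() above when it does not. A compilable userspace model of that dispatch shape, assuming toy types (the real wrappers live in ext4_jbd2.h/ext4_jbd2.c, which this diff does not show):

#include <stdio.h>

/* toy stand-in for handle_t: a "valid" handle means a journal is present */
struct handle { int journaled; };

static int handle_valid(const struct handle *h)
{
	return h && h->journaled;      /* models ext4_handle_valid() */
}

static int journal_dirty_metadata(const char *what)
{
	printf("jbd2: file %s in the running transaction\n", what);
	return 0;
}

static int write_dirty_metadata(const char *what)
{
	/* no-journal path: mark the buffer dirty and, for sync inodes, write
	 * it out, as __ext4_write_dirty_metadata() does in the hunk above */
	printf("no journal: write %s buffer directly\n", what);
	return 0;
}

/* models the ext4_handle_dirty_metadata() call sites introduced above */
static int handle_dirty_metadata(const struct handle *h, const char *what)
{
	if (handle_valid(h))
		return journal_dirty_metadata(what);
	return write_dirty_metadata(what);
}

int main(void)
{
	struct handle journaled = { 1 };

	handle_dirty_metadata(&journaled, "inode block"); /* journaled mount */
	handle_dirty_metadata(NULL, "inode block");       /* no-journal mount */
	return 0;
}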
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index dc99b4776d58..42dc83fb247a 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -99,7 +99,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
99 goto flags_out; 99 goto flags_out;
100 } 100 }
101 if (IS_SYNC(inode)) 101 if (IS_SYNC(inode))
102 handle->h_sync = 1; 102 ext4_handle_sync(handle);
103 err = ext4_reserve_inode_write(handle, inode, &iloc); 103 err = ext4_reserve_inode_write(handle, inode, &iloc);
104 if (err) 104 if (err)
105 goto flags_err; 105 goto flags_err;
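The mballoc.c hunks below serialize buddy-cache users with a per-group alloc_sem and rely on a fixed layout in the buddy cache inode: each group owns two consecutive blocks, bitmap then buddy, so group g maps to blocks 2*g and 2*g+1 and each cache page covers blocks_per_page/2 groups. The page arithmetic from ext4_mb_get_buddy_cache_lock() is easy to check standalone; a runnable sketch, where the page and block sizes are demo values rather than anything this patch fixes:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096u          /* demo value */

static void locate(unsigned int group, unsigned int blocksize)
{
	unsigned int blocks_per_page = PAGE_CACHE_SIZE / blocksize;
	unsigned int block = group * 2;              /* bitmap block of this group */
	unsigned int pnum = block / blocks_per_page; /* cache page holding it */
	unsigned int first_group = pnum * blocks_per_page / 2;
	unsigned int groups_per_page = blocks_per_page >> 1;

	if (groups_per_page == 0)
		groups_per_page = 1;   /* blocksize == page size: bitmap and
					  buddy land on two separate pages */

	printf("group %u: page %u, first group on page %u, %u group(s)/page\n",
	       group, pnum, first_group, groups_per_page);
}

int main(void)
{
	locate(5, 1024);   /* 4 blocks/page -> groups 4 and 5 share page 2 */
	locate(5, 4096);   /* 1 block/page  -> bitmap on page 10, buddy on 11 */
	return 0;
}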
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 444ad998f72e..918aec0c8a11 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -100,7 +100,7 @@
100 * inode as: 100 * inode as:
101 * 101 *
102 * { page } 102 * { page }
103 * [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]... 103 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
104 * 104 *
105 * 105 *
106 * one block each for bitmap and buddy information. So for each group we 106 * one block each for bitmap and buddy information. So for each group we
@@ -330,6 +330,18 @@
330 * object 330 * object
331 * 331 *
332 */ 332 */
333static struct kmem_cache *ext4_pspace_cachep;
334static struct kmem_cache *ext4_ac_cachep;
335static struct kmem_cache *ext4_free_ext_cachep;
336static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
337 ext4_group_t group);
338static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
339 ext4_group_t group);
340static int ext4_mb_init_per_dev_proc(struct super_block *sb);
341static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
342static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
343
344
333 345
334static inline void *mb_correct_addr_and_bit(int *bit, void *addr) 346static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
335{ 347{
@@ -445,9 +457,9 @@ static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
445 blocknr += first + i; 457 blocknr += first + i;
446 blocknr += 458 blocknr +=
447 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 459 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
448 460 ext4_grp_locked_error(sb, e4b->bd_group,
449 ext4_error(sb, __func__, "double-free of inode" 461 __func__, "double-free of inode"
450 " %lu's block %llu(bit %u in group %lu)\n", 462 " %lu's block %llu(bit %u in group %u)",
451 inode ? inode->i_ino : 0, blocknr, 463 inode ? inode->i_ino : 0, blocknr,
452 first + i, e4b->bd_group); 464 first + i, e4b->bd_group);
453 } 465 }
@@ -477,7 +489,7 @@ static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
477 b2 = (unsigned char *) bitmap; 489 b2 = (unsigned char *) bitmap;
478 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { 490 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
479 if (b1[i] != b2[i]) { 491 if (b1[i] != b2[i]) {
480 printk(KERN_ERR "corruption in group %lu " 492 printk(KERN_ERR "corruption in group %u "
481 "at byte %u(%u): %x in copy != %x " 493 "at byte %u(%u): %x in copy != %x "
482 "on disk/prealloc\n", 494 "on disk/prealloc\n",
483 e4b->bd_group, i, i * 8, b1[i], b2[i]); 495 e4b->bd_group, i, i * 8, b1[i], b2[i]);
@@ -690,8 +702,8 @@ static void ext4_mb_generate_buddy(struct super_block *sb,
690 grp->bb_fragments = fragments; 702 grp->bb_fragments = fragments;
691 703
692 if (free != grp->bb_free) { 704 if (free != grp->bb_free) {
693 ext4_error(sb, __func__, 705 ext4_grp_locked_error(sb, group, __func__,
694 "EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n", 706 "EXT4-fs: group %u: %u blocks in bitmap, %u in gd",
695 group, free, grp->bb_free); 707 group, free, grp->bb_free);
696 /* 708 /*
 697 * If we intend to continue, we consider the group descriptor 709 * If we intend to continue, we consider the group descriptor
@@ -716,7 +728,7 @@ static void ext4_mb_generate_buddy(struct super_block *sb,
716 * stored in the inode as 728 * stored in the inode as
717 * 729 *
718 * { page } 730 * { page }
719 * [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]... 731 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
720 * 732 *
721 * 733 *
722 * one block each for bitmap and buddy information. 734 * one block each for bitmap and buddy information.
@@ -782,25 +794,45 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
782 if (bh[i] == NULL) 794 if (bh[i] == NULL)
783 goto out; 795 goto out;
784 796
785 if (buffer_uptodate(bh[i]) && 797 if (bitmap_uptodate(bh[i]))
786 !(desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))
787 continue; 798 continue;
788 799
789 lock_buffer(bh[i]); 800 lock_buffer(bh[i]);
801 if (bitmap_uptodate(bh[i])) {
802 unlock_buffer(bh[i]);
803 continue;
804 }
790 spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i)); 805 spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
791 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 806 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
792 ext4_init_block_bitmap(sb, bh[i], 807 ext4_init_block_bitmap(sb, bh[i],
793 first_group + i, desc); 808 first_group + i, desc);
809 set_bitmap_uptodate(bh[i]);
794 set_buffer_uptodate(bh[i]); 810 set_buffer_uptodate(bh[i]);
795 unlock_buffer(bh[i]);
796 spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i)); 811 spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
812 unlock_buffer(bh[i]);
797 continue; 813 continue;
798 } 814 }
799 spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i)); 815 spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
816 if (buffer_uptodate(bh[i])) {
817 /*
 818 * if the group is not uninit and the bh is uptodate,
 819 * the bitmap is also uptodate
820 */
821 set_bitmap_uptodate(bh[i]);
822 unlock_buffer(bh[i]);
823 continue;
824 }
800 get_bh(bh[i]); 825 get_bh(bh[i]);
826 /*
827 * submit the buffer_head for read. We can
828 * safely mark the bitmap as uptodate now.
829 * We do it here so the bitmap uptodate bit
 830 * gets set with the buffer lock held.
831 */
832 set_bitmap_uptodate(bh[i]);
801 bh[i]->b_end_io = end_buffer_read_sync; 833 bh[i]->b_end_io = end_buffer_read_sync;
802 submit_bh(READ, bh[i]); 834 submit_bh(READ, bh[i]);
803 mb_debug("read bitmap for group %lu\n", first_group + i); 835 mb_debug("read bitmap for group %u\n", first_group + i);
804 } 836 }
805 837
806 /* wait for I/O completion */ 838 /* wait for I/O completion */
@@ -814,6 +846,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
814 846
815 err = 0; 847 err = 0;
816 first_block = page->index * blocks_per_page; 848 first_block = page->index * blocks_per_page;
849 /* init the page */
850 memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
817 for (i = 0; i < blocks_per_page; i++) { 851 for (i = 0; i < blocks_per_page; i++) {
818 int group; 852 int group;
819 struct ext4_group_info *grinfo; 853 struct ext4_group_info *grinfo;
@@ -840,7 +874,6 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
840 BUG_ON(incore == NULL); 874 BUG_ON(incore == NULL);
841 mb_debug("put buddy for group %u in page %lu/%x\n", 875 mb_debug("put buddy for group %u in page %lu/%x\n",
842 group, page->index, i * blocksize); 876 group, page->index, i * blocksize);
843 memset(data, 0xff, blocksize);
844 grinfo = ext4_get_group_info(sb, group); 877 grinfo = ext4_get_group_info(sb, group);
845 grinfo->bb_fragments = 0; 878 grinfo->bb_fragments = 0;
846 memset(grinfo->bb_counters, 0, 879 memset(grinfo->bb_counters, 0,
@@ -848,7 +881,9 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
848 /* 881 /*
849 * incore got set to the group block bitmap below 882 * incore got set to the group block bitmap below
850 */ 883 */
884 ext4_lock_group(sb, group);
851 ext4_mb_generate_buddy(sb, data, incore, group); 885 ext4_mb_generate_buddy(sb, data, incore, group);
886 ext4_unlock_group(sb, group);
852 incore = NULL; 887 incore = NULL;
853 } else { 888 } else {
854 /* this is block of bitmap */ 889 /* this is block of bitmap */
@@ -862,6 +897,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
862 897
863 /* mark all preallocated blks used in in-core bitmap */ 898 /* mark all preallocated blks used in in-core bitmap */
864 ext4_mb_generate_from_pa(sb, data, group); 899 ext4_mb_generate_from_pa(sb, data, group);
900 ext4_mb_generate_from_freelist(sb, data, group);
865 ext4_unlock_group(sb, group); 901 ext4_unlock_group(sb, group);
866 902
867 /* set incore so that the buddy information can be 903 /* set incore so that the buddy information can be
@@ -886,18 +922,20 @@ static noinline_for_stack int
886ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 922ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
887 struct ext4_buddy *e4b) 923 struct ext4_buddy *e4b)
888{ 924{
889 struct ext4_sb_info *sbi = EXT4_SB(sb);
890 struct inode *inode = sbi->s_buddy_cache;
891 int blocks_per_page; 925 int blocks_per_page;
892 int block; 926 int block;
893 int pnum; 927 int pnum;
894 int poff; 928 int poff;
895 struct page *page; 929 struct page *page;
896 int ret; 930 int ret;
931 struct ext4_group_info *grp;
932 struct ext4_sb_info *sbi = EXT4_SB(sb);
933 struct inode *inode = sbi->s_buddy_cache;
897 934
898 mb_debug("load group %lu\n", group); 935 mb_debug("load group %u\n", group);
899 936
900 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 937 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
938 grp = ext4_get_group_info(sb, group);
901 939
902 e4b->bd_blkbits = sb->s_blocksize_bits; 940 e4b->bd_blkbits = sb->s_blocksize_bits;
903 e4b->bd_info = ext4_get_group_info(sb, group); 941 e4b->bd_info = ext4_get_group_info(sb, group);
@@ -905,6 +943,15 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
905 e4b->bd_group = group; 943 e4b->bd_group = group;
906 e4b->bd_buddy_page = NULL; 944 e4b->bd_buddy_page = NULL;
907 e4b->bd_bitmap_page = NULL; 945 e4b->bd_bitmap_page = NULL;
946 e4b->alloc_semp = &grp->alloc_sem;
947
948 /* Take the read lock on the group alloc
 949 * sem. This makes sure that a parallel
 950 * ext4_mb_init_group happening on other
 951 * groups mapped by the page is blocked
 952 * until we are done with the allocation
953 */
954 down_read(e4b->alloc_semp);
908 955
909 /* 956 /*
910 * the buddy cache inode stores the block bitmap 957 * the buddy cache inode stores the block bitmap
@@ -920,6 +967,14 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
920 page = find_get_page(inode->i_mapping, pnum); 967 page = find_get_page(inode->i_mapping, pnum);
921 if (page == NULL || !PageUptodate(page)) { 968 if (page == NULL || !PageUptodate(page)) {
922 if (page) 969 if (page)
970 /*
 971 * drop the page reference and try
 972 * to get the page under its lock. If it
 973 * is not uptodate, that implies
 974 * somebody just created the page but
 975 * has not yet initialized it. So
 976 * wait for the initialization.
977 */
923 page_cache_release(page); 978 page_cache_release(page);
924 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 979 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
925 if (page) { 980 if (page) {
@@ -985,6 +1040,9 @@ err:
985 page_cache_release(e4b->bd_buddy_page); 1040 page_cache_release(e4b->bd_buddy_page);
986 e4b->bd_buddy = NULL; 1041 e4b->bd_buddy = NULL;
987 e4b->bd_bitmap = NULL; 1042 e4b->bd_bitmap = NULL;
1043
1044 /* Done with the buddy cache */
1045 up_read(e4b->alloc_semp);
988 return ret; 1046 return ret;
989} 1047}
990 1048
@@ -994,6 +1052,9 @@ static void ext4_mb_release_desc(struct ext4_buddy *e4b)
994 page_cache_release(e4b->bd_bitmap_page); 1052 page_cache_release(e4b->bd_bitmap_page);
995 if (e4b->bd_buddy_page) 1053 if (e4b->bd_buddy_page)
996 page_cache_release(e4b->bd_buddy_page); 1054 page_cache_release(e4b->bd_buddy_page);
1055 /* Done with the buddy cache */
1056 if (e4b->alloc_semp)
1057 up_read(e4b->alloc_semp);
997} 1058}
998 1059
999 1060
@@ -1031,7 +1092,10 @@ static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
1031 cur += 32; 1092 cur += 32;
1032 continue; 1093 continue;
1033 } 1094 }
1034 mb_clear_bit_atomic(lock, cur, bm); 1095 if (lock)
1096 mb_clear_bit_atomic(lock, cur, bm);
1097 else
1098 mb_clear_bit(cur, bm);
1035 cur++; 1099 cur++;
1036 } 1100 }
1037} 1101}
@@ -1049,7 +1113,10 @@ static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
1049 cur += 32; 1113 cur += 32;
1050 continue; 1114 continue;
1051 } 1115 }
1052 mb_set_bit_atomic(lock, cur, bm); 1116 if (lock)
1117 mb_set_bit_atomic(lock, cur, bm);
1118 else
1119 mb_set_bit(cur, bm);
1053 cur++; 1120 cur++;
1054 } 1121 }
1055} 1122}
@@ -1094,12 +1161,11 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1094 blocknr += block; 1161 blocknr += block;
1095 blocknr += 1162 blocknr +=
1096 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 1163 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
1097 ext4_unlock_group(sb, e4b->bd_group); 1164 ext4_grp_locked_error(sb, e4b->bd_group,
1098 ext4_error(sb, __func__, "double-free of inode" 1165 __func__, "double-free of inode"
1099 " %lu's block %llu(bit %u in group %lu)\n", 1166 " %lu's block %llu(bit %u in group %u)",
1100 inode ? inode->i_ino : 0, blocknr, block, 1167 inode ? inode->i_ino : 0, blocknr, block,
1101 e4b->bd_group); 1168 e4b->bd_group);
1102 ext4_lock_group(sb, e4b->bd_group);
1103 } 1169 }
1104 mb_clear_bit(block, EXT4_MB_BITMAP(e4b)); 1170 mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
1105 e4b->bd_info->bb_counters[order]++; 1171 e4b->bd_info->bb_counters[order]++;
@@ -1296,13 +1362,20 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1296 ac->ac_tail = ret & 0xffff; 1362 ac->ac_tail = ret & 0xffff;
1297 ac->ac_buddy = ret >> 16; 1363 ac->ac_buddy = ret >> 16;
1298 1364
1299 /* XXXXXXX: SUCH A HORRIBLE **CK */ 1365 /*
1300 /*FIXME!! Why ? */ 1366 * take the page reference. We want the page to be pinned
 1367 * so that we don't get an ext4_mb_init_cache call for this
 1368 * group until we update the bitmap. Otherwise we could
 1369 * double-allocate blocks. The reference is dropped
1370 * in ext4_mb_release_context
1371 */
1301 ac->ac_bitmap_page = e4b->bd_bitmap_page; 1372 ac->ac_bitmap_page = e4b->bd_bitmap_page;
1302 get_page(ac->ac_bitmap_page); 1373 get_page(ac->ac_bitmap_page);
1303 ac->ac_buddy_page = e4b->bd_buddy_page; 1374 ac->ac_buddy_page = e4b->bd_buddy_page;
1304 get_page(ac->ac_buddy_page); 1375 get_page(ac->ac_buddy_page);
1305 1376 /* on allocation we use ac to track the held semaphore */
1377 ac->alloc_semp = e4b->alloc_semp;
1378 e4b->alloc_semp = NULL;
1306 /* store last allocated for subsequent stream allocation */ 1379 /* store last allocated for subsequent stream allocation */
1307 if ((ac->ac_flags & EXT4_MB_HINT_DATA)) { 1380 if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
1308 spin_lock(&sbi->s_md_lock); 1381 spin_lock(&sbi->s_md_lock);
@@ -1326,6 +1399,8 @@ static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1326 struct ext4_free_extent ex; 1399 struct ext4_free_extent ex;
1327 int max; 1400 int max;
1328 1401
1402 if (ac->ac_status == AC_STATUS_FOUND)
1403 return;
1329 /* 1404 /*
1330 * We don't want to scan for a whole year 1405 * We don't want to scan for a whole year
1331 */ 1406 */
@@ -1575,8 +1650,9 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1575 * free blocks even though group info says we 1650 * free blocks even though group info says we
 1576 * have free blocks 1651 * have free blocks
1577 */ 1652 */
1578 ext4_error(sb, __func__, "%d free blocks as per " 1653 ext4_grp_locked_error(sb, e4b->bd_group,
1579 "group info. But bitmap says 0\n", 1654 __func__, "%d free blocks as per "
1655 "group info. But bitmap says 0",
1580 free); 1656 free);
1581 break; 1657 break;
1582 } 1658 }
@@ -1584,8 +1660,9 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1584 mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex); 1660 mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1585 BUG_ON(ex.fe_len <= 0); 1661 BUG_ON(ex.fe_len <= 0);
1586 if (free < ex.fe_len) { 1662 if (free < ex.fe_len) {
1587 ext4_error(sb, __func__, "%d free blocks as per " 1663 ext4_grp_locked_error(sb, e4b->bd_group,
1588 "group info. But got %d blocks\n", 1664 __func__, "%d free blocks as per "
1665 "group info. But got %d blocks",
1589 free, ex.fe_len); 1666 free, ex.fe_len);
1590 /* 1667 /*
1591 * The number of free blocks differs. This mostly 1668 * The number of free blocks differs. This mostly
@@ -1692,6 +1769,173 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1692 return 0; 1769 return 0;
1693} 1770}
1694 1771
1772/*
1773 * lock the group_info alloc_sem of all the groups
1774 * belonging to the same buddy cache page. This
 1775 * makes sure other parallel operations on the buddy
 1776 * cache don't happen while holding the buddy cache
1777 * lock
1778 */
1779int ext4_mb_get_buddy_cache_lock(struct super_block *sb, ext4_group_t group)
1780{
1781 int i;
1782 int block, pnum;
1783 int blocks_per_page;
1784 int groups_per_page;
1785 ext4_group_t first_group;
1786 struct ext4_group_info *grp;
1787
1788 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1789 /*
1790 * the buddy cache inode stores the block bitmap
1791 * and buddy information in consecutive blocks.
1792 * So for each group we need two blocks.
1793 */
1794 block = group * 2;
1795 pnum = block / blocks_per_page;
1796 first_group = pnum * blocks_per_page / 2;
1797
1798 groups_per_page = blocks_per_page >> 1;
1799 if (groups_per_page == 0)
1800 groups_per_page = 1;
 1801 /* lock all the groups the page covers */
1802 for (i = 0; i < groups_per_page; i++) {
1803
1804 if ((first_group + i) >= EXT4_SB(sb)->s_groups_count)
1805 break;
1806 grp = ext4_get_group_info(sb, first_group + i);
 1807 /* take each group's write allocation
 1808 * semaphore. This makes sure there is
 1809 * no block allocation going on in any
 1810 * of those groups
1811 */
1812 down_write_nested(&grp->alloc_sem, i);
1813 }
1814 return i;
1815}
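
The arithmetic at the top of ext4_mb_get_buddy_cache_lock() is the buddy-cache address map: group g keeps its block bitmap in block 2*g and its buddy in block 2*g+1 of the buddy-cache inode, so one page holds blocks_per_page/2 consecutive groups and first_group recovers the first of them. A standalone check of that mapping with example geometry (4K pages, 1K blocks):

    #include <stdio.h>

    int main(void)
    {
        int page_size = 4096, blocksize = 1024;      /* example geometry */
        int blocks_per_page = page_size / blocksize; /* 4 */
        int groups_per_page = blocks_per_page / 2;   /* bitmap + buddy per group */

        if (groups_per_page == 0)
            groups_per_page = 1;   /* blocksize == page size case */

        for (int group = 0; group < 6; group++) {
            int block = group * 2;                 /* group's bitmap block */
            int pnum = block / blocks_per_page;    /* page that holds it */
            int first_group = pnum * blocks_per_page / 2;
            printf("group %d -> page %d, first group on page %d of %d\n",
                   group, pnum, first_group, groups_per_page);
        }
        return 0;
    }
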
1816
1817void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
1818 ext4_group_t group, int locked_group)
1819{
1820 int i;
1821 int block, pnum;
1822 int blocks_per_page;
1823 ext4_group_t first_group;
1824 struct ext4_group_info *grp;
1825
1826 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1827 /*
1828 * the buddy cache inode stores the block bitmap
1829 * and buddy information in consecutive blocks.
1830 * So for each group we need two blocks.
1831 */
1832 block = group * 2;
1833 pnum = block / blocks_per_page;
1834 first_group = pnum * blocks_per_page / 2;
1835 /* release locks on all the groups */
1836 for (i = 0; i < locked_group; i++) {
1837
1838 grp = ext4_get_group_info(sb, first_group + i);
 1839 /* release each group's write allocation
 1840 * semaphore so that block allocation
 1841 * can resume in any of
 1842 * those groups
1843 */
1844 up_write(&grp->alloc_sem);
1845 }
1846
1847}
1848
1849static int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
1850{
1851
1852 int ret;
1853 void *bitmap;
1854 int blocks_per_page;
1855 int block, pnum, poff;
1856 int num_grp_locked = 0;
1857 struct ext4_group_info *this_grp;
1858 struct ext4_sb_info *sbi = EXT4_SB(sb);
1859 struct inode *inode = sbi->s_buddy_cache;
1860 struct page *page = NULL, *bitmap_page = NULL;
1861
1862 mb_debug("init group %lu\n", group);
1863 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1864 this_grp = ext4_get_group_info(sb, group);
1865 /*
 1866 * This ensures we don't add a group
 1867 * to the buddy cache via resize
1868 */
1869 num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group);
1870 if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
1871 /*
1872 * somebody initialized the group
1873 * return without doing anything
1874 */
1875 ret = 0;
1876 goto err;
1877 }
1878 /*
1879 * the buddy cache inode stores the block bitmap
1880 * and buddy information in consecutive blocks.
1881 * So for each group we need two blocks.
1882 */
1883 block = group * 2;
1884 pnum = block / blocks_per_page;
1885 poff = block % blocks_per_page;
1886 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1887 if (page) {
1888 BUG_ON(page->mapping != inode->i_mapping);
1889 ret = ext4_mb_init_cache(page, NULL);
1890 if (ret) {
1891 unlock_page(page);
1892 goto err;
1893 }
1894 unlock_page(page);
1895 }
1896 if (page == NULL || !PageUptodate(page)) {
1897 ret = -EIO;
1898 goto err;
1899 }
1900 mark_page_accessed(page);
1901 bitmap_page = page;
1902 bitmap = page_address(page) + (poff * sb->s_blocksize);
1903
1904 /* init buddy cache */
1905 block++;
1906 pnum = block / blocks_per_page;
1907 poff = block % blocks_per_page;
1908 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1909 if (page == bitmap_page) {
1910 /*
1911 * If both the bitmap and buddy are in
1912 * the same page we don't need to force
1913 * init the buddy
1914 */
1915 unlock_page(page);
1916 } else if (page) {
1917 BUG_ON(page->mapping != inode->i_mapping);
1918 ret = ext4_mb_init_cache(page, bitmap);
1919 if (ret) {
1920 unlock_page(page);
1921 goto err;
1922 }
1923 unlock_page(page);
1924 }
1925 if (page == NULL || !PageUptodate(page)) {
1926 ret = -EIO;
1927 goto err;
1928 }
1929 mark_page_accessed(page);
1930err:
1931 ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
1932 if (bitmap_page)
1933 page_cache_release(bitmap_page);
1934 if (page)
1935 page_cache_release(page);
1936 return ret;
1937}
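
ext4_mb_init_group() reads the bitmap block (2*g) and then the buddy block (2*g + 1); whether the two land on the same buddy-cache page depends on the blocksize-to-page-size ratio, which is exactly what the page == bitmap_page shortcut above tests. The arithmetic, worked for two example block sizes:

    #include <stdio.h>

    int main(void)
    {
        int page_size = 4096;
        int sizes[] = { 1024, 4096 };

        for (int i = 0; i < 2; i++) {
            int bpp = page_size / sizes[i];       /* blocks per page */
            int group = 1;
            int bitmap_blk = group * 2, buddy_blk = group * 2 + 1;
            printf("blocksize %4d: bitmap page %d off %d, buddy page %d off %d%s\n",
                   sizes[i], bitmap_blk / bpp, bitmap_blk % bpp,
                   buddy_blk / bpp, buddy_blk % bpp,
                   bitmap_blk / bpp == buddy_blk / bpp ? " (same page)" : "");
        }
        return 0;
    }
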
1938
1695static noinline_for_stack int 1939static noinline_for_stack int
1696ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 1940ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1697{ 1941{
@@ -1775,7 +2019,7 @@ repeat:
1775 group = 0; 2019 group = 0;
1776 2020
1777 /* quick check to skip empty groups */ 2021 /* quick check to skip empty groups */
1778 grp = ext4_get_group_info(ac->ac_sb, group); 2022 grp = ext4_get_group_info(sb, group);
1779 if (grp->bb_free == 0) 2023 if (grp->bb_free == 0)
1780 continue; 2024 continue;
1781 2025
@@ -1788,10 +2032,9 @@ repeat:
1788 * we need full data about the group 2032 * we need full data about the group
1789 * to make a good selection 2033 * to make a good selection
1790 */ 2034 */
1791 err = ext4_mb_load_buddy(sb, group, &e4b); 2035 err = ext4_mb_init_group(sb, group);
1792 if (err) 2036 if (err)
1793 goto out; 2037 goto out;
1794 ext4_mb_release_desc(&e4b);
1795 } 2038 }
1796 2039
1797 /* 2040 /*
@@ -1932,13 +2175,13 @@ static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
1932 if (hs->op == EXT4_MB_HISTORY_ALLOC) { 2175 if (hs->op == EXT4_MB_HISTORY_ALLOC) {
1933 fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u " 2176 fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
1934 "%-5u %-5s %-5u %-6u\n"; 2177 "%-5u %-5s %-5u %-6u\n";
1935 sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group, 2178 sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
1936 hs->result.fe_start, hs->result.fe_len, 2179 hs->result.fe_start, hs->result.fe_len,
1937 hs->result.fe_logical); 2180 hs->result.fe_logical);
1938 sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group, 2181 sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
1939 hs->orig.fe_start, hs->orig.fe_len, 2182 hs->orig.fe_start, hs->orig.fe_len,
1940 hs->orig.fe_logical); 2183 hs->orig.fe_logical);
1941 sprintf(buf3, "%lu/%d/%u@%u", hs->goal.fe_group, 2184 sprintf(buf3, "%u/%d/%u@%u", hs->goal.fe_group,
1942 hs->goal.fe_start, hs->goal.fe_len, 2185 hs->goal.fe_start, hs->goal.fe_len,
1943 hs->goal.fe_logical); 2186 hs->goal.fe_logical);
1944 seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2, 2187 seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
@@ -1947,20 +2190,20 @@ static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
1947 hs->buddy ? 1 << hs->buddy : 0); 2190 hs->buddy ? 1 << hs->buddy : 0);
1948 } else if (hs->op == EXT4_MB_HISTORY_PREALLOC) { 2191 } else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
1949 fmt = "%-5u %-8u %-23s %-23s %-23s\n"; 2192 fmt = "%-5u %-8u %-23s %-23s %-23s\n";
1950 sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group, 2193 sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
1951 hs->result.fe_start, hs->result.fe_len, 2194 hs->result.fe_start, hs->result.fe_len,
1952 hs->result.fe_logical); 2195 hs->result.fe_logical);
1953 sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group, 2196 sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
1954 hs->orig.fe_start, hs->orig.fe_len, 2197 hs->orig.fe_start, hs->orig.fe_len,
1955 hs->orig.fe_logical); 2198 hs->orig.fe_logical);
1956 seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2); 2199 seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
1957 } else if (hs->op == EXT4_MB_HISTORY_DISCARD) { 2200 } else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
1958 sprintf(buf2, "%lu/%d/%u", hs->result.fe_group, 2201 sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
1959 hs->result.fe_start, hs->result.fe_len); 2202 hs->result.fe_start, hs->result.fe_len);
1960 seq_printf(seq, "%-5u %-8u %-23s discard\n", 2203 seq_printf(seq, "%-5u %-8u %-23s discard\n",
1961 hs->pid, hs->ino, buf2); 2204 hs->pid, hs->ino, buf2);
1962 } else if (hs->op == EXT4_MB_HISTORY_FREE) { 2205 } else if (hs->op == EXT4_MB_HISTORY_FREE) {
1963 sprintf(buf2, "%lu/%d/%u", hs->result.fe_group, 2206 sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
1964 hs->result.fe_start, hs->result.fe_len); 2207 hs->result.fe_start, hs->result.fe_len);
1965 seq_printf(seq, "%-5u %-8u %-23s free\n", 2208 seq_printf(seq, "%-5u %-8u %-23s free\n",
1966 hs->pid, hs->ino, buf2); 2209 hs->pid, hs->ino, buf2);
@@ -2073,7 +2316,7 @@ static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2073 return NULL; 2316 return NULL;
2074 2317
2075 group = *pos + 1; 2318 group = *pos + 1;
2076 return (void *) group; 2319 return (void *) ((unsigned long) group);
2077} 2320}
2078 2321
2079static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2322static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -2086,13 +2329,13 @@ static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2086 if (*pos < 0 || *pos >= sbi->s_groups_count) 2329 if (*pos < 0 || *pos >= sbi->s_groups_count)
2087 return NULL; 2330 return NULL;
2088 group = *pos + 1; 2331 group = *pos + 1;
2089 return (void *) group;; 2332 return (void *) ((unsigned long) group);
2090} 2333}
2091 2334
2092static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2335static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2093{ 2336{
2094 struct super_block *sb = seq->private; 2337 struct super_block *sb = seq->private;
2095 long group = (long) v; 2338 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2096 int i; 2339 int i;
2097 int err; 2340 int err;
2098 struct ext4_buddy e4b; 2341 struct ext4_buddy e4b;
@@ -2114,7 +2357,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2114 sizeof(struct ext4_group_info); 2357 sizeof(struct ext4_group_info);
2115 err = ext4_mb_load_buddy(sb, group, &e4b); 2358 err = ext4_mb_load_buddy(sb, group, &e4b);
2116 if (err) { 2359 if (err) {
2117 seq_printf(seq, "#%-5lu: I/O error\n", group); 2360 seq_printf(seq, "#%-5u: I/O error\n", group);
2118 return 0; 2361 return 0;
2119 } 2362 }
2120 ext4_lock_group(sb, group); 2363 ext4_lock_group(sb, group);
@@ -2122,7 +2365,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2122 ext4_unlock_group(sb, group); 2365 ext4_unlock_group(sb, group);
2123 ext4_mb_release_desc(&e4b); 2366 ext4_mb_release_desc(&e4b);
2124 2367
2125 seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free, 2368 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2126 sg.info.bb_fragments, sg.info.bb_first_free); 2369 sg.info.bb_fragments, sg.info.bb_first_free);
2127 for (i = 0; i <= 13; i++) 2370 for (i = 0; i <= 13; i++)
2128 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ? 2371 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
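
The seq_file fixes in this hunk and the previous one are about pointer width: ext4_group_t is a 32-bit type, and casting it straight to void * is an int-to-pointer size mismatch on 64-bit builds. Widening through unsigned long (pointer-sized on Linux) in both directions keeps the round trip warning-free, as this minimal sketch shows:

    #include <stdio.h>

    int main(void)
    {
        unsigned int group = 42;

        /* widen to unsigned long first, then to a pointer, and back */
        void *cookie = (void *) ((unsigned long) group);
        unsigned int back = (unsigned int) ((unsigned long) cookie);

        printf("round-tripped group = %u\n", back);
        return 0;
    }
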
@@ -2296,10 +2539,11 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2296 ext4_free_blocks_after_init(sb, group, desc); 2539 ext4_free_blocks_after_init(sb, group, desc);
2297 } else { 2540 } else {
2298 meta_group_info[i]->bb_free = 2541 meta_group_info[i]->bb_free =
2299 le16_to_cpu(desc->bg_free_blocks_count); 2542 ext4_free_blks_count(sb, desc);
2300 } 2543 }
2301 2544
2302 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 2545 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2546 init_rwsem(&meta_group_info[i]->alloc_sem);
 2303 meta_group_info[i]->bb_free_root.rb_node = NULL; 2547 meta_group_info[i]->bb_free_root.rb_node = NULL;
2304 2548
2305#ifdef DOUBLE_CHECK 2549#ifdef DOUBLE_CHECK
@@ -2327,54 +2571,6 @@ exit_meta_group_info:
2327} /* ext4_mb_add_groupinfo */ 2571} /* ext4_mb_add_groupinfo */
2328 2572
2329/* 2573/*
2330 * Add a group to the existing groups.
2331 * This function is used for online resize
2332 */
2333int ext4_mb_add_more_groupinfo(struct super_block *sb, ext4_group_t group,
2334 struct ext4_group_desc *desc)
2335{
2336 struct ext4_sb_info *sbi = EXT4_SB(sb);
2337 struct inode *inode = sbi->s_buddy_cache;
2338 int blocks_per_page;
2339 int block;
2340 int pnum;
2341 struct page *page;
2342 int err;
2343
2344 /* Add group based on group descriptor*/
2345 err = ext4_mb_add_groupinfo(sb, group, desc);
2346 if (err)
2347 return err;
2348
2349 /*
 2350 * Cache pages containing dynamic mb_alloc data (buddy and bitmap
 2351 * data) are marked not up to date so that they will be re-initialized
2352 * during the next call to ext4_mb_load_buddy
2353 */
2354
2355 /* Set buddy page as not up to date */
2356 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
2357 block = group * 2;
2358 pnum = block / blocks_per_page;
2359 page = find_get_page(inode->i_mapping, pnum);
2360 if (page != NULL) {
2361 ClearPageUptodate(page);
2362 page_cache_release(page);
2363 }
2364
2365 /* Set bitmap page as not up to date */
2366 block++;
2367 pnum = block / blocks_per_page;
2368 page = find_get_page(inode->i_mapping, pnum);
2369 if (page != NULL) {
2370 ClearPageUptodate(page);
2371 page_cache_release(page);
2372 }
2373
2374 return 0;
2375}
2376
2377/*
2378 * Update an existing group. 2574 * Update an existing group.
2379 * This function is used for online resize 2575 * This function is used for online resize
2380 */ 2576 */
@@ -2457,7 +2653,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
2457 desc = ext4_get_group_desc(sb, i, NULL); 2653 desc = ext4_get_group_desc(sb, i, NULL);
2458 if (desc == NULL) { 2654 if (desc == NULL) {
2459 printk(KERN_ERR 2655 printk(KERN_ERR
2460 "EXT4-fs: can't read descriptor %lu\n", i); 2656 "EXT4-fs: can't read descriptor %u\n", i);
2461 goto err_freebuddy; 2657 goto err_freebuddy;
2462 } 2658 }
2463 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 2659 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
@@ -2493,6 +2689,8 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
2493 if (sbi->s_mb_offsets == NULL) { 2689 if (sbi->s_mb_offsets == NULL) {
2494 return -ENOMEM; 2690 return -ENOMEM;
2495 } 2691 }
2692
2693 i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int);
2496 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 2694 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2497 if (sbi->s_mb_maxs == NULL) { 2695 if (sbi->s_mb_maxs == NULL) {
2498 kfree(sbi->s_mb_maxs); 2696 kfree(sbi->s_mb_maxs);
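
The added assignment above re-derives the byte count before the second kmalloc(): `i` still holds the size computed for s_mb_offsets, whose element type differs from the unsigned ints stored in s_mb_maxs, so reusing it mis-sizes the array. (The unchanged error branch also kfree()s s_mb_maxs, which is NULL at that point; freeing s_mb_offsets looks like the intent.) The general rule, each array sized from its own element type:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int nslots = 14;   /* e.g. blocksize_bits + 2 */

        /* size each allocation from its own element type rather than
         * reusing a byte count computed for a different array */
        unsigned short *offsets = malloc(nslots * sizeof(*offsets));
        unsigned int   *maxs    = malloc(nslots * sizeof(*maxs));

        if (!offsets || !maxs) {
            free(offsets);
            free(maxs);
            return 1;
        }
        printf("offsets: %zu bytes, maxs: %zu bytes\n",
               nslots * sizeof(*offsets), nslots * sizeof(*maxs));
        free(offsets);
        free(maxs);
        return 0;
    }
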
@@ -2551,7 +2749,8 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
2551 ext4_mb_init_per_dev_proc(sb); 2749 ext4_mb_init_per_dev_proc(sb);
2552 ext4_mb_history_init(sb); 2750 ext4_mb_history_init(sb);
2553 2751
2554 sbi->s_journal->j_commit_callback = release_blocks_on_commit; 2752 if (sbi->s_journal)
2753 sbi->s_journal->j_commit_callback = release_blocks_on_commit;
2555 2754
2556 printk(KERN_INFO "EXT4-fs: mballoc enabled\n"); 2755 printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
2557 return 0; 2756 return 0;
@@ -2652,7 +2851,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2652 list_for_each_safe(l, ltmp, &txn->t_private_list) { 2851 list_for_each_safe(l, ltmp, &txn->t_private_list) {
2653 entry = list_entry(l, struct ext4_free_data, list); 2852 entry = list_entry(l, struct ext4_free_data, list);
2654 2853
2655 mb_debug("gonna free %u blocks in group %lu (0x%p):", 2854 mb_debug("gonna free %u blocks in group %u (0x%p):",
2656 entry->count, entry->group, entry); 2855 entry->count, entry->group, entry);
2657 2856
2658 err = ext4_mb_load_buddy(sb, entry->group, &e4b); 2857 err = ext4_mb_load_buddy(sb, entry->group, &e4b);
@@ -2679,8 +2878,9 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2679 discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb) 2878 discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb)
2680 + entry->start_blk 2879 + entry->start_blk
2681 + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 2880 + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
2682 trace_mark(ext4_discard_blocks, "dev %s blk %llu count %u", sb->s_id, 2881 trace_mark(ext4_discard_blocks, "dev %s blk %llu count %u",
2683 (unsigned long long) discard_block, entry->count); 2882 sb->s_id, (unsigned long long) discard_block,
2883 entry->count);
2684 sb_issue_discard(sb, discard_block, entry->count); 2884 sb_issue_discard(sb, discard_block, entry->count);
2685 2885
2686 kmem_cache_free(ext4_free_ext_cachep, entry); 2886 kmem_cache_free(ext4_free_ext_cachep, entry);
@@ -2791,7 +2991,7 @@ void exit_ext4_mballoc(void)
2791 */ 2991 */
2792static noinline_for_stack int 2992static noinline_for_stack int
2793ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 2993ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2794 handle_t *handle, unsigned long reserv_blks) 2994 handle_t *handle, unsigned int reserv_blks)
2795{ 2995{
2796 struct buffer_head *bitmap_bh = NULL; 2996 struct buffer_head *bitmap_bh = NULL;
2797 struct ext4_super_block *es; 2997 struct ext4_super_block *es;
@@ -2824,7 +3024,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2824 if (!gdp) 3024 if (!gdp)
2825 goto out_err; 3025 goto out_err;
2826 3026
2827 ext4_debug("using block group %lu(%d)\n", ac->ac_b_ex.fe_group, 3027 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2828 gdp->bg_free_blocks_count); 3028 gdp->bg_free_blocks_count);
2829 3029
2830 err = ext4_journal_get_write_access(handle, gdp_bh); 3030 err = ext4_journal_get_write_access(handle, gdp_bh);
@@ -2843,8 +3043,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2843 in_range(block + len - 1, ext4_inode_table(sb, gdp), 3043 in_range(block + len - 1, ext4_inode_table(sb, gdp),
2844 EXT4_SB(sb)->s_itb_per_group)) { 3044 EXT4_SB(sb)->s_itb_per_group)) {
2845 ext4_error(sb, __func__, 3045 ext4_error(sb, __func__,
2846 "Allocating block in system zone - block = %llu", 3046 "Allocating block %llu in system zone of %d group\n",
2847 block); 3047 block, ac->ac_b_ex.fe_group);
2848 /* File system mounted not to panic on error 3048 /* File system mounted not to panic on error
2849 * Fix the bitmap and repeat the block allocation 3049 * Fix the bitmap and repeat the block allocation
2850 * We leak some of the blocks here. 3050 * We leak some of the blocks here.
@@ -2852,7 +3052,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2852 mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), 3052 mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
2853 bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3053 bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2854 ac->ac_b_ex.fe_len); 3054 ac->ac_b_ex.fe_len);
2855 err = ext4_journal_dirty_metadata(handle, bitmap_bh); 3055 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2856 if (!err) 3056 if (!err)
2857 err = -EAGAIN; 3057 err = -EAGAIN;
2858 goto out_err; 3058 goto out_err;
@@ -2866,18 +3066,17 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2866 } 3066 }
2867 } 3067 }
2868#endif 3068#endif
2869 mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
2870 ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
2871
2872 spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group)); 3069 spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
3070 mb_set_bits(NULL, bitmap_bh->b_data,
3071 ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
2873 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 3072 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2874 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 3073 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2875 gdp->bg_free_blocks_count = 3074 ext4_free_blks_set(sb, gdp,
2876 cpu_to_le16(ext4_free_blocks_after_init(sb, 3075 ext4_free_blocks_after_init(sb,
2877 ac->ac_b_ex.fe_group, 3076 ac->ac_b_ex.fe_group, gdp));
2878 gdp));
2879 } 3077 }
2880 le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len); 3078 len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
3079 ext4_free_blks_set(sb, gdp, len);
2881 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp); 3080 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
2882 spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group)); 3081 spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
2883 percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len); 3082 percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
@@ -2899,10 +3098,10 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2899 spin_unlock(sb_bgl_lock(sbi, flex_group)); 3098 spin_unlock(sb_bgl_lock(sbi, flex_group));
2900 } 3099 }
2901 3100
2902 err = ext4_journal_dirty_metadata(handle, bitmap_bh); 3101 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2903 if (err) 3102 if (err)
2904 goto out_err; 3103 goto out_err;
2905 err = ext4_journal_dirty_metadata(handle, gdp_bh); 3104 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
2906 3105
2907out_err: 3106out_err:
2908 sb->s_dirt = 1; 3107 sb->s_dirt = 1;
@@ -3031,7 +3230,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3031 /* check we don't cross already preallocated blocks */ 3230 /* check we don't cross already preallocated blocks */
3032 rcu_read_lock(); 3231 rcu_read_lock();
3033 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3232 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3034 unsigned long pa_end; 3233 ext4_lblk_t pa_end;
3035 3234
3036 if (pa->pa_deleted) 3235 if (pa->pa_deleted)
3037 continue; 3236 continue;
@@ -3075,7 +3274,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3075 /* XXX: extra loop to check we really don't overlap preallocations */ 3274 /* XXX: extra loop to check we really don't overlap preallocations */
3076 rcu_read_lock(); 3275 rcu_read_lock();
3077 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3276 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3078 unsigned long pa_end; 3277 ext4_lblk_t pa_end;
3079 spin_lock(&pa->pa_lock); 3278 spin_lock(&pa->pa_lock);
3080 if (pa->pa_deleted == 0) { 3279 if (pa->pa_deleted == 0) {
3081 pa_end = pa->pa_lstart + pa->pa_len; 3280 pa_end = pa->pa_lstart + pa->pa_len;
@@ -3307,6 +3506,32 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3307} 3506}
3308 3507
3309/* 3508/*
 3509 * the function goes through all blocks freed in the group
 3510 * but not yet committed and marks them used in the in-core bitmap.
 3511 * buddy must be generated from this bitmap
 3512 * Needs to be called with the ext4 group lock held (ext4_lock_group)
3513 */
3514static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3515 ext4_group_t group)
3516{
3517 struct rb_node *n;
3518 struct ext4_group_info *grp;
3519 struct ext4_free_data *entry;
3520
3521 grp = ext4_get_group_info(sb, group);
3522 n = rb_first(&(grp->bb_free_root));
3523
3524 while (n) {
3525 entry = rb_entry(n, struct ext4_free_data, node);
3526 mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
3527 bitmap, entry->start_blk,
3528 entry->count);
3529 n = rb_next(n);
3530 }
3531 return;
3532}
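
ext4_mb_generate_from_freelist() walks the group's bb_free_root rbtree in ascending block order (rb_first()/rb_next()) and re-marks every freed-but-uncommitted extent as in use, so a buddy built from this bitmap cannot hand those blocks out before the transaction commits. A userspace model, with a sorted array standing in for the rbtree walk:

    #include <stdio.h>

    struct free_extent { int start; int count; };   /* models ext4_free_data */

    static void set_bit_range(unsigned char *bm, int start, int count)
    {
        for (int b = start; b < start + count; b++)
            bm[b >> 3] |= 1u << (b & 7);
    }

    int main(void)
    {
        unsigned char bitmap[8] = { 0 };        /* 64-block toy group */
        /* extents freed in the running transaction, kept sorted, like
         * the rb_first()/rb_next() walk would deliver them */
        struct free_extent pending[] = { { 3, 2 }, { 10, 4 } };

        for (unsigned i = 0; i < sizeof(pending) / sizeof(pending[0]); i++)
            set_bit_range(bitmap, pending[i].start, pending[i].count);

        printf("bitmap[0..1] = %02x %02x\n", bitmap[0], bitmap[1]); /* 18 3c */
        return 0;
    }
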
3533
3534/*
 3310 * the function goes through all preallocations in this group and marks them 3535 * the function goes through all preallocations in this group and marks them
 3311 * used in the in-core bitmap. buddy must be generated from this bitmap 3536 * used in the in-core bitmap. buddy must be generated from this bitmap
 3312 * Needs to be called with the ext4 group lock held (ext4_lock_group) 3537 * Needs to be called with the ext4 group lock held (ext4_lock_group)
@@ -3346,7 +3571,7 @@ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3346 preallocated += len; 3571 preallocated += len;
3347 count++; 3572 count++;
3348 } 3573 }
3349 mb_debug("prellocated %u for group %lu\n", preallocated, group); 3574 mb_debug("prellocated %u for group %u\n", preallocated, group);
3350} 3575}
3351 3576
3352static void ext4_mb_pa_callback(struct rcu_head *head) 3577static void ext4_mb_pa_callback(struct rcu_head *head)
@@ -3363,7 +3588,7 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
3363static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 3588static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3364 struct super_block *sb, struct ext4_prealloc_space *pa) 3589 struct super_block *sb, struct ext4_prealloc_space *pa)
3365{ 3590{
3366 unsigned long grp; 3591 ext4_group_t grp;
3367 3592
3368 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) 3593 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3369 return; 3594 return;
@@ -3473,6 +3698,10 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3473 3698
3474 mb_debug("new inode pa %p: %llu/%u for %u\n", pa, 3699 mb_debug("new inode pa %p: %llu/%u for %u\n", pa,
3475 pa->pa_pstart, pa->pa_len, pa->pa_lstart); 3700 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3701 trace_mark(ext4_mb_new_inode_pa,
3702 "dev %s ino %lu pstart %llu len %u lstart %u",
3703 sb->s_id, ac->ac_inode->i_ino,
3704 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3476 3705
3477 ext4_mb_use_inode_pa(ac, pa); 3706 ext4_mb_use_inode_pa(ac, pa);
3478 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 3707 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
@@ -3530,7 +3759,9 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3530 pa->pa_linear = 1; 3759 pa->pa_linear = 1;
3531 3760
3532 mb_debug("new group pa %p: %llu/%u for %u\n", pa, 3761 mb_debug("new group pa %p: %llu/%u for %u\n", pa,
3533 pa->pa_pstart, pa->pa_len, pa->pa_lstart); 3762 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3763 trace_mark(ext4_mb_new_group_pa, "dev %s pstart %llu len %u lstart %u",
3764 sb->s_id, pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3534 3765
3535 ext4_mb_use_group_pa(ac, pa); 3766 ext4_mb_use_group_pa(ac, pa);
3536 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 3767 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
@@ -3579,16 +3810,18 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3579{ 3810{
3580 struct super_block *sb = e4b->bd_sb; 3811 struct super_block *sb = e4b->bd_sb;
3581 struct ext4_sb_info *sbi = EXT4_SB(sb); 3812 struct ext4_sb_info *sbi = EXT4_SB(sb);
3582 unsigned long end; 3813 unsigned int end;
3583 unsigned long next; 3814 unsigned int next;
3584 ext4_group_t group; 3815 ext4_group_t group;
3585 ext4_grpblk_t bit; 3816 ext4_grpblk_t bit;
3817 unsigned long long grp_blk_start;
3586 sector_t start; 3818 sector_t start;
3587 int err = 0; 3819 int err = 0;
3588 int free = 0; 3820 int free = 0;
3589 3821
3590 BUG_ON(pa->pa_deleted == 0); 3822 BUG_ON(pa->pa_deleted == 0);
3591 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3823 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3824 grp_blk_start = pa->pa_pstart - bit;
3592 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3825 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3593 end = bit + pa->pa_len; 3826 end = bit + pa->pa_len;
3594 3827
@@ -3618,6 +3851,10 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3618 ext4_mb_store_history(ac); 3851 ext4_mb_store_history(ac);
3619 } 3852 }
3620 3853
3854 trace_mark(ext4_mb_release_inode_pa,
3855 "dev %s ino %lu block %llu count %u",
3856 sb->s_id, pa->pa_inode->i_ino, grp_blk_start + bit,
3857 next - bit);
3621 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 3858 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3622 bit = next + 1; 3859 bit = next + 1;
3623 } 3860 }
@@ -3626,8 +3863,9 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3626 pa, (unsigned long) pa->pa_lstart, 3863 pa, (unsigned long) pa->pa_lstart,
3627 (unsigned long) pa->pa_pstart, 3864 (unsigned long) pa->pa_pstart,
3628 (unsigned long) pa->pa_len); 3865 (unsigned long) pa->pa_len);
3629 ext4_error(sb, __func__, "free %u, pa_free %u\n", 3866 ext4_grp_locked_error(sb, group,
3630 free, pa->pa_free); 3867 __func__, "free %u, pa_free %u",
3868 free, pa->pa_free);
3631 /* 3869 /*
3632 * pa is already deleted so we use the value obtained 3870 * pa is already deleted so we use the value obtained
3633 * from the bitmap and continue. 3871 * from the bitmap and continue.
@@ -3650,6 +3888,8 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3650 if (ac) 3888 if (ac)
3651 ac->ac_op = EXT4_MB_HISTORY_DISCARD; 3889 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3652 3890
3891 trace_mark(ext4_mb_release_group_pa, "dev %s pstart %llu len %d",
3892 sb->s_id, pa->pa_pstart, pa->pa_len);
3653 BUG_ON(pa->pa_deleted == 0); 3893 BUG_ON(pa->pa_deleted == 0);
3654 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3894 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3655 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3895 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
@@ -3692,7 +3932,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
3692 int busy = 0; 3932 int busy = 0;
3693 int free = 0; 3933 int free = 0;
3694 3934
3695 mb_debug("discard preallocation for group %lu\n", group); 3935 mb_debug("discard preallocation for group %u\n", group);
3696 3936
3697 if (list_empty(&grp->bb_prealloc_list)) 3937 if (list_empty(&grp->bb_prealloc_list))
3698 return 0; 3938 return 0;
@@ -3700,14 +3940,14 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
3700 bitmap_bh = ext4_read_block_bitmap(sb, group); 3940 bitmap_bh = ext4_read_block_bitmap(sb, group);
3701 if (bitmap_bh == NULL) { 3941 if (bitmap_bh == NULL) {
3702 ext4_error(sb, __func__, "Error in reading block " 3942 ext4_error(sb, __func__, "Error in reading block "
3703 "bitmap for %lu\n", group); 3943 "bitmap for %u", group);
3704 return 0; 3944 return 0;
3705 } 3945 }
3706 3946
3707 err = ext4_mb_load_buddy(sb, group, &e4b); 3947 err = ext4_mb_load_buddy(sb, group, &e4b);
3708 if (err) { 3948 if (err) {
3709 ext4_error(sb, __func__, "Error in loading buddy " 3949 ext4_error(sb, __func__, "Error in loading buddy "
3710 "information for %lu\n", group); 3950 "information for %u", group);
3711 put_bh(bitmap_bh); 3951 put_bh(bitmap_bh);
3712 return 0; 3952 return 0;
3713 } 3953 }
@@ -3815,6 +4055,8 @@ void ext4_discard_preallocations(struct inode *inode)
3815 } 4055 }
3816 4056
3817 mb_debug("discard preallocation for inode %lu\n", inode->i_ino); 4057 mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
4058 trace_mark(ext4_discard_preallocations, "dev %s ino %lu", sb->s_id,
4059 inode->i_ino);
3818 4060
3819 INIT_LIST_HEAD(&list); 4061 INIT_LIST_HEAD(&list);
3820 4062
@@ -3874,14 +4116,14 @@ repeat:
3874 err = ext4_mb_load_buddy(sb, group, &e4b); 4116 err = ext4_mb_load_buddy(sb, group, &e4b);
3875 if (err) { 4117 if (err) {
3876 ext4_error(sb, __func__, "Error in loading buddy " 4118 ext4_error(sb, __func__, "Error in loading buddy "
3877 "information for %lu\n", group); 4119 "information for %u", group);
3878 continue; 4120 continue;
3879 } 4121 }
3880 4122
3881 bitmap_bh = ext4_read_block_bitmap(sb, group); 4123 bitmap_bh = ext4_read_block_bitmap(sb, group);
3882 if (bitmap_bh == NULL) { 4124 if (bitmap_bh == NULL) {
3883 ext4_error(sb, __func__, "Error in reading block " 4125 ext4_error(sb, __func__, "Error in reading block "
3884 "bitmap for %lu\n", group); 4126 "bitmap for %u", group);
3885 ext4_mb_release_desc(&e4b); 4127 ext4_mb_release_desc(&e4b);
3886 continue; 4128 continue;
3887 } 4129 }
@@ -4024,8 +4266,8 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4024 struct ext4_sb_info *sbi = EXT4_SB(sb); 4266 struct ext4_sb_info *sbi = EXT4_SB(sb);
4025 struct ext4_super_block *es = sbi->s_es; 4267 struct ext4_super_block *es = sbi->s_es;
4026 ext4_group_t group; 4268 ext4_group_t group;
4027 unsigned long len; 4269 unsigned int len;
4028 unsigned long goal; 4270 ext4_fsblk_t goal;
4029 ext4_grpblk_t block; 4271 ext4_grpblk_t block;
4030 4272
4031 /* we can't allocate > group size */ 4273 /* we can't allocate > group size */
@@ -4068,6 +4310,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4068 ac->ac_pa = NULL; 4310 ac->ac_pa = NULL;
4069 ac->ac_bitmap_page = NULL; 4311 ac->ac_bitmap_page = NULL;
4070 ac->ac_buddy_page = NULL; 4312 ac->ac_buddy_page = NULL;
4313 ac->alloc_semp = NULL;
4071 ac->ac_lg = NULL; 4314 ac->ac_lg = NULL;
4072 4315
 4073 /* we have to define context: will we work with a file or 4316 /* we have to define context: will we work with a file or
@@ -4146,7 +4389,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
4146 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); 4389 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4147 if (ext4_mb_load_buddy(sb, group, &e4b)) { 4390 if (ext4_mb_load_buddy(sb, group, &e4b)) {
4148 ext4_error(sb, __func__, "Error in loading buddy " 4391 ext4_error(sb, __func__, "Error in loading buddy "
4149 "information for %lu\n", group); 4392 "information for %u", group);
4150 continue; 4393 continue;
4151 } 4394 }
4152 ext4_lock_group(sb, group); 4395 ext4_lock_group(sb, group);
@@ -4248,6 +4491,8 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4248 } 4491 }
4249 ext4_mb_put_pa(ac, ac->ac_sb, pa); 4492 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4250 } 4493 }
4494 if (ac->alloc_semp)
4495 up_read(ac->alloc_semp);
4251 if (ac->ac_bitmap_page) 4496 if (ac->ac_bitmap_page)
4252 page_cache_release(ac->ac_bitmap_page); 4497 page_cache_release(ac->ac_bitmap_page);
4253 if (ac->ac_buddy_page) 4498 if (ac->ac_buddy_page)
@@ -4264,6 +4509,8 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4264 int ret; 4509 int ret;
4265 int freed = 0; 4510 int freed = 0;
4266 4511
4512 trace_mark(ext4_mb_discard_preallocations, "dev %s needed %d",
4513 sb->s_id, needed);
4267 for (i = 0; i < EXT4_SB(sb)->s_groups_count && needed > 0; i++) { 4514 for (i = 0; i < EXT4_SB(sb)->s_groups_count && needed > 0; i++) {
4268 ret = ext4_mb_discard_group_preallocations(sb, i, needed); 4515 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4269 freed += ret; 4516 freed += ret;
@@ -4286,12 +4533,24 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4286 struct ext4_sb_info *sbi; 4533 struct ext4_sb_info *sbi;
4287 struct super_block *sb; 4534 struct super_block *sb;
4288 ext4_fsblk_t block = 0; 4535 ext4_fsblk_t block = 0;
4289 unsigned long inquota; 4536 unsigned int inquota;
4290 unsigned long reserv_blks = 0; 4537 unsigned int reserv_blks = 0;
4291 4538
4292 sb = ar->inode->i_sb; 4539 sb = ar->inode->i_sb;
4293 sbi = EXT4_SB(sb); 4540 sbi = EXT4_SB(sb);
4294 4541
4542 trace_mark(ext4_request_blocks, "dev %s flags %u len %u ino %lu "
4543 "lblk %llu goal %llu lleft %llu lright %llu "
4544 "pleft %llu pright %llu ",
4545 sb->s_id, ar->flags, ar->len,
4546 ar->inode ? ar->inode->i_ino : 0,
4547 (unsigned long long) ar->logical,
4548 (unsigned long long) ar->goal,
4549 (unsigned long long) ar->lleft,
4550 (unsigned long long) ar->lright,
4551 (unsigned long long) ar->pleft,
4552 (unsigned long long) ar->pright);
4553
4295 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) { 4554 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
4296 /* 4555 /*
4297 * With delalloc we already reserved the blocks 4556 * With delalloc we already reserved the blocks
@@ -4313,7 +4572,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4313 } 4572 }
4314 if (ar->len == 0) { 4573 if (ar->len == 0) {
4315 *errp = -EDQUOT; 4574 *errp = -EDQUOT;
4316 return 0; 4575 goto out3;
4317 } 4576 }
4318 inquota = ar->len; 4577 inquota = ar->len;
4319 4578
@@ -4348,10 +4607,14 @@ repeat:
4348 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 4607 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4349 ext4_mb_new_preallocation(ac); 4608 ext4_mb_new_preallocation(ac);
4350 } 4609 }
4351
4352 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 4610 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4353 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks); 4611 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
4354 if (*errp == -EAGAIN) { 4612 if (*errp == -EAGAIN) {
4613 /*
4614 * drop the reference that we took
4615 * in ext4_mb_use_best_found
4616 */
4617 ext4_mb_release_context(ac);
4355 ac->ac_b_ex.fe_group = 0; 4618 ac->ac_b_ex.fe_group = 0;
4356 ac->ac_b_ex.fe_start = 0; 4619 ac->ac_b_ex.fe_start = 0;
4357 ac->ac_b_ex.fe_len = 0; 4620 ac->ac_b_ex.fe_len = 0;
@@ -4382,6 +4645,26 @@ out2:
4382out1: 4645out1:
4383 if (ar->len < inquota) 4646 if (ar->len < inquota)
4384 DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len); 4647 DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
4648out3:
4649 if (!ar->len) {
4650 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4651 /* release all the reserved blocks if non delalloc */
4652 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
4653 reserv_blks);
4654 }
4655
4656 trace_mark(ext4_allocate_blocks,
4657 "dev %s block %llu flags %u len %u ino %lu "
4658 "logical %llu goal %llu lleft %llu lright %llu "
4659 "pleft %llu pright %llu ",
4660 sb->s_id, (unsigned long long) block,
4661 ar->flags, ar->len, ar->inode ? ar->inode->i_ino : 0,
4662 (unsigned long long) ar->logical,
4663 (unsigned long long) ar->goal,
4664 (unsigned long long) ar->lleft,
4665 (unsigned long long) ar->lright,
4666 (unsigned long long) ar->pleft,
4667 (unsigned long long) ar->pright);
4385 4668
4386 return block; 4669 return block;
4387} 4670}
@@ -4403,27 +4686,23 @@ static int can_merge(struct ext4_free_data *entry1,
4403 4686
4404static noinline_for_stack int 4687static noinline_for_stack int
4405ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 4688ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4406 ext4_group_t group, ext4_grpblk_t block, int count) 4689 struct ext4_free_data *new_entry)
4407{ 4690{
4691 ext4_grpblk_t block;
4692 struct ext4_free_data *entry;
4408 struct ext4_group_info *db = e4b->bd_info; 4693 struct ext4_group_info *db = e4b->bd_info;
4409 struct super_block *sb = e4b->bd_sb; 4694 struct super_block *sb = e4b->bd_sb;
4410 struct ext4_sb_info *sbi = EXT4_SB(sb); 4695 struct ext4_sb_info *sbi = EXT4_SB(sb);
4411 struct ext4_free_data *entry, *new_entry;
4412 struct rb_node **n = &db->bb_free_root.rb_node, *node; 4696 struct rb_node **n = &db->bb_free_root.rb_node, *node;
4413 struct rb_node *parent = NULL, *new_node; 4697 struct rb_node *parent = NULL, *new_node;
4414 4698
4415 4699 BUG_ON(!ext4_handle_valid(handle));
4416 BUG_ON(e4b->bd_bitmap_page == NULL); 4700 BUG_ON(e4b->bd_bitmap_page == NULL);
4417 BUG_ON(e4b->bd_buddy_page == NULL); 4701 BUG_ON(e4b->bd_buddy_page == NULL);
4418 4702
4419 new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
4420 new_entry->start_blk = block;
4421 new_entry->group = group;
4422 new_entry->count = count;
4423 new_entry->t_tid = handle->h_transaction->t_tid;
4424 new_node = &new_entry->node; 4703 new_node = &new_entry->node;
4704 block = new_entry->start_blk;
4425 4705
4426 ext4_lock_group(sb, group);
4427 if (!*n) { 4706 if (!*n) {
 4428 /* first free block extent. We need to 4707 /* first free block extent. We need to
4429 protect buddy cache from being freed, 4708 protect buddy cache from being freed,
@@ -4441,10 +4720,9 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4441 else if (block >= (entry->start_blk + entry->count)) 4720 else if (block >= (entry->start_blk + entry->count))
4442 n = &(*n)->rb_right; 4721 n = &(*n)->rb_right;
4443 else { 4722 else {
4444 ext4_unlock_group(sb, group); 4723 ext4_grp_locked_error(sb, e4b->bd_group, __func__,
4445 ext4_error(sb, __func__, 4724 "Double free of blocks %d (%d %d)",
4446 "Double free of blocks %d (%d %d)\n", 4725 block, entry->start_blk, entry->count);
4447 block, entry->start_blk, entry->count);
4448 return 0; 4726 return 0;
4449 } 4727 }
4450 } 4728 }
@@ -4483,7 +4761,6 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4483 spin_lock(&sbi->s_md_lock); 4761 spin_lock(&sbi->s_md_lock);
4484 list_add(&new_entry->list, &handle->h_transaction->t_private_list); 4762 list_add(&new_entry->list, &handle->h_transaction->t_private_list);
4485 spin_unlock(&sbi->s_md_lock); 4763 spin_unlock(&sbi->s_md_lock);
4486 ext4_unlock_group(sb, group);
4487 return 0; 4764 return 0;
4488} 4765}
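
With this change ext4_mb_free_metadata() takes a caller-built ext4_free_data and only does the rbtree descent: go left when the freed block sits below a node's range, right when it sits at or past the end of the range, and report a double free when it lands inside. The same comparison logic on a hand-rolled binary tree (no rebalancing, purely illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int start, count; struct node *left, *right; };

    /* insert [start, start+count); returns 0 on overlap ("double free") */
    static int insert(struct node **n, int start, int count)
    {
        while (*n) {
            if (start < (*n)->start)
                n = &(*n)->left;
            else if (start >= (*n)->start + (*n)->count)
                n = &(*n)->right;
            else
                return 0;       /* overlaps an already-freed extent */
        }
        *n = calloc(1, sizeof(**n));
        if (!*n)
            return -1;          /* allocation failure */
        (*n)->start = start;
        (*n)->count = count;
        return 1;
    }

    int main(void)
    {
        struct node *root = NULL;
        printf("%d\n", insert(&root, 100, 8));  /* 1: first free extent  */
        printf("%d\n", insert(&root, 200, 4));  /* 1: disjoint extent    */
        printf("%d\n", insert(&root, 103, 1));  /* 0: double free caught */
        return 0;
    }
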
4489 4766
@@ -4499,7 +4776,7 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4499 struct ext4_allocation_context *ac = NULL; 4776 struct ext4_allocation_context *ac = NULL;
4500 struct ext4_group_desc *gdp; 4777 struct ext4_group_desc *gdp;
4501 struct ext4_super_block *es; 4778 struct ext4_super_block *es;
4502 unsigned long overflow; 4779 unsigned int overflow;
4503 ext4_grpblk_t bit; 4780 ext4_grpblk_t bit;
4504 struct buffer_head *gd_bh; 4781 struct buffer_head *gd_bh;
4505 ext4_group_t block_group; 4782 ext4_group_t block_group;
@@ -4522,6 +4799,10 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4522 } 4799 }
4523 4800
4524 ext4_debug("freeing block %lu\n", block); 4801 ext4_debug("freeing block %lu\n", block);
4802 trace_mark(ext4_free_blocks,
4803 "dev %s block %llu count %lu metadata %d ino %lu",
4804 sb->s_id, (unsigned long long) block, count, metadata,
4805 inode ? inode->i_ino : 0);
4525 4806
4526 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); 4807 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4527 if (ac) { 4808 if (ac) {
@@ -4581,11 +4862,6 @@ do_more:
4581 err = ext4_journal_get_write_access(handle, gd_bh); 4862 err = ext4_journal_get_write_access(handle, gd_bh);
4582 if (err) 4863 if (err)
4583 goto error_return; 4864 goto error_return;
4584
4585 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4586 if (err)
4587 goto error_return;
4588
4589#ifdef AGGRESSIVE_CHECK 4865#ifdef AGGRESSIVE_CHECK
4590 { 4866 {
4591 int i; 4867 int i;
@@ -4593,13 +4869,6 @@ do_more:
4593 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 4869 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4594 } 4870 }
4595#endif 4871#endif
4596 mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
4597 bit, count);
4598
4599 /* We dirtied the bitmap block */
4600 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4601 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
4602
4603 if (ac) { 4872 if (ac) {
4604 ac->ac_b_ex.fe_group = block_group; 4873 ac->ac_b_ex.fe_group = block_group;
4605 ac->ac_b_ex.fe_start = bit; 4874 ac->ac_b_ex.fe_start = bit;
@@ -4607,19 +4876,41 @@ do_more:
4607 ext4_mb_store_history(ac); 4876 ext4_mb_store_history(ac);
4608 } 4877 }
4609 4878
4610 if (metadata) { 4879 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4611 /* blocks being freed are metadata. these blocks shouldn't 4880 if (err)
4612 * be used until this transaction is committed */ 4881 goto error_return;
4613 ext4_mb_free_metadata(handle, &e4b, block_group, bit, count); 4882 if (metadata && ext4_handle_valid(handle)) {
4883 struct ext4_free_data *new_entry;
4884 /*
4885 * blocks being freed are metadata. these blocks shouldn't
4886 * be used until this transaction is committed
4887 */
4888 new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
4889 new_entry->start_blk = bit;
4890 new_entry->group = block_group;
4891 new_entry->count = count;
4892 new_entry->t_tid = handle->h_transaction->t_tid;
4893 ext4_lock_group(sb, block_group);
4894 mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
4895 bit, count);
4896 ext4_mb_free_metadata(handle, &e4b, new_entry);
4897 ext4_unlock_group(sb, block_group);
4614 } else { 4898 } else {
4615 ext4_lock_group(sb, block_group); 4899 ext4_lock_group(sb, block_group);
4900 /* need to update group_info->bb_free and bitmap
 4901 * with the group lock held. generate_buddy looks
 4902 * at them with the group lock held
4903 */
4904 mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
4905 bit, count);
4616 mb_free_blocks(inode, &e4b, bit, count); 4906 mb_free_blocks(inode, &e4b, bit, count);
4617 ext4_mb_return_to_preallocation(inode, &e4b, block, count); 4907 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
4618 ext4_unlock_group(sb, block_group); 4908 ext4_unlock_group(sb, block_group);
4619 } 4909 }
4620 4910
4621 spin_lock(sb_bgl_lock(sbi, block_group)); 4911 spin_lock(sb_bgl_lock(sbi, block_group));
4622 le16_add_cpu(&gdp->bg_free_blocks_count, count); 4912 ret = ext4_free_blks_count(sb, gdp) + count;
4913 ext4_free_blks_set(sb, gdp, ret);
4623 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp); 4914 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
4624 spin_unlock(sb_bgl_lock(sbi, block_group)); 4915 spin_unlock(sb_bgl_lock(sbi, block_group));
4625 percpu_counter_add(&sbi->s_freeblocks_counter, count); 4916 percpu_counter_add(&sbi->s_freeblocks_counter, count);
@@ -4635,9 +4926,13 @@ do_more:
4635 4926
4636 *freed += count; 4927 *freed += count;
4637 4928
4929 /* We dirtied the bitmap block */
4930 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4931 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4932
4638 /* And the group descriptor block */ 4933 /* And the group descriptor block */
4639 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 4934 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4640 ret = ext4_journal_dirty_metadata(handle, gd_bh); 4935 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4641 if (!err) 4936 if (!err)
4642 err = ret; 4937 err = ret;
4643 4938
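
The reshuffled free path above has one theme: every in-core change that buddy generation could observe — clearing the bitmap bits, queuing the ext4_free_data entry, updating bb_free via mb_free_blocks() — now happens inside ext4_lock_group(), and the bitmap buffer is dirtied once, after the lock is dropped. A compact trace of the resulting ordering, with print statements standing in for the real operations:

    /* ordering model only; lock/unlock stand in for ext4_lock_group() */
    #include <stdio.h>

    static void lock_group(void)   { puts("lock group");   }
    static void unlock_group(void) { puts("unlock group"); }

    int main(void)
    {
        int metadata = 1;

        lock_group();
        puts("clear bits in bitmap");                 /* mb_clear_bits(), non-atomic */
        if (metadata)
            puts("queue extent on rbtree until commit"); /* ext4_mb_free_metadata() */
        else
            puts("return blocks to the buddy");          /* mb_free_blocks() */
        unlock_group();

        puts("mark bitmap buffer dirty");   /* once, after the group lock drops */
        return 0;
    }
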
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index b5dff1fff1e5..10a2921baf14 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -20,6 +20,7 @@
20#include <linux/version.h> 20#include <linux/version.h>
21#include <linux/blkdev.h> 21#include <linux/blkdev.h>
22#include <linux/marker.h> 22#include <linux/marker.h>
23#include <linux/mutex.h>
23#include "ext4_jbd2.h" 24#include "ext4_jbd2.h"
24#include "ext4.h" 25#include "ext4.h"
25#include "group.h" 26#include "group.h"
@@ -98,9 +99,6 @@
98 */ 99 */
99#define MB_DEFAULT_GROUP_PREALLOC 512 100#define MB_DEFAULT_GROUP_PREALLOC 512
100 101
101static struct kmem_cache *ext4_pspace_cachep;
102static struct kmem_cache *ext4_ac_cachep;
103static struct kmem_cache *ext4_free_ext_cachep;
104 102
105struct ext4_free_data { 103struct ext4_free_data {
106 /* this links the free block information from group_info */ 104 /* this links the free block information from group_info */
@@ -120,26 +118,6 @@ struct ext4_free_data {
120 tid_t t_tid; 118 tid_t t_tid;
121}; 119};
122 120
123struct ext4_group_info {
124 unsigned long bb_state;
125 struct rb_root bb_free_root;
126 unsigned short bb_first_free;
127 unsigned short bb_free;
128 unsigned short bb_fragments;
129 struct list_head bb_prealloc_list;
130#ifdef DOUBLE_CHECK
131 void *bb_bitmap;
132#endif
133 unsigned short bb_counters[];
134};
135
136#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
137#define EXT4_GROUP_INFO_LOCKED_BIT 1
138
139#define EXT4_MB_GRP_NEED_INIT(grp) \
140 (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
141
142
143struct ext4_prealloc_space { 121struct ext4_prealloc_space {
144 struct list_head pa_inode_list; 122 struct list_head pa_inode_list;
145 struct list_head pa_group_list; 123 struct list_head pa_group_list;
@@ -217,6 +195,11 @@ struct ext4_allocation_context {
217 __u8 ac_op; /* operation, for history only */ 195 __u8 ac_op; /* operation, for history only */
218 struct page *ac_bitmap_page; 196 struct page *ac_bitmap_page;
219 struct page *ac_buddy_page; 197 struct page *ac_buddy_page;
198 /*
199 * pointer to the held semaphore upon successful
200 * block allocation
201 */
202 struct rw_semaphore *alloc_semp;
220 struct ext4_prealloc_space *ac_pa; 203 struct ext4_prealloc_space *ac_pa;
221 struct ext4_locality_group *ac_lg; 204 struct ext4_locality_group *ac_lg;
222}; 205};
@@ -250,6 +233,7 @@ struct ext4_buddy {
250 struct super_block *bd_sb; 233 struct super_block *bd_sb;
251 __u16 bd_blkbits; 234 __u16 bd_blkbits;
252 ext4_group_t bd_group; 235 ext4_group_t bd_group;
236 struct rw_semaphore *alloc_semp;
253}; 237};
254#define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap) 238#define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap)
255#define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy) 239#define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy)
@@ -259,51 +243,12 @@ static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
259{ 243{
260 return; 244 return;
261} 245}
262#else
263static void ext4_mb_store_history(struct ext4_allocation_context *ac);
264#endif 246#endif
265 247
266#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) 248#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
267 249
268struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t); 250struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);
269 251static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
270static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
271 ext4_group_t group);
272static void ext4_mb_return_to_preallocation(struct inode *inode,
273 struct ext4_buddy *e4b, sector_t block,
274 int count);
275static void ext4_mb_put_pa(struct ext4_allocation_context *,
276 struct super_block *, struct ext4_prealloc_space *pa);
277static int ext4_mb_init_per_dev_proc(struct super_block *sb);
278static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
279static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
280
281
282static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
283{
284 struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
285
286 bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
287}
288
289static inline void ext4_unlock_group(struct super_block *sb,
290 ext4_group_t group)
291{
292 struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
293
294 bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
295}
296
297static inline int ext4_is_group_locked(struct super_block *sb,
298 ext4_group_t group)
299{
300 struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
301
302 return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
303 &(grinfo->bb_state));
304}
305
306static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
307 struct ext4_free_extent *fex) 252 struct ext4_free_extent *fex)
308{ 253{
309 ext4_fsblk_t block; 254 ext4_fsblk_t block;
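
ext4_grp_offs_to_block(), now an inline in the header, is plain address arithmetic: absolute block = group * blocks-per-group + offset-in-group + s_first_data_block. Worked with representative numbers (a 4K-blocksize filesystem, where the first data block is 0):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long blocks_per_group = 32768;  /* typical for 4K blocks */
        unsigned long long first_data_block = 0;      /* 1 on 1K-block filesystems */

        unsigned int group = 3, offset = 517;
        unsigned long long block =
                group * blocks_per_group + offset + first_data_block;

        printf("group %u offset %u -> fs block %llu\n", group, offset, block);
        return 0;
    }
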
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index f2a9cf498ecd..734abca25e35 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -59,7 +59,8 @@ static int finish_range(handle_t *handle, struct inode *inode,
59 /* 59 /*
 60 * Make sure the credits we accumulated are not really high 60 * Make sure the credits we accumulated are not really high
61 */ 61 */
62 if (needed && handle->h_buffer_credits >= EXT4_RESERVE_TRANS_BLOCKS) { 62 if (needed && ext4_handle_has_enough_credits(handle,
63 EXT4_RESERVE_TRANS_BLOCKS)) {
63 retval = ext4_journal_restart(handle, needed); 64 retval = ext4_journal_restart(handle, needed);
64 if (retval) 65 if (retval)
65 goto err_out; 66 goto err_out;
@@ -229,7 +230,7 @@ static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
229{ 230{
230 int retval = 0, needed; 231 int retval = 0, needed;
231 232
232 if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS) 233 if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
233 return 0; 234 return 0;
234 /* 235 /*
235 * We are freeing a blocks. During this we touch 236 * We are freeing a blocks. During this we touch
@@ -458,13 +459,13 @@ int ext4_ext_migrate(struct inode *inode)
458 struct list_blocks_struct lb; 459 struct list_blocks_struct lb;
459 unsigned long max_entries; 460 unsigned long max_entries;
460 461
461 if (!test_opt(inode->i_sb, EXTENTS)) 462 /*
462 /* 463 * If the filesystem does not support extents, or the inode
463 * if mounted with noextents we don't allow the migrate 464 * already is extent-based, error out.
464 */ 465 */
465 return -EINVAL; 466 if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
466 467 EXT4_FEATURE_INCOMPAT_EXTENTS) ||
467 if ((EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 468 (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
468 return -EINVAL; 469 return -EINVAL;
469 470
470 if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0) 471 if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 9fd2a5e1be4d..fec0b4c2f5f1 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -74,10 +74,6 @@ static struct buffer_head *ext4_append(handle_t *handle,
74#define assert(test) J_ASSERT(test) 74#define assert(test) J_ASSERT(test)
75#endif 75#endif
76 76
77#ifndef swap
78#define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0)
79#endif
80
81#ifdef DX_DEBUG 77#ifdef DX_DEBUG
82#define dxtrace(command) command 78#define dxtrace(command) command
83#else 79#else
@@ -372,6 +368,8 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
372 goto fail; 368 goto fail;
373 } 369 }
374 hinfo->hash_version = root->info.hash_version; 370 hinfo->hash_version = root->info.hash_version;
371 if (hinfo->hash_version <= DX_HASH_TEA)
372 hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
375 hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed; 373 hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
376 if (d_name) 374 if (d_name)
377 ext4fs_dirhash(d_name->name, d_name->len, hinfo); 375 ext4fs_dirhash(d_name->name, d_name->len, hinfo);
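
Both namei.c hunks apply the same adjustment: for the legacy, half-MD4, and TEA hashes the on-disk hash value depends on whether plain char is signed, so the superblock records s_hash_unsigned and the in-memory hash version is bumped to the matching unsigned variant before hashing. A sketch of the selection; the enum numbering (unsigned variants offset by 3) is an assumption consistent with the `+=` trick, not quoted from the header:

    #include <stdio.h>
    #include <limits.h>

    /* illustrative numbering: unsigned variants assumed to follow the
     * signed ones at +3, as the "+= s_hash_unsigned" adjustment implies */
    enum { DX_HASH_LEGACY, DX_HASH_HALF_MD4, DX_HASH_TEA,
           DX_HASH_LEGACY_UNSIGNED, DX_HASH_HALF_MD4_UNSIGNED,
           DX_HASH_TEA_UNSIGNED };

    int main(void)
    {
        int s_hash_unsigned = (CHAR_MIN == 0) ? 3 : 0;  /* set once per sb */
        int hash_version = DX_HASH_TEA;

        if (hash_version <= DX_HASH_TEA)
            hash_version += s_hash_unsigned;

        printf("effective hash version: %d\n", hash_version);
        return 0;
    }
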
@@ -641,6 +639,9 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
641 dir = dir_file->f_path.dentry->d_inode; 639 dir = dir_file->f_path.dentry->d_inode;
642 if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) { 640 if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) {
643 hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; 641 hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
642 if (hinfo.hash_version <= DX_HASH_TEA)
643 hinfo.hash_version +=
644 EXT4_SB(dir->i_sb)->s_hash_unsigned;
644 hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; 645 hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
645 count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo, 646 count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
646 start_hash, start_minor_hash); 647 start_hash, start_minor_hash);
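The two hash-setup sites above (and make_indexed_dir further below) now add s_hash_unsigned to hash_version for the legacy, half-MD4 and TEA hashes. The likely motivation, not spelled out in the hunks themselves: those hash functions fold name bytes through plain char, so architectures where char is unsigned computed different hashes for bytes >= 0x80, and the filesystem must record which flavour a directory was built with. A userspace toy (not ext4's real hash) showing the signedness effect:

#include <stdio.h>

static unsigned int toy_hash(const char *s, int len, int sign)
{
	unsigned int h = 0x12a3fe2d;

	while (len--) {
		int byte = sign ? (signed char)*s : (unsigned char)*s;
		h = h * 31 + byte;	/* sign- vs zero-extension diverges at 0x80 */
		s++;
	}
	return h;
}

int main(void)
{
	const char name[] = "caf\xc3\xa9";	/* bytes >= 0x80 expose the skew */

	printf("signed chars:   %08x\n", toy_hash(name, 5, 1));
	printf("unsigned chars: %08x\n", toy_hash(name, 5, 0));
	return 0;
}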
@@ -806,7 +807,7 @@ static inline int ext4_match (int len, const char * const name,
806static inline int search_dirblock(struct buffer_head *bh, 807static inline int search_dirblock(struct buffer_head *bh,
807 struct inode *dir, 808 struct inode *dir,
808 const struct qstr *d_name, 809 const struct qstr *d_name,
809 unsigned long offset, 810 unsigned int offset,
810 struct ext4_dir_entry_2 ** res_dir) 811 struct ext4_dir_entry_2 ** res_dir)
811{ 812{
812 struct ext4_dir_entry_2 * de; 813 struct ext4_dir_entry_2 * de;
@@ -1043,11 +1044,11 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
1043 bh = ext4_find_entry(dir, &dentry->d_name, &de); 1044 bh = ext4_find_entry(dir, &dentry->d_name, &de);
1044 inode = NULL; 1045 inode = NULL;
1045 if (bh) { 1046 if (bh) {
1046 unsigned long ino = le32_to_cpu(de->inode); 1047 __u32 ino = le32_to_cpu(de->inode);
1047 brelse(bh); 1048 brelse(bh);
1048 if (!ext4_valid_inum(dir->i_sb, ino)) { 1049 if (!ext4_valid_inum(dir->i_sb, ino)) {
1049 ext4_error(dir->i_sb, "ext4_lookup", 1050 ext4_error(dir->i_sb, "ext4_lookup",
1050 "bad inode number: %lu", ino); 1051 "bad inode number: %u", ino);
1051 return ERR_PTR(-EIO); 1052 return ERR_PTR(-EIO);
1052 } 1053 }
1053 inode = ext4_iget(dir->i_sb, ino); 1054 inode = ext4_iget(dir->i_sb, ino);
@@ -1060,7 +1061,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
1060 1061
1061struct dentry *ext4_get_parent(struct dentry *child) 1062struct dentry *ext4_get_parent(struct dentry *child)
1062{ 1063{
1063 unsigned long ino; 1064 __u32 ino;
1064 struct inode *inode; 1065 struct inode *inode;
1065 static const struct qstr dotdot = { 1066 static const struct qstr dotdot = {
1066 .name = "..", 1067 .name = "..",
@@ -1078,7 +1079,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
1078 1079
1079 if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { 1080 if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
1080 ext4_error(child->d_inode->i_sb, "ext4_get_parent", 1081 ext4_error(child->d_inode->i_sb, "ext4_get_parent",
1081 "bad inode number: %lu", ino); 1082 "bad inode number: %u", ino);
1082 return ERR_PTR(-EIO); 1083 return ERR_PTR(-EIO);
1083 } 1084 }
1084 1085
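ext4_lookup() and ext4_get_parent() above narrow ino from unsigned long to __u32 and switch the format from %lu to %u in step. On-disk directory entries store a 32-bit little-endian inode number, while unsigned long is 64 bits on LP64 kernels, so the old code carried the value in a wider type than the disk format; once the type shrinks, the specifier has to follow, since a 32-bit value against %lu is undefined in varargs. A quick userspace reminder of the rule the hunks enforce:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t ino = 0xfffffffeu;	/* stands in for le32_to_cpu(de->inode) */

	printf("bad inode number: %" PRIu32 "\n", ino);	/* type-correct */
	printf("widened copy:     %lu\n", (unsigned long)ino);
	return 0;
}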
@@ -1166,9 +1167,9 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1166 u32 hash2; 1167 u32 hash2;
1167 struct dx_map_entry *map; 1168 struct dx_map_entry *map;
1168 char *data1 = (*bh)->b_data, *data2; 1169 char *data1 = (*bh)->b_data, *data2;
1169 unsigned split, move, size, i; 1170 unsigned split, move, size;
1170 struct ext4_dir_entry_2 *de = NULL, *de2; 1171 struct ext4_dir_entry_2 *de = NULL, *de2;
1171 int err = 0; 1172 int err = 0, i;
1172 1173
1173 bh2 = ext4_append (handle, dir, &newblock, &err); 1174 bh2 = ext4_append (handle, dir, &newblock, &err);
1174 if (!(bh2)) { 1175 if (!(bh2)) {
@@ -1228,10 +1229,10 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1228 de = de2; 1229 de = de2;
1229 } 1230 }
1230 dx_insert_block(frame, hash2 + continued, newblock); 1231 dx_insert_block(frame, hash2 + continued, newblock);
1231 err = ext4_journal_dirty_metadata(handle, bh2); 1232 err = ext4_handle_dirty_metadata(handle, dir, bh2);
1232 if (err) 1233 if (err)
1233 goto journal_error; 1234 goto journal_error;
1234 err = ext4_journal_dirty_metadata(handle, frame->bh); 1235 err = ext4_handle_dirty_metadata(handle, dir, frame->bh);
1235 if (err) 1236 if (err)
1236 goto journal_error; 1237 goto journal_error;
1237 brelse(bh2); 1238 brelse(bh2);
@@ -1266,7 +1267,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
1266 struct inode *dir = dentry->d_parent->d_inode; 1267 struct inode *dir = dentry->d_parent->d_inode;
1267 const char *name = dentry->d_name.name; 1268 const char *name = dentry->d_name.name;
1268 int namelen = dentry->d_name.len; 1269 int namelen = dentry->d_name.len;
1269 unsigned long offset = 0; 1270 unsigned int offset = 0;
1270 unsigned short reclen; 1271 unsigned short reclen;
1271 int nlen, rlen, err; 1272 int nlen, rlen, err;
1272 char *top; 1273 char *top;
@@ -1335,8 +1336,8 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
1335 ext4_update_dx_flag(dir); 1336 ext4_update_dx_flag(dir);
1336 dir->i_version++; 1337 dir->i_version++;
1337 ext4_mark_inode_dirty(handle, dir); 1338 ext4_mark_inode_dirty(handle, dir);
1338 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 1339 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1339 err = ext4_journal_dirty_metadata(handle, bh); 1340 err = ext4_handle_dirty_metadata(handle, dir, bh);
1340 if (err) 1341 if (err)
1341 ext4_std_error(dir->i_sb, err); 1342 ext4_std_error(dir->i_sb, err);
1342 brelse(bh); 1343 brelse(bh);
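Throughout namei.c (and resize.c below) ext4_journal_dirty_metadata(handle, bh) becomes ext4_handle_dirty_metadata(handle, inode, bh), with the middle argument being the directory, the inode, or NULL in the resize paths. The wrapper's body is outside this diff; the extra argument only makes sense if the no-journal path needs it, so a plausible kernel-context sketch, assumptions and all, is:

/* Sketch, not the series' verbatim wrapper. */
int ext4_handle_dirty_metadata(handle_t *handle, struct inode *inode,
			       struct buffer_head *bh)
{
	if (ext4_handle_valid(handle))
		return jbd2_journal_dirty_metadata(handle, bh);

	/* No journal: just dirty the buffer, syncing it right away
	 * when the inode demands synchronous semantics. */
	mark_buffer_dirty(bh);
	if (inode && IS_SYNC(inode))
		sync_dirty_buffer(bh);
	return 0;
}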
@@ -1408,6 +1409,8 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1408 1409
1409 /* Initialize as for dx_probe */ 1410 /* Initialize as for dx_probe */
1410 hinfo.hash_version = root->info.hash_version; 1411 hinfo.hash_version = root->info.hash_version;
1412 if (hinfo.hash_version <= DX_HASH_TEA)
1413 hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
1411 hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; 1414 hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
1412 ext4fs_dirhash(name, namelen, &hinfo); 1415 ext4fs_dirhash(name, namelen, &hinfo);
1413 frame = frames; 1416 frame = frames;
@@ -1437,7 +1440,6 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1437 struct inode *inode) 1440 struct inode *inode)
1438{ 1441{
1439 struct inode *dir = dentry->d_parent->d_inode; 1442 struct inode *dir = dentry->d_parent->d_inode;
1440 unsigned long offset;
1441 struct buffer_head *bh; 1443 struct buffer_head *bh;
1442 struct ext4_dir_entry_2 *de; 1444 struct ext4_dir_entry_2 *de;
1443 struct super_block *sb; 1445 struct super_block *sb;
@@ -1459,7 +1461,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1459 ext4_mark_inode_dirty(handle, dir); 1461 ext4_mark_inode_dirty(handle, dir);
1460 } 1462 }
1461 blocks = dir->i_size >> sb->s_blocksize_bits; 1463 blocks = dir->i_size >> sb->s_blocksize_bits;
1462 for (block = 0, offset = 0; block < blocks; block++) { 1464 for (block = 0; block < blocks; block++) {
1463 bh = ext4_bread(handle, dir, block, 0, &retval); 1465 bh = ext4_bread(handle, dir, block, 0, &retval);
1464 if(!bh) 1466 if(!bh)
1465 return retval; 1467 return retval;
@@ -1574,7 +1576,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1574 dxtrace(dx_show_index("node", frames[1].entries)); 1576 dxtrace(dx_show_index("node", frames[1].entries));
1575 dxtrace(dx_show_index("node", 1577 dxtrace(dx_show_index("node",
1576 ((struct dx_node *) bh2->b_data)->entries)); 1578 ((struct dx_node *) bh2->b_data)->entries));
1577 err = ext4_journal_dirty_metadata(handle, bh2); 1579 err = ext4_handle_dirty_metadata(handle, inode, bh2);
1578 if (err) 1580 if (err)
1579 goto journal_error; 1581 goto journal_error;
1580 brelse (bh2); 1582 brelse (bh2);
@@ -1600,7 +1602,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1600 if (err) 1602 if (err)
1601 goto journal_error; 1603 goto journal_error;
1602 } 1604 }
1603 ext4_journal_dirty_metadata(handle, frames[0].bh); 1605 ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
1604 } 1606 }
1605 de = do_split(handle, dir, &bh, frame, &hinfo, &err); 1607 de = do_split(handle, dir, &bh, frame, &hinfo, &err);
1606 if (!de) 1608 if (!de)
@@ -1646,8 +1648,8 @@ static int ext4_delete_entry(handle_t *handle,
1646 else 1648 else
1647 de->inode = 0; 1649 de->inode = 0;
1648 dir->i_version++; 1650 dir->i_version++;
1649 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 1651 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1650 ext4_journal_dirty_metadata(handle, bh); 1652 ext4_handle_dirty_metadata(handle, dir, bh);
1651 return 0; 1653 return 0;
1652 } 1654 }
1653 i += ext4_rec_len_from_disk(de->rec_len); 1655 i += ext4_rec_len_from_disk(de->rec_len);
@@ -1725,7 +1727,7 @@ retry:
1725 return PTR_ERR(handle); 1727 return PTR_ERR(handle);
1726 1728
1727 if (IS_DIRSYNC(dir)) 1729 if (IS_DIRSYNC(dir))
1728 handle->h_sync = 1; 1730 ext4_handle_sync(handle);
1729 1731
1730 inode = ext4_new_inode (handle, dir, mode); 1732 inode = ext4_new_inode (handle, dir, mode);
1731 err = PTR_ERR(inode); 1733 err = PTR_ERR(inode);
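The handle->h_sync = 1 assignments in the IS_DIRSYNC() paths all become ext4_handle_sync(handle) for the same no-journal reason: there is no h_sync field to poke on a fake handle. A one-line sketch of the presumed helper:

static inline void ext4_handle_sync(handle_t *handle)
{
	if (ext4_handle_valid(handle))
		handle->h_sync = 1;
}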
@@ -1759,7 +1761,7 @@ retry:
1759 return PTR_ERR(handle); 1761 return PTR_ERR(handle);
1760 1762
1761 if (IS_DIRSYNC(dir)) 1763 if (IS_DIRSYNC(dir))
1762 handle->h_sync = 1; 1764 ext4_handle_sync(handle);
1763 1765
1764 inode = ext4_new_inode(handle, dir, mode); 1766 inode = ext4_new_inode(handle, dir, mode);
1765 err = PTR_ERR(inode); 1767 err = PTR_ERR(inode);
@@ -1795,7 +1797,7 @@ retry:
1795 return PTR_ERR(handle); 1797 return PTR_ERR(handle);
1796 1798
1797 if (IS_DIRSYNC(dir)) 1799 if (IS_DIRSYNC(dir))
1798 handle->h_sync = 1; 1800 ext4_handle_sync(handle);
1799 1801
1800 inode = ext4_new_inode(handle, dir, S_IFDIR | mode); 1802 inode = ext4_new_inode(handle, dir, S_IFDIR | mode);
1801 err = PTR_ERR(inode); 1803 err = PTR_ERR(inode);
@@ -1824,8 +1826,8 @@ retry:
1824 strcpy(de->name, ".."); 1826 strcpy(de->name, "..");
1825 ext4_set_de_type(dir->i_sb, de, S_IFDIR); 1827 ext4_set_de_type(dir->i_sb, de, S_IFDIR);
1826 inode->i_nlink = 2; 1828 inode->i_nlink = 2;
1827 BUFFER_TRACE(dir_block, "call ext4_journal_dirty_metadata"); 1829 BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
1828 ext4_journal_dirty_metadata(handle, dir_block); 1830 ext4_handle_dirty_metadata(handle, dir, dir_block);
1829 brelse(dir_block); 1831 brelse(dir_block);
1830 ext4_mark_inode_dirty(handle, inode); 1832 ext4_mark_inode_dirty(handle, inode);
1831 err = ext4_add_entry(handle, dentry, inode); 1833 err = ext4_add_entry(handle, dentry, inode);
@@ -1854,7 +1856,7 @@ out_stop:
1854 */ 1856 */
1855static int empty_dir(struct inode *inode) 1857static int empty_dir(struct inode *inode)
1856{ 1858{
1857 unsigned long offset; 1859 unsigned int offset;
1858 struct buffer_head *bh; 1860 struct buffer_head *bh;
1859 struct ext4_dir_entry_2 *de, *de1; 1861 struct ext4_dir_entry_2 *de, *de1;
1860 struct super_block *sb; 1862 struct super_block *sb;
@@ -1899,7 +1901,7 @@ static int empty_dir(struct inode *inode)
1899 if (err) 1901 if (err)
1900 ext4_error(sb, __func__, 1902 ext4_error(sb, __func__,
1901 "error %d reading directory" 1903 "error %d reading directory"
1902 " #%lu offset %lu", 1904 " #%lu offset %u",
1903 err, inode->i_ino, offset); 1905 err, inode->i_ino, offset);
1904 offset += sb->s_blocksize; 1906 offset += sb->s_blocksize;
1905 continue; 1907 continue;
@@ -1937,6 +1939,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
1937 struct ext4_iloc iloc; 1939 struct ext4_iloc iloc;
1938 int err = 0, rc; 1940 int err = 0, rc;
1939 1941
1942 if (!ext4_handle_valid(handle))
1943 return 0;
1944
1940 lock_super(sb); 1945 lock_super(sb);
1941 if (!list_empty(&EXT4_I(inode)->i_orphan)) 1946 if (!list_empty(&EXT4_I(inode)->i_orphan))
1942 goto out_unlock; 1947 goto out_unlock;
@@ -1965,7 +1970,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
1965 /* Insert this inode at the head of the on-disk orphan list... */ 1970 /* Insert this inode at the head of the on-disk orphan list... */
1966 NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan); 1971 NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
1967 EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); 1972 EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
1968 err = ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh); 1973 err = ext4_handle_dirty_metadata(handle, inode, EXT4_SB(sb)->s_sbh);
1969 rc = ext4_mark_iloc_dirty(handle, inode, &iloc); 1974 rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
1970 if (!err) 1975 if (!err)
1971 err = rc; 1976 err = rc;
@@ -1999,10 +2004,13 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
1999 struct list_head *prev; 2004 struct list_head *prev;
2000 struct ext4_inode_info *ei = EXT4_I(inode); 2005 struct ext4_inode_info *ei = EXT4_I(inode);
2001 struct ext4_sb_info *sbi; 2006 struct ext4_sb_info *sbi;
2002 unsigned long ino_next; 2007 __u32 ino_next;
2003 struct ext4_iloc iloc; 2008 struct ext4_iloc iloc;
2004 int err = 0; 2009 int err = 0;
2005 2010
2011 if (!ext4_handle_valid(handle))
2012 return 0;
2013
2006 lock_super(inode->i_sb); 2014 lock_super(inode->i_sb);
2007 if (list_empty(&ei->i_orphan)) { 2015 if (list_empty(&ei->i_orphan)) {
2008 unlock_super(inode->i_sb); 2016 unlock_super(inode->i_sb);
@@ -2021,7 +2029,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
2021 * transaction handle with which to update the orphan list on 2029 * transaction handle with which to update the orphan list on
2022 * disk, but we still need to remove the inode from the linked 2030 * disk, but we still need to remove the inode from the linked
2023 * list in memory. */ 2031 * list in memory. */
2024 if (!handle) 2032 if (sbi->s_journal && !handle)
2025 goto out; 2033 goto out;
2026 2034
2027 err = ext4_reserve_inode_write(handle, inode, &iloc); 2035 err = ext4_reserve_inode_write(handle, inode, &iloc);
@@ -2029,19 +2037,19 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
2029 goto out_err; 2037 goto out_err;
2030 2038
2031 if (prev == &sbi->s_orphan) { 2039 if (prev == &sbi->s_orphan) {
2032 jbd_debug(4, "superblock will point to %lu\n", ino_next); 2040 jbd_debug(4, "superblock will point to %u\n", ino_next);
2033 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); 2041 BUFFER_TRACE(sbi->s_sbh, "get_write_access");
2034 err = ext4_journal_get_write_access(handle, sbi->s_sbh); 2042 err = ext4_journal_get_write_access(handle, sbi->s_sbh);
2035 if (err) 2043 if (err)
2036 goto out_brelse; 2044 goto out_brelse;
2037 sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); 2045 sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
2038 err = ext4_journal_dirty_metadata(handle, sbi->s_sbh); 2046 err = ext4_handle_dirty_metadata(handle, inode, sbi->s_sbh);
2039 } else { 2047 } else {
2040 struct ext4_iloc iloc2; 2048 struct ext4_iloc iloc2;
2041 struct inode *i_prev = 2049 struct inode *i_prev =
2042 &list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode; 2050 &list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;
2043 2051
2044 jbd_debug(4, "orphan inode %lu will point to %lu\n", 2052 jbd_debug(4, "orphan inode %lu will point to %u\n",
2045 i_prev->i_ino, ino_next); 2053 i_prev->i_ino, ino_next);
2046 err = ext4_reserve_inode_write(handle, i_prev, &iloc2); 2054 err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
2047 if (err) 2055 if (err)
@@ -2086,7 +2094,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
2086 goto end_rmdir; 2094 goto end_rmdir;
2087 2095
2088 if (IS_DIRSYNC(dir)) 2096 if (IS_DIRSYNC(dir))
2089 handle->h_sync = 1; 2097 ext4_handle_sync(handle);
2090 2098
2091 inode = dentry->d_inode; 2099 inode = dentry->d_inode;
2092 2100
@@ -2140,7 +2148,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
2140 return PTR_ERR(handle); 2148 return PTR_ERR(handle);
2141 2149
2142 if (IS_DIRSYNC(dir)) 2150 if (IS_DIRSYNC(dir))
2143 handle->h_sync = 1; 2151 ext4_handle_sync(handle);
2144 2152
2145 retval = -ENOENT; 2153 retval = -ENOENT;
2146 bh = ext4_find_entry(dir, &dentry->d_name, &de); 2154 bh = ext4_find_entry(dir, &dentry->d_name, &de);
@@ -2197,7 +2205,7 @@ retry:
2197 return PTR_ERR(handle); 2205 return PTR_ERR(handle);
2198 2206
2199 if (IS_DIRSYNC(dir)) 2207 if (IS_DIRSYNC(dir))
2200 handle->h_sync = 1; 2208 ext4_handle_sync(handle);
2201 2209
2202 inode = ext4_new_inode(handle, dir, S_IFLNK|S_IRWXUGO); 2210 inode = ext4_new_inode(handle, dir, S_IFLNK|S_IRWXUGO);
2203 err = PTR_ERR(inode); 2211 err = PTR_ERR(inode);
@@ -2260,7 +2268,7 @@ retry:
2260 return PTR_ERR(handle); 2268 return PTR_ERR(handle);
2261 2269
2262 if (IS_DIRSYNC(dir)) 2270 if (IS_DIRSYNC(dir))
2263 handle->h_sync = 1; 2271 ext4_handle_sync(handle);
2264 2272
2265 inode->i_ctime = ext4_current_time(inode); 2273 inode->i_ctime = ext4_current_time(inode);
2266 ext4_inc_count(handle, inode); 2274 ext4_inc_count(handle, inode);
@@ -2309,7 +2317,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2309 return PTR_ERR(handle); 2317 return PTR_ERR(handle);
2310 2318
2311 if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir)) 2319 if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
2312 handle->h_sync = 1; 2320 ext4_handle_sync(handle);
2313 2321
2314 old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de); 2322 old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de);
2315 /* 2323 /*
@@ -2363,8 +2371,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2363 new_dir->i_ctime = new_dir->i_mtime = 2371 new_dir->i_ctime = new_dir->i_mtime =
2364 ext4_current_time(new_dir); 2372 ext4_current_time(new_dir);
2365 ext4_mark_inode_dirty(handle, new_dir); 2373 ext4_mark_inode_dirty(handle, new_dir);
2366 BUFFER_TRACE(new_bh, "call ext4_journal_dirty_metadata"); 2374 BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
2367 ext4_journal_dirty_metadata(handle, new_bh); 2375 ext4_handle_dirty_metadata(handle, new_dir, new_bh);
2368 brelse(new_bh); 2376 brelse(new_bh);
2369 new_bh = NULL; 2377 new_bh = NULL;
2370 } 2378 }
@@ -2414,8 +2422,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2414 BUFFER_TRACE(dir_bh, "get_write_access"); 2422 BUFFER_TRACE(dir_bh, "get_write_access");
2415 ext4_journal_get_write_access(handle, dir_bh); 2423 ext4_journal_get_write_access(handle, dir_bh);
2416 PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino); 2424 PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
2417 BUFFER_TRACE(dir_bh, "call ext4_journal_dirty_metadata"); 2425 BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
2418 ext4_journal_dirty_metadata(handle, dir_bh); 2426 ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
2419 ext4_dec_count(handle, old_dir); 2427 ext4_dec_count(handle, old_dir);
2420 if (new_inode) { 2428 if (new_inode) {
2421 /* checked empty_dir above, can't have another parent, 2429 /* checked empty_dir above, can't have another parent,
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index b6ec1843a015..c328be5d6885 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -50,7 +50,7 @@ static int verify_group_input(struct super_block *sb,
50 ext4_get_group_no_and_offset(sb, start, NULL, &offset); 50 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
51 if (group != sbi->s_groups_count) 51 if (group != sbi->s_groups_count)
52 ext4_warning(sb, __func__, 52 ext4_warning(sb, __func__,
53 "Cannot add at group %u (only %lu groups)", 53 "Cannot add at group %u (only %u groups)",
54 input->group, sbi->s_groups_count); 54 input->group, sbi->s_groups_count);
55 else if (offset != 0) 55 else if (offset != 0)
56 ext4_warning(sb, __func__, "Last group not full"); 56 ext4_warning(sb, __func__, "Last group not full");
@@ -149,7 +149,7 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh,
149{ 149{
150 int err; 150 int err;
151 151
152 if (handle->h_buffer_credits >= thresh) 152 if (ext4_handle_has_enough_credits(handle, thresh))
153 return 0; 153 return 0;
154 154
155 err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA); 155 err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA);
@@ -232,7 +232,7 @@ static int setup_new_group_blocks(struct super_block *sb,
232 memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size); 232 memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
233 set_buffer_uptodate(gdb); 233 set_buffer_uptodate(gdb);
234 unlock_buffer(gdb); 234 unlock_buffer(gdb);
235 ext4_journal_dirty_metadata(handle, gdb); 235 ext4_handle_dirty_metadata(handle, NULL, gdb);
236 ext4_set_bit(bit, bh->b_data); 236 ext4_set_bit(bit, bh->b_data);
237 brelse(gdb); 237 brelse(gdb);
238 } 238 }
@@ -251,7 +251,7 @@ static int setup_new_group_blocks(struct super_block *sb,
251 err = PTR_ERR(bh); 251 err = PTR_ERR(bh);
252 goto exit_bh; 252 goto exit_bh;
253 } 253 }
254 ext4_journal_dirty_metadata(handle, gdb); 254 ext4_handle_dirty_metadata(handle, NULL, gdb);
255 ext4_set_bit(bit, bh->b_data); 255 ext4_set_bit(bit, bh->b_data);
256 brelse(gdb); 256 brelse(gdb);
257 } 257 }
@@ -276,7 +276,7 @@ static int setup_new_group_blocks(struct super_block *sb,
276 err = PTR_ERR(it); 276 err = PTR_ERR(it);
277 goto exit_bh; 277 goto exit_bh;
278 } 278 }
279 ext4_journal_dirty_metadata(handle, it); 279 ext4_handle_dirty_metadata(handle, NULL, it);
280 brelse(it); 280 brelse(it);
281 ext4_set_bit(bit, bh->b_data); 281 ext4_set_bit(bit, bh->b_data);
282 } 282 }
@@ -284,11 +284,9 @@ static int setup_new_group_blocks(struct super_block *sb,
284 if ((err = extend_or_restart_transaction(handle, 2, bh))) 284 if ((err = extend_or_restart_transaction(handle, 2, bh)))
285 goto exit_bh; 285 goto exit_bh;
286 286
287 mark_bitmap_end(input->blocks_count, EXT4_BLOCKS_PER_GROUP(sb), 287 mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8, bh->b_data);
288 bh->b_data); 288 ext4_handle_dirty_metadata(handle, NULL, bh);
289 ext4_journal_dirty_metadata(handle, bh);
290 brelse(bh); 289 brelse(bh);
291
292 /* Mark unused entries in inode bitmap used */ 290 /* Mark unused entries in inode bitmap used */
293 ext4_debug("clear inode bitmap %#04llx (+%llu)\n", 291 ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
294 input->inode_bitmap, input->inode_bitmap - start); 292 input->inode_bitmap, input->inode_bitmap - start);
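Both mark_bitmap_end() calls now pad up to sb->s_blocksize * 8 rather than EXT4_BLOCKS_PER_GROUP(sb): a bitmap block always holds exactly blocksize * 8 bits no matter how many blocks or inodes the group contains, so that is the bound to which the unused tail must be marked in use. mark_bitmap_end() itself is defined elsewhere in ext4; a toy equivalent of the padding it performs:

/* Toy equivalent (the kernel helper fast-paths whole bytes with memset):
 * set bits [start_bit, end_bit) so the allocator can never hand out the
 * unused tail of a bitmap block. */
static void toy_mark_bitmap_end(int start_bit, int end_bit,
				unsigned char *bitmap)
{
	int i;

	for (i = start_bit; i < end_bit; i++)
		bitmap[i >> 3] |= 1u << (i & 7);
}

Called with end_bit = blocksize * 8, the padding runs exactly to the end of the on-disk block.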
@@ -297,9 +295,9 @@ static int setup_new_group_blocks(struct super_block *sb,
297 goto exit_journal; 295 goto exit_journal;
298 } 296 }
299 297
300 mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb), 298 mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
301 bh->b_data); 299 bh->b_data);
302 ext4_journal_dirty_metadata(handle, bh); 300 ext4_handle_dirty_metadata(handle, NULL, bh);
303exit_bh: 301exit_bh:
304 brelse(bh); 302 brelse(bh);
305 303
@@ -486,12 +484,12 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
486 * reserved inode, and will become GDT blocks (primary and backup). 484 * reserved inode, and will become GDT blocks (primary and backup).
487 */ 485 */
488 data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0; 486 data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
489 ext4_journal_dirty_metadata(handle, dind); 487 ext4_handle_dirty_metadata(handle, NULL, dind);
490 brelse(dind); 488 brelse(dind);
491 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9; 489 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
492 ext4_mark_iloc_dirty(handle, inode, &iloc); 490 ext4_mark_iloc_dirty(handle, inode, &iloc);
493 memset((*primary)->b_data, 0, sb->s_blocksize); 491 memset((*primary)->b_data, 0, sb->s_blocksize);
494 ext4_journal_dirty_metadata(handle, *primary); 492 ext4_handle_dirty_metadata(handle, NULL, *primary);
495 493
496 o_group_desc = EXT4_SB(sb)->s_group_desc; 494 o_group_desc = EXT4_SB(sb)->s_group_desc;
497 memcpy(n_group_desc, o_group_desc, 495 memcpy(n_group_desc, o_group_desc,
@@ -502,7 +500,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
502 kfree(o_group_desc); 500 kfree(o_group_desc);
503 501
504 le16_add_cpu(&es->s_reserved_gdt_blocks, -1); 502 le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
505 ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh); 503 ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
506 504
507 return 0; 505 return 0;
508 506
@@ -618,7 +616,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
618 primary[i]->b_blocknr, gdbackups, 616 primary[i]->b_blocknr, gdbackups,
619 blk + primary[i]->b_blocknr); */ 617 blk + primary[i]->b_blocknr); */
620 data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr); 618 data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
621 err2 = ext4_journal_dirty_metadata(handle, primary[i]); 619 err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
622 if (!err) 620 if (!err)
623 err = err2; 621 err = err2;
624 } 622 }
@@ -676,7 +674,8 @@ static void update_backups(struct super_block *sb,
676 struct buffer_head *bh; 674 struct buffer_head *bh;
677 675
678 /* Out of journal space, and can't get more - abort - so sad */ 676 /* Out of journal space, and can't get more - abort - so sad */
679 if (handle->h_buffer_credits == 0 && 677 if (ext4_handle_valid(handle) &&
678 handle->h_buffer_credits == 0 &&
680 ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) && 679 ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
681 (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA))) 680 (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
682 break; 681 break;
@@ -696,7 +695,7 @@ static void update_backups(struct super_block *sb,
696 memset(bh->b_data + size, 0, rest); 695 memset(bh->b_data + size, 0, rest);
697 set_buffer_uptodate(bh); 696 set_buffer_uptodate(bh);
698 unlock_buffer(bh); 697 unlock_buffer(bh);
699 ext4_journal_dirty_metadata(handle, bh); 698 ext4_handle_dirty_metadata(handle, NULL, bh);
700 brelse(bh); 699 brelse(bh);
701 } 700 }
702 if ((err2 = ext4_journal_stop(handle)) && !err) 701 if ((err2 = ext4_journal_stop(handle)) && !err)
@@ -715,7 +714,7 @@ static void update_backups(struct super_block *sb,
715exit_err: 714exit_err:
716 if (err) { 715 if (err) {
717 ext4_warning(sb, __func__, 716 ext4_warning(sb, __func__,
718 "can't update backup for group %lu (err %d), " 717 "can't update backup for group %u (err %d), "
719 "forcing fsck on next reboot", group, err); 718 "forcing fsck on next reboot", group, err);
720 sbi->s_mount_state &= ~EXT4_VALID_FS; 719 sbi->s_mount_state &= ~EXT4_VALID_FS;
721 sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS); 720 sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
@@ -747,6 +746,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
747 struct inode *inode = NULL; 746 struct inode *inode = NULL;
748 handle_t *handle; 747 handle_t *handle;
749 int gdb_off, gdb_num; 748 int gdb_off, gdb_num;
749 int num_grp_locked = 0;
750 int err, err2; 750 int err, err2;
751 751
752 gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb); 752 gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
@@ -761,13 +761,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
761 761
762 if (ext4_blocks_count(es) + input->blocks_count < 762 if (ext4_blocks_count(es) + input->blocks_count <
763 ext4_blocks_count(es)) { 763 ext4_blocks_count(es)) {
764 ext4_warning(sb, __func__, "blocks_count overflow\n"); 764 ext4_warning(sb, __func__, "blocks_count overflow");
765 return -EINVAL; 765 return -EINVAL;
766 } 766 }
767 767
768 if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < 768 if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
769 le32_to_cpu(es->s_inodes_count)) { 769 le32_to_cpu(es->s_inodes_count)) {
770 ext4_warning(sb, __func__, "inodes_count overflow\n"); 770 ext4_warning(sb, __func__, "inodes_count overflow");
771 return -EINVAL; 771 return -EINVAL;
772 } 772 }
773 773
@@ -787,6 +787,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
787 } 787 }
788 } 788 }
789 789
790
790 if ((err = verify_group_input(sb, input))) 791 if ((err = verify_group_input(sb, input)))
791 goto exit_put; 792 goto exit_put;
792 793
@@ -855,6 +856,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
855 * using the new disk blocks. 856 * using the new disk blocks.
856 */ 857 */
857 858
859 num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, input->group);
858 /* Update group descriptor block for new group */ 860 /* Update group descriptor block for new group */
859 gdp = (struct ext4_group_desc *)((char *)primary->b_data + 861 gdp = (struct ext4_group_desc *)((char *)primary->b_data +
860 gdb_off * EXT4_DESC_SIZE(sb)); 862 gdb_off * EXT4_DESC_SIZE(sb));
@@ -862,17 +864,20 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
862 ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */ 864 ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
863 ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */ 865 ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
864 ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */ 866 ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
865 gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count); 867 ext4_free_blks_set(sb, gdp, input->free_blocks_count);
866 gdp->bg_free_inodes_count = cpu_to_le16(EXT4_INODES_PER_GROUP(sb)); 868 ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
869 gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
867 gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp); 870 gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
868 871
869 /* 872 /*
870 * We can allocate memory for mb_alloc based on the new group 873 * We can allocate memory for mb_alloc based on the new group
871 * descriptor 874 * descriptor
872 */ 875 */
873 err = ext4_mb_add_more_groupinfo(sb, input->group, gdp); 876 err = ext4_mb_add_groupinfo(sb, input->group, gdp);
874 if (err) 877 if (err) {
878 ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
875 goto exit_journal; 879 goto exit_journal;
880 }
876 881
877 /* 882 /*
878 * Make the new blocks and inodes valid next. We do this before 883 * Make the new blocks and inodes valid next. We do this before
@@ -914,8 +919,9 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
914 919
915 /* Update the global fs size fields */ 920 /* Update the global fs size fields */
916 sbi->s_groups_count++; 921 sbi->s_groups_count++;
922 ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
917 923
918 ext4_journal_dirty_metadata(handle, primary); 924 ext4_handle_dirty_metadata(handle, NULL, primary);
919 925
920 /* Update the reserved block counts only once the new group is 926 /* Update the reserved block counts only once the new group is
921 * active. */ 927 * active. */
@@ -937,7 +943,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
937 EXT4_INODES_PER_GROUP(sb); 943 EXT4_INODES_PER_GROUP(sb);
938 } 944 }
939 945
940 ext4_journal_dirty_metadata(handle, sbi->s_sbh); 946 ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
941 sb->s_dirt = 1; 947 sb->s_dirt = 1;
942 948
943exit_journal: 949exit_journal:
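Condensed out of the interleaved columns, the buddy-cache locking these ext4_group_add() hunks introduce is:

	num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, input->group);
	/* ... fill in the new descriptor: bitmaps, free counts,
	 *     EXT4_BG_INODE_ZEROED, checksum ... */
	err = ext4_mb_add_groupinfo(sb, input->group, gdp);
	if (err) {
		/* unlock before taking the existing error exit */
		ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
		goto exit_journal;
	}
	/* ... journal the bitmaps, then publish the group ... */
	sbi->s_groups_count++;
	ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);

The bracketing keeps mballoc from observing a half-initialized group between the descriptor update and the s_groups_count bump.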
@@ -975,9 +981,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
975 struct buffer_head *bh; 981 struct buffer_head *bh;
976 handle_t *handle; 982 handle_t *handle;
977 int err; 983 int err;
978 unsigned long freed_blocks;
979 ext4_group_t group; 984 ext4_group_t group;
980 struct ext4_group_info *grp;
981 985
982 /* We don't need to worry about locking wrt other resizers just 986 /* We don't need to worry about locking wrt other resizers just
983 * yet: we're going to revalidate es->s_blocks_count after 987 * yet: we're going to revalidate es->s_blocks_count after
@@ -997,8 +1001,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
997 " too large to resize to %llu blocks safely\n", 1001 " too large to resize to %llu blocks safely\n",
998 sb->s_id, n_blocks_count); 1002 sb->s_id, n_blocks_count);
999 if (sizeof(sector_t) < 8) 1003 if (sizeof(sector_t) < 8)
1000 ext4_warning(sb, __func__, 1004 ext4_warning(sb, __func__, "CONFIG_LBD not enabled");
1001 "CONFIG_LBD not enabled\n");
1002 return -EINVAL; 1005 return -EINVAL;
1003 } 1006 }
1004 1007
@@ -1071,62 +1074,18 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1071 goto exit_put; 1074 goto exit_put;
1072 } 1075 }
1073 ext4_blocks_count_set(es, o_blocks_count + add); 1076 ext4_blocks_count_set(es, o_blocks_count + add);
1074 ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh); 1077 ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
1075 sb->s_dirt = 1; 1078 sb->s_dirt = 1;
1076 unlock_super(sb); 1079 unlock_super(sb);
1077 ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count, 1080 ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
1078 o_blocks_count + add); 1081 o_blocks_count + add);
1079 ext4_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks); 1082 /* We add the blocks to the bitmap and set the group need init bit */
1083 ext4_add_groupblocks(handle, sb, o_blocks_count, add);
1080 ext4_debug("freed blocks %llu through %llu\n", o_blocks_count, 1084 ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
1081 o_blocks_count + add); 1085 o_blocks_count + add);
1082 if ((err = ext4_journal_stop(handle))) 1086 if ((err = ext4_journal_stop(handle)))
1083 goto exit_put; 1087 goto exit_put;
1084 1088
1085 /*
1086 * Mark mballoc pages as not up to date so that they will be updated
1087 * next time they are loaded by ext4_mb_load_buddy.
1088 *
1089 * XXX Bad, Bad, BAD!!! We should not be overloading the
 1090 * Uptodate flag, particularly on the bitmap bh, as a way of
1091 * hinting to ext4_mb_load_buddy() that it needs to be
1092 * overloaded. A user could take a LVM snapshot, then do an
1093 * on-line fsck, and clear the uptodate flag, and this would
1094 * not be a bug in userspace, but a bug in the kernel. FIXME!!!
1095 */
1096 {
1097 struct ext4_sb_info *sbi = EXT4_SB(sb);
1098 struct inode *inode = sbi->s_buddy_cache;
1099 int blocks_per_page;
1100 int block;
1101 int pnum;
1102 struct page *page;
1103
1104 /* Set buddy page as not up to date */
1105 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1106 block = group * 2;
1107 pnum = block / blocks_per_page;
1108 page = find_get_page(inode->i_mapping, pnum);
1109 if (page != NULL) {
1110 ClearPageUptodate(page);
1111 page_cache_release(page);
1112 }
1113
1114 /* Set bitmap page as not up to date */
1115 block++;
1116 pnum = block / blocks_per_page;
1117 page = find_get_page(inode->i_mapping, pnum);
1118 if (page != NULL) {
1119 ClearPageUptodate(page);
1120 page_cache_release(page);
1121 }
1122
1123 /* Get the info on the last group */
1124 grp = ext4_get_group_info(sb, group);
1125
1126 /* Update free blocks in group info */
1127 ext4_mb_update_group_info(grp, add);
1128 }
1129
1130 if (test_opt(sb, DEBUG)) 1089 if (test_opt(sb, DEBUG))
1131 printk(KERN_DEBUG "EXT4-fs: extended group to %llu blocks\n", 1090 printk(KERN_DEBUG "EXT4-fs: extended group to %llu blocks\n",
1132 ext4_blocks_count(es)); 1091 ext4_blocks_count(es));
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9494bb249390..8f7e0be8ab1b 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -51,8 +51,6 @@ struct proc_dir_entry *ext4_proc_root;
51 51
52static int ext4_load_journal(struct super_block *, struct ext4_super_block *, 52static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
53 unsigned long journal_devnum); 53 unsigned long journal_devnum);
54static int ext4_create_journal(struct super_block *, struct ext4_super_block *,
55 unsigned int);
56static void ext4_commit_super(struct super_block *sb, 54static void ext4_commit_super(struct super_block *sb,
57 struct ext4_super_block *es, int sync); 55 struct ext4_super_block *es, int sync);
58static void ext4_mark_recovery_complete(struct super_block *sb, 56static void ext4_mark_recovery_complete(struct super_block *sb,
@@ -93,6 +91,38 @@ ext4_fsblk_t ext4_inode_table(struct super_block *sb,
93 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0); 91 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
94} 92}
95 93
94__u32 ext4_free_blks_count(struct super_block *sb,
95 struct ext4_group_desc *bg)
96{
97 return le16_to_cpu(bg->bg_free_blocks_count_lo) |
98 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
99 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
100}
101
102__u32 ext4_free_inodes_count(struct super_block *sb,
103 struct ext4_group_desc *bg)
104{
105 return le16_to_cpu(bg->bg_free_inodes_count_lo) |
106 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
107 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
108}
109
110__u32 ext4_used_dirs_count(struct super_block *sb,
111 struct ext4_group_desc *bg)
112{
113 return le16_to_cpu(bg->bg_used_dirs_count_lo) |
114 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
115 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
116}
117
118__u32 ext4_itable_unused_count(struct super_block *sb,
119 struct ext4_group_desc *bg)
120{
121 return le16_to_cpu(bg->bg_itable_unused_lo) |
122 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
123 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
124}
125
96void ext4_block_bitmap_set(struct super_block *sb, 126void ext4_block_bitmap_set(struct super_block *sb,
97 struct ext4_group_desc *bg, ext4_fsblk_t blk) 127 struct ext4_group_desc *bg, ext4_fsblk_t blk)
98{ 128{
@@ -117,6 +147,38 @@ void ext4_inode_table_set(struct super_block *sb,
117 bg->bg_inode_table_hi = cpu_to_le32(blk >> 32); 147 bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
118} 148}
119 149
150void ext4_free_blks_set(struct super_block *sb,
151 struct ext4_group_desc *bg, __u32 count)
152{
153 bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
154 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
155 bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
156}
157
158void ext4_free_inodes_set(struct super_block *sb,
159 struct ext4_group_desc *bg, __u32 count)
160{
161 bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
162 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
163 bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
164}
165
166void ext4_used_dirs_set(struct super_block *sb,
167 struct ext4_group_desc *bg, __u32 count)
168{
169 bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
170 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
171 bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
172}
173
174void ext4_itable_unused_set(struct super_block *sb,
175 struct ext4_group_desc *bg, __u32 count)
176{
177 bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
178 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
179 bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
180}
181
120/* 182/*
121 * Wrappers for jbd2_journal_start/end. 183 * Wrappers for jbd2_journal_start/end.
122 * 184 *
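Together with the getters added earlier in this file, the setters above widen the per-group free-blocks, free-inodes, used-dirs and itable-unused counters to 32 bits: the legacy 16-bit _lo field carries the low half, and a _hi field supplies the top half only when the descriptor size is at least EXT4_MIN_DESC_SIZE_64BIT. Endianness conversion aside, the arithmetic reduces to this userspace toy:

#include <stdint.h>
#include <stdio.h>

struct toy_desc {
	uint16_t free_blocks_lo;	/* legacy on-disk field */
	uint16_t free_blocks_hi;	/* present only on 64-bit descriptors */
};

static void toy_set(struct toy_desc *bg, uint32_t count, int has_hi)
{
	bg->free_blocks_lo = (uint16_t)count;		/* low 16 bits */
	if (has_hi)
		bg->free_blocks_hi = count >> 16;	/* high 16 bits */
}

static uint32_t toy_get(const struct toy_desc *bg, int has_hi)
{
	return bg->free_blocks_lo |
	       (has_hi ? (uint32_t)bg->free_blocks_hi << 16 : 0);
}

int main(void)
{
	struct toy_desc bg = { 0, 0 };

	toy_set(&bg, 70000, 1);		/* needs more than 16 bits */
	printf("round trip: %u\n", (unsigned)toy_get(&bg, 1));	/* 70000 */
	return 0;
}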
@@ -136,13 +198,19 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
136 * backs (eg. EIO in the commit thread), then we still need to 198 * backs (eg. EIO in the commit thread), then we still need to
137 * take the FS itself readonly cleanly. */ 199 * take the FS itself readonly cleanly. */
138 journal = EXT4_SB(sb)->s_journal; 200 journal = EXT4_SB(sb)->s_journal;
139 if (is_journal_aborted(journal)) { 201 if (journal) {
140 ext4_abort(sb, __func__, 202 if (is_journal_aborted(journal)) {
141 "Detected aborted journal"); 203 ext4_abort(sb, __func__,
142 return ERR_PTR(-EROFS); 204 "Detected aborted journal");
205 return ERR_PTR(-EROFS);
206 }
207 return jbd2_journal_start(journal, nblocks);
143 } 208 }
144 209 /*
145 return jbd2_journal_start(journal, nblocks); 210 * We're not journaling, return the appropriate indication.
211 */
212 current->journal_info = EXT4_NOJOURNAL_HANDLE;
213 return current->journal_info;
146} 214}
147 215
148/* 216/*
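ext4_journal_start_sb() now parks EXT4_NOJOURNAL_HANDLE in current->journal_info and returns it instead of a real jbd2 handle. The macro's definition is not in this diff; the usual shape of the trick is a small non-NULL, non-error constant, so callers' existing NULL and IS_ERR() checks keep passing while ext4_handle_valid() can still tell the fake apart. A userspace toy with the 0x1 value assumed:

#include <assert.h>
#include <stdio.h>

typedef struct handle_s handle_t;		/* opaque, never dereferenced */

#define NOJOURNAL_HANDLE ((handle_t *) 0x1)	/* assumed sentinel value */

static int handle_valid(handle_t *h)
{
	return h != NOJOURNAL_HANDLE;
}

int main(void)
{
	handle_t *h = NOJOURNAL_HANDLE;	/* what the no-journal path returns */

	assert(h != NULL);		/* "did journal_start fail?" still passes */
	printf("real jbd2 handle? %s\n", handle_valid(h) ? "yes" : "no");
	return 0;
}

__ext4_journal_stop() clears current->journal_info itself in this mode, since jbd2_journal_stop() is never called to do it.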
@@ -157,6 +225,14 @@ int __ext4_journal_stop(const char *where, handle_t *handle)
157 int err; 225 int err;
158 int rc; 226 int rc;
159 227
228 if (!ext4_handle_valid(handle)) {
229 /*
230 * Do this here since we don't call jbd2_journal_stop() in
231 * no-journal mode.
232 */
233 current->journal_info = NULL;
234 return 0;
235 }
160 sb = handle->h_transaction->t_journal->j_private; 236 sb = handle->h_transaction->t_journal->j_private;
161 err = handle->h_err; 237 err = handle->h_err;
162 rc = jbd2_journal_stop(handle); 238 rc = jbd2_journal_stop(handle);
@@ -174,6 +250,8 @@ void ext4_journal_abort_handle(const char *caller, const char *err_fn,
174 char nbuf[16]; 250 char nbuf[16];
175 const char *errstr = ext4_decode_error(NULL, err, nbuf); 251 const char *errstr = ext4_decode_error(NULL, err, nbuf);
176 252
253 BUG_ON(!ext4_handle_valid(handle));
254
177 if (bh) 255 if (bh)
178 BUFFER_TRACE(bh, "abort"); 256 BUFFER_TRACE(bh, "abort");
179 257
@@ -350,6 +428,44 @@ void ext4_warning(struct super_block *sb, const char *function,
350 va_end(args); 428 va_end(args);
351} 429}
352 430
431void ext4_grp_locked_error(struct super_block *sb, ext4_group_t grp,
432 const char *function, const char *fmt, ...)
433__releases(bitlock)
434__acquires(bitlock)
435{
436 va_list args;
437 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
438
439 va_start(args, fmt);
440 printk(KERN_CRIT "EXT4-fs error (device %s): %s: ", sb->s_id, function);
441 vprintk(fmt, args);
442 printk("\n");
443 va_end(args);
444
445 if (test_opt(sb, ERRORS_CONT)) {
446 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
447 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
448 ext4_commit_super(sb, es, 0);
449 return;
450 }
451 ext4_unlock_group(sb, grp);
452 ext4_handle_error(sb);
453 /*
454 * We only get here in the ERRORS_RO case; relocking the group
455 * may be dangerous, but nothing bad will happen since the
456 * filesystem will have already been marked read/only and the
457 * journal has been aborted. We return 1 as a hint to callers
458 * who might what to use the return value from
459 * ext4_grp_locked_error() to distinguish beween the
460 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
461 * aggressively from the ext4 function in question, with a
462 * more appropriate error code.
463 */
464 ext4_lock_group(sb, grp);
465 return;
466}
467
468
353void ext4_update_dynamic_rev(struct super_block *sb) 469void ext4_update_dynamic_rev(struct super_block *sb)
354{ 470{
355 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 471 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
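ext4_grp_locked_error() above carries __releases(bitlock)/__acquires(bitlock) because, in the ERRORS_RO case, it drops the group lock around ext4_handle_error() and retakes it before returning. Those markers are sparse annotations that compile away to nothing for ordinary compilers; a standalone illustration of the pattern:

#define __releases(x)		/* expands to nothing outside sparse */
#define __acquires(x)

typedef struct { volatile int locked; } toy_lock_t;

static void toy_lock(toy_lock_t *l)   { l->locked = 1; }
static void toy_unlock(toy_lock_t *l) { l->locked = 0; }

/* Called with the lock held; returns with it held again. */
static void do_work_unlocked(toy_lock_t *lock)
__releases(lock)
__acquires(lock)
{
	toy_unlock(lock);
	/* work that must not run while the lock is held */
	toy_lock(lock);
}

int main(void)
{
	toy_lock_t l = { 1 };

	do_work_unlocked(&l);
	return !l.locked;	/* 0: the lock is held again on return */
}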
@@ -389,7 +505,7 @@ static struct block_device *ext4_blkdev_get(dev_t dev)
389 return bdev; 505 return bdev;
390 506
391fail: 507fail:
392 printk(KERN_ERR "EXT4: failed to open journal device %s: %ld\n", 508 printk(KERN_ERR "EXT4-fs: failed to open journal device %s: %ld\n",
393 __bdevname(dev, b), PTR_ERR(bdev)); 509 __bdevname(dev, b), PTR_ERR(bdev));
394 return NULL; 510 return NULL;
395} 511}
@@ -448,11 +564,13 @@ static void ext4_put_super(struct super_block *sb)
448 ext4_mb_release(sb); 564 ext4_mb_release(sb);
449 ext4_ext_release(sb); 565 ext4_ext_release(sb);
450 ext4_xattr_put_super(sb); 566 ext4_xattr_put_super(sb);
451 err = jbd2_journal_destroy(sbi->s_journal); 567 if (sbi->s_journal) {
452 sbi->s_journal = NULL; 568 err = jbd2_journal_destroy(sbi->s_journal);
453 if (err < 0) 569 sbi->s_journal = NULL;
454 ext4_abort(sb, __func__, "Couldn't clean up the journal"); 570 if (err < 0)
455 571 ext4_abort(sb, __func__,
572 "Couldn't clean up the journal");
573 }
456 if (!(sb->s_flags & MS_RDONLY)) { 574 if (!(sb->s_flags & MS_RDONLY)) {
457 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 575 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
458 es->s_state = cpu_to_le16(sbi->s_mount_state); 576 es->s_state = cpu_to_le16(sbi->s_mount_state);
@@ -522,6 +640,11 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
522 memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache)); 640 memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
523 INIT_LIST_HEAD(&ei->i_prealloc_list); 641 INIT_LIST_HEAD(&ei->i_prealloc_list);
524 spin_lock_init(&ei->i_prealloc_lock); 642 spin_lock_init(&ei->i_prealloc_lock);
643 /*
644 * Note: We can be called before EXT4_SB(sb)->s_journal is set,
645 * therefore it can be null here. Don't check it, just initialize
646 * jinode.
647 */
525 jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode); 648 jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
526 ei->i_reserved_data_blocks = 0; 649 ei->i_reserved_data_blocks = 0;
527 ei->i_reserved_meta_blocks = 0; 650 ei->i_reserved_meta_blocks = 0;
@@ -588,7 +711,8 @@ static void ext4_clear_inode(struct inode *inode)
588 } 711 }
589#endif 712#endif
590 ext4_discard_preallocations(inode); 713 ext4_discard_preallocations(inode);
591 jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal, 714 if (EXT4_JOURNAL(inode))
715 jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
592 &EXT4_I(inode)->jinode); 716 &EXT4_I(inode)->jinode);
593} 717}
594 718
@@ -681,10 +805,19 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
681#endif 805#endif
682 if (!test_opt(sb, RESERVATION)) 806 if (!test_opt(sb, RESERVATION))
683 seq_puts(seq, ",noreservation"); 807 seq_puts(seq, ",noreservation");
684 if (sbi->s_commit_interval) { 808 if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
685 seq_printf(seq, ",commit=%u", 809 seq_printf(seq, ",commit=%u",
686 (unsigned) (sbi->s_commit_interval / HZ)); 810 (unsigned) (sbi->s_commit_interval / HZ));
687 } 811 }
812 if (sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) {
813 seq_printf(seq, ",min_batch_time=%u",
814 (unsigned) sbi->s_min_batch_time);
815 }
816 if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
817 seq_printf(seq, ",max_batch_time=%u",
 818 (unsigned) sbi->s_max_batch_time);
819 }
820
688 /* 821 /*
689 * We're changing the default of barrier mount option, so 822 * We're changing the default of barrier mount option, so
690 * let's always display its mount state so it's clear what its 823 * let's always display its mount state so it's clear what its
@@ -696,8 +829,6 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
696 seq_puts(seq, ",journal_async_commit"); 829 seq_puts(seq, ",journal_async_commit");
697 if (test_opt(sb, NOBH)) 830 if (test_opt(sb, NOBH))
698 seq_puts(seq, ",nobh"); 831 seq_puts(seq, ",nobh");
699 if (!test_opt(sb, EXTENTS))
700 seq_puts(seq, ",noextents");
701 if (test_opt(sb, I_VERSION)) 832 if (test_opt(sb, I_VERSION))
702 seq_puts(seq, ",i_version"); 833 seq_puts(seq, ",i_version");
703 if (!test_opt(sb, DELALLOC)) 834 if (!test_opt(sb, DELALLOC))
@@ -772,6 +903,25 @@ static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
772 ext4_nfs_get_inode); 903 ext4_nfs_get_inode);
773} 904}
774 905
906/*
907 * Try to release metadata pages (indirect blocks, directories) which are
908 * mapped via the block device. Since these pages could have journal heads
909 * which would prevent try_to_free_buffers() from freeing them, we must use
910 * jbd2 layer's try_to_free_buffers() function to release them.
911 */
912static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_t wait)
913{
914 journal_t *journal = EXT4_SB(sb)->s_journal;
915
916 WARN_ON(PageChecked(page));
917 if (!page_has_buffers(page))
918 return 0;
919 if (journal)
920 return jbd2_journal_try_to_free_buffers(journal, page,
921 wait & ~__GFP_WAIT);
922 return try_to_free_buffers(page);
923}
924
775#ifdef CONFIG_QUOTA 925#ifdef CONFIG_QUOTA
776#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group") 926#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
777#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) 927#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
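One detail in bdev_try_to_free_page() above: the gfp mask is forwarded as wait & ~__GFP_WAIT, clearing the may-sleep bit, presumably so jbd2 treats the request as best-effort instead of blocking on a transaction commit from inside page reclaim. The masking itself is ordinary bit clearing:

#include <stdio.h>

#define TOY_GFP_WAIT	0x10u	/* toy value; the real bit lives in gfp.h */

int main(void)
{
	unsigned int wait = 0xc4u | TOY_GFP_WAIT;	/* caller-supplied mask */
	unsigned int nowait = wait & ~TOY_GFP_WAIT;	/* may-sleep bit cleared */

	printf("%#x -> %#x\n", wait, nowait);		/* 0xd4 -> 0xc4 */
	return 0;
}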
@@ -838,6 +988,7 @@ static const struct super_operations ext4_sops = {
838 .quota_read = ext4_quota_read, 988 .quota_read = ext4_quota_read,
839 .quota_write = ext4_quota_write, 989 .quota_write = ext4_quota_write,
840#endif 990#endif
991 .bdev_try_to_free_page = bdev_try_to_free_page,
841}; 992};
842 993
843static const struct export_operations ext4_export_ops = { 994static const struct export_operations ext4_export_ops = {
@@ -852,16 +1003,17 @@ enum {
852 Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov, 1003 Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
853 Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, 1004 Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
854 Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh, 1005 Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
855 Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev, 1006 Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
1007 Opt_journal_update, Opt_journal_dev,
856 Opt_journal_checksum, Opt_journal_async_commit, 1008 Opt_journal_checksum, Opt_journal_async_commit,
857 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, 1009 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
858 Opt_data_err_abort, Opt_data_err_ignore, 1010 Opt_data_err_abort, Opt_data_err_ignore,
859 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, 1011 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
860 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota, 1012 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
861 Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota, 1013 Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
862 Opt_grpquota, Opt_extents, Opt_noextents, Opt_i_version, 1014 Opt_grpquota, Opt_i_version,
863 Opt_stripe, Opt_delalloc, Opt_nodelalloc, 1015 Opt_stripe, Opt_delalloc, Opt_nodelalloc,
864 Opt_inode_readahead_blks 1016 Opt_inode_readahead_blks, Opt_journal_ioprio
865}; 1017};
866 1018
867static const match_table_t tokens = { 1019static const match_table_t tokens = {
@@ -891,8 +1043,9 @@ static const match_table_t tokens = {
891 {Opt_nobh, "nobh"}, 1043 {Opt_nobh, "nobh"},
892 {Opt_bh, "bh"}, 1044 {Opt_bh, "bh"},
893 {Opt_commit, "commit=%u"}, 1045 {Opt_commit, "commit=%u"},
1046 {Opt_min_batch_time, "min_batch_time=%u"},
1047 {Opt_max_batch_time, "max_batch_time=%u"},
894 {Opt_journal_update, "journal=update"}, 1048 {Opt_journal_update, "journal=update"},
895 {Opt_journal_inum, "journal=%u"},
896 {Opt_journal_dev, "journal_dev=%u"}, 1049 {Opt_journal_dev, "journal_dev=%u"},
897 {Opt_journal_checksum, "journal_checksum"}, 1050 {Opt_journal_checksum, "journal_checksum"},
898 {Opt_journal_async_commit, "journal_async_commit"}, 1051 {Opt_journal_async_commit, "journal_async_commit"},
@@ -913,14 +1066,13 @@ static const match_table_t tokens = {
913 {Opt_quota, "quota"}, 1066 {Opt_quota, "quota"},
914 {Opt_usrquota, "usrquota"}, 1067 {Opt_usrquota, "usrquota"},
915 {Opt_barrier, "barrier=%u"}, 1068 {Opt_barrier, "barrier=%u"},
916 {Opt_extents, "extents"},
917 {Opt_noextents, "noextents"},
918 {Opt_i_version, "i_version"}, 1069 {Opt_i_version, "i_version"},
919 {Opt_stripe, "stripe=%u"}, 1070 {Opt_stripe, "stripe=%u"},
920 {Opt_resize, "resize"}, 1071 {Opt_resize, "resize"},
921 {Opt_delalloc, "delalloc"}, 1072 {Opt_delalloc, "delalloc"},
922 {Opt_nodelalloc, "nodelalloc"}, 1073 {Opt_nodelalloc, "nodelalloc"},
923 {Opt_inode_readahead_blks, "inode_readahead_blks=%u"}, 1074 {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
1075 {Opt_journal_ioprio, "journal_ioprio=%u"},
924 {Opt_err, NULL}, 1076 {Opt_err, NULL},
925}; 1077};
926 1078
@@ -945,8 +1097,11 @@ static ext4_fsblk_t get_sb_block(void **data)
945 return sb_block; 1097 return sb_block;
946} 1098}
947 1099
1100#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
1101
948static int parse_options(char *options, struct super_block *sb, 1102static int parse_options(char *options, struct super_block *sb,
949 unsigned int *inum, unsigned long *journal_devnum, 1103 unsigned long *journal_devnum,
1104 unsigned int *journal_ioprio,
950 ext4_fsblk_t *n_blocks_count, int is_remount) 1105 ext4_fsblk_t *n_blocks_count, int is_remount)
951{ 1106{
952 struct ext4_sb_info *sbi = EXT4_SB(sb); 1107 struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -958,7 +1113,6 @@ static int parse_options(char *options, struct super_block *sb,
958 int qtype, qfmt; 1113 int qtype, qfmt;
959 char *qname; 1114 char *qname;
960#endif 1115#endif
961 ext4_fsblk_t last_block;
962 1116
963 if (!options) 1117 if (!options)
964 return 1; 1118 return 1;
@@ -1070,16 +1224,6 @@ static int parse_options(char *options, struct super_block *sb,
1070 } 1224 }
1071 set_opt(sbi->s_mount_opt, UPDATE_JOURNAL); 1225 set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
1072 break; 1226 break;
1073 case Opt_journal_inum:
1074 if (is_remount) {
1075 printk(KERN_ERR "EXT4-fs: cannot specify "
1076 "journal on remount\n");
1077 return 0;
1078 }
1079 if (match_int(&args[0], &option))
1080 return 0;
1081 *inum = option;
1082 break;
1083 case Opt_journal_dev: 1227 case Opt_journal_dev:
1084 if (is_remount) { 1228 if (is_remount) {
1085 printk(KERN_ERR "EXT4-fs: cannot specify " 1229 printk(KERN_ERR "EXT4-fs: cannot specify "
@@ -1109,6 +1253,22 @@ static int parse_options(char *options, struct super_block *sb,
1109 option = JBD2_DEFAULT_MAX_COMMIT_AGE; 1253 option = JBD2_DEFAULT_MAX_COMMIT_AGE;
1110 sbi->s_commit_interval = HZ * option; 1254 sbi->s_commit_interval = HZ * option;
1111 break; 1255 break;
1256 case Opt_max_batch_time:
1257 if (match_int(&args[0], &option))
1258 return 0;
1259 if (option < 0)
1260 return 0;
1261 if (option == 0)
1262 option = EXT4_DEF_MAX_BATCH_TIME;
1263 sbi->s_max_batch_time = option;
1264 break;
1265 case Opt_min_batch_time:
1266 if (match_int(&args[0], &option))
1267 return 0;
1268 if (option < 0)
1269 return 0;
1270 sbi->s_min_batch_time = option;
1271 break;
1112 case Opt_data_journal: 1272 case Opt_data_journal:
1113 data_opt = EXT4_MOUNT_JOURNAL_DATA; 1273 data_opt = EXT4_MOUNT_JOURNAL_DATA;
1114 goto datacheck; 1274 goto datacheck;
@@ -1279,33 +1439,6 @@ set_qf_format:
1279 case Opt_bh: 1439 case Opt_bh:
1280 clear_opt(sbi->s_mount_opt, NOBH); 1440 clear_opt(sbi->s_mount_opt, NOBH);
1281 break; 1441 break;
1282 case Opt_extents:
1283 if (!EXT4_HAS_INCOMPAT_FEATURE(sb,
1284 EXT4_FEATURE_INCOMPAT_EXTENTS)) {
1285 ext4_warning(sb, __func__,
1286 "extents feature not enabled "
1287 "on this filesystem, use tune2fs\n");
1288 return 0;
1289 }
1290 set_opt(sbi->s_mount_opt, EXTENTS);
1291 break;
1292 case Opt_noextents:
1293 /*
1294 * When e2fsprogs support resizing an already existing
1295 * ext3 file system to greater than 2**32 we need to
1296 * add support to block allocator to handle growing
1297 * already existing block mapped inode so that blocks
1298 * allocated for them fall within 2**32
1299 */
1300 last_block = ext4_blocks_count(sbi->s_es) - 1;
1301 if (last_block > 0xffffffffULL) {
1302 printk(KERN_ERR "EXT4-fs: Filesystem too "
1303 "large to mount with "
1304 "-o noextents options\n");
1305 return 0;
1306 }
1307 clear_opt(sbi->s_mount_opt, EXTENTS);
1308 break;
1309 case Opt_i_version: 1442 case Opt_i_version:
1310 set_opt(sbi->s_mount_opt, I_VERSION); 1443 set_opt(sbi->s_mount_opt, I_VERSION);
1311 sb->s_flags |= MS_I_VERSION; 1444 sb->s_flags |= MS_I_VERSION;
@@ -1330,6 +1463,14 @@ set_qf_format:
1330 return 0; 1463 return 0;
1331 sbi->s_inode_readahead_blks = option; 1464 sbi->s_inode_readahead_blks = option;
1332 break; 1465 break;
1466 case Opt_journal_ioprio:
1467 if (match_int(&args[0], &option))
1468 return 0;
1469 if (option < 0 || option > 7)
1470 break;
1471 *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE,
1472 option);
1473 break;
1333 default: 1474 default:
1334 printk(KERN_ERR 1475 printk(KERN_ERR
1335 "EXT4-fs: Unrecognized mount option \"%s\" " 1476 "EXT4-fs: Unrecognized mount option \"%s\" "
@@ -1405,24 +1546,19 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
1405 printk(KERN_WARNING 1546 printk(KERN_WARNING
1406 "EXT4-fs warning: checktime reached, " 1547 "EXT4-fs warning: checktime reached, "
1407 "running e2fsck is recommended\n"); 1548 "running e2fsck is recommended\n");
1408#if 0 1549 if (!sbi->s_journal)
1409 /* @@@ We _will_ want to clear the valid bit if we find 1550 es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
1410 * inconsistencies, to force a fsck at reboot. But for
1411 * a plain journaled filesystem we can keep it set as
1412 * valid forever! :)
1413 */
1414 es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
1415#endif
1416 if (!(__s16) le16_to_cpu(es->s_max_mnt_count)) 1551 if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
1417 es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT); 1552 es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
1418 le16_add_cpu(&es->s_mnt_count, 1); 1553 le16_add_cpu(&es->s_mnt_count, 1);
1419 es->s_mtime = cpu_to_le32(get_seconds()); 1554 es->s_mtime = cpu_to_le32(get_seconds());
1420 ext4_update_dynamic_rev(sb); 1555 ext4_update_dynamic_rev(sb);
1421 EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 1556 if (sbi->s_journal)
1557 EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
1422 1558
1423 ext4_commit_super(sb, es, 1); 1559 ext4_commit_super(sb, es, 1);
1424 if (test_opt(sb, DEBUG)) 1560 if (test_opt(sb, DEBUG))
1425 printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%lu, " 1561 printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
1426 "bpg=%lu, ipg=%lu, mo=%04lx]\n", 1562 "bpg=%lu, ipg=%lu, mo=%04lx]\n",
1427 sb->s_blocksize, 1563 sb->s_blocksize,
1428 sbi->s_groups_count, 1564 sbi->s_groups_count,
@@ -1430,9 +1566,13 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
1430 EXT4_INODES_PER_GROUP(sb), 1566 EXT4_INODES_PER_GROUP(sb),
1431 sbi->s_mount_opt); 1567 sbi->s_mount_opt);
1432 1568
1433 printk(KERN_INFO "EXT4 FS on %s, %s journal on %s\n", 1569 if (EXT4_SB(sb)->s_journal) {
1434 sb->s_id, EXT4_SB(sb)->s_journal->j_inode ? "internal" : 1570 printk(KERN_INFO "EXT4 FS on %s, %s journal on %s\n",
1435 "external", EXT4_SB(sb)->s_journal->j_devname); 1571 sb->s_id, EXT4_SB(sb)->s_journal->j_inode ? "internal" :
1572 "external", EXT4_SB(sb)->s_journal->j_devname);
1573 } else {
1574 printk(KERN_INFO "EXT4 FS on %s, no journal\n", sb->s_id);
1575 }
1436 return res; 1576 return res;
1437} 1577}
1438 1578
@@ -1444,7 +1584,6 @@ static int ext4_fill_flex_info(struct super_block *sb)
1444 ext4_group_t flex_group_count; 1584 ext4_group_t flex_group_count;
1445 ext4_group_t flex_group; 1585 ext4_group_t flex_group;
1446 int groups_per_flex = 0; 1586 int groups_per_flex = 0;
1447 __u64 block_bitmap = 0;
1448 int i; 1587 int i;
1449 1588
1450 if (!sbi->s_es->s_log_groups_per_flex) { 1589 if (!sbi->s_es->s_log_groups_per_flex) {
@@ -1463,21 +1602,18 @@ static int ext4_fill_flex_info(struct super_block *sb)
1463 sizeof(struct flex_groups), GFP_KERNEL); 1602 sizeof(struct flex_groups), GFP_KERNEL);
1464 if (sbi->s_flex_groups == NULL) { 1603 if (sbi->s_flex_groups == NULL) {
1465 printk(KERN_ERR "EXT4-fs: not enough memory for " 1604 printk(KERN_ERR "EXT4-fs: not enough memory for "
1466 "%lu flex groups\n", flex_group_count); 1605 "%u flex groups\n", flex_group_count);
1467 goto failed; 1606 goto failed;
1468 } 1607 }
1469 1608
1470 gdp = ext4_get_group_desc(sb, 1, &bh);
1471 block_bitmap = ext4_block_bitmap(sb, gdp) - 1;
1472
1473 for (i = 0; i < sbi->s_groups_count; i++) { 1609 for (i = 0; i < sbi->s_groups_count; i++) {
1474 gdp = ext4_get_group_desc(sb, i, &bh); 1610 gdp = ext4_get_group_desc(sb, i, &bh);
1475 1611
1476 flex_group = ext4_flex_group(sbi, i); 1612 flex_group = ext4_flex_group(sbi, i);
1477 sbi->s_flex_groups[flex_group].free_inodes += 1613 sbi->s_flex_groups[flex_group].free_inodes +=
1478 le16_to_cpu(gdp->bg_free_inodes_count); 1614 ext4_free_inodes_count(sb, gdp);
1479 sbi->s_flex_groups[flex_group].free_blocks += 1615 sbi->s_flex_groups[flex_group].free_blocks +=
1480 le16_to_cpu(gdp->bg_free_blocks_count); 1616 ext4_free_blks_count(sb, gdp);
1481 } 1617 }
1482 1618
1483 return 1; 1619 return 1;
@@ -1551,14 +1687,14 @@ static int ext4_check_descriptors(struct super_block *sb)
1551 block_bitmap = ext4_block_bitmap(sb, gdp); 1687 block_bitmap = ext4_block_bitmap(sb, gdp);
1552 if (block_bitmap < first_block || block_bitmap > last_block) { 1688 if (block_bitmap < first_block || block_bitmap > last_block) {
1553 printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: " 1689 printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
1554 "Block bitmap for group %lu not in group " 1690 "Block bitmap for group %u not in group "
1555 "(block %llu)!\n", i, block_bitmap); 1691 "(block %llu)!\n", i, block_bitmap);
1556 return 0; 1692 return 0;
1557 } 1693 }
1558 inode_bitmap = ext4_inode_bitmap(sb, gdp); 1694 inode_bitmap = ext4_inode_bitmap(sb, gdp);
1559 if (inode_bitmap < first_block || inode_bitmap > last_block) { 1695 if (inode_bitmap < first_block || inode_bitmap > last_block) {
1560 printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: " 1696 printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
1561 "Inode bitmap for group %lu not in group " 1697 "Inode bitmap for group %u not in group "
1562 "(block %llu)!\n", i, inode_bitmap); 1698 "(block %llu)!\n", i, inode_bitmap);
1563 return 0; 1699 return 0;
1564 } 1700 }
@@ -1566,14 +1702,14 @@ static int ext4_check_descriptors(struct super_block *sb)
1566 if (inode_table < first_block || 1702 if (inode_table < first_block ||
1567 inode_table + sbi->s_itb_per_group - 1 > last_block) { 1703 inode_table + sbi->s_itb_per_group - 1 > last_block) {
1568 printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: " 1704 printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
1569 "Inode table for group %lu not in group " 1705 "Inode table for group %u not in group "
1570 "(block %llu)!\n", i, inode_table); 1706 "(block %llu)!\n", i, inode_table);
1571 return 0; 1707 return 0;
1572 } 1708 }
1573 spin_lock(sb_bgl_lock(sbi, i)); 1709 spin_lock(sb_bgl_lock(sbi, i));
1574 if (!ext4_group_desc_csum_verify(sbi, i, gdp)) { 1710 if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
1575 printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: " 1711 printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
1576 "Checksum for group %lu failed (%u!=%u)\n", 1712 "Checksum for group %u failed (%u!=%u)\n",
1577 i, le16_to_cpu(ext4_group_desc_csum(sbi, i, 1713 i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
1578 gdp)), le16_to_cpu(gdp->bg_checksum)); 1714 gdp)), le16_to_cpu(gdp->bg_checksum));
1579 if (!(sb->s_flags & MS_RDONLY)) { 1715 if (!(sb->s_flags & MS_RDONLY)) {
@@ -1865,19 +2001,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1865 ext4_fsblk_t sb_block = get_sb_block(&data); 2001 ext4_fsblk_t sb_block = get_sb_block(&data);
1866 ext4_fsblk_t logical_sb_block; 2002 ext4_fsblk_t logical_sb_block;
1867 unsigned long offset = 0; 2003 unsigned long offset = 0;
1868 unsigned int journal_inum = 0;
1869 unsigned long journal_devnum = 0; 2004 unsigned long journal_devnum = 0;
1870 unsigned long def_mount_opts; 2005 unsigned long def_mount_opts;
1871 struct inode *root; 2006 struct inode *root;
1872 char *cp; 2007 char *cp;
2008 const char *descr;
1873 int ret = -EINVAL; 2009 int ret = -EINVAL;
1874 int blocksize; 2010 int blocksize;
1875 int db_count; 2011 unsigned int db_count;
1876 int i; 2012 unsigned int i;
1877 int needs_recovery, has_huge_files; 2013 int needs_recovery, has_huge_files;
1878 __le32 features; 2014 int features;
1879 __u64 blocks_count; 2015 __u64 blocks_count;
1880 int err; 2016 int err;
2017 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
1881 2018
1882 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 2019 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
1883 if (!sbi) 2020 if (!sbi)
@@ -1958,31 +2095,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1958 2095
1959 sbi->s_resuid = le16_to_cpu(es->s_def_resuid); 2096 sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
1960 sbi->s_resgid = le16_to_cpu(es->s_def_resgid); 2097 sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
2098 sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
2099 sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
2100 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
1961 2101
1962 set_opt(sbi->s_mount_opt, RESERVATION); 2102 set_opt(sbi->s_mount_opt, RESERVATION);
1963 set_opt(sbi->s_mount_opt, BARRIER); 2103 set_opt(sbi->s_mount_opt, BARRIER);
1964 2104
1965 /* 2105 /*
1966 * turn on extents feature by default in ext4 filesystem
1967 * only if feature flag already set by mkfs or tune2fs.
1968 * Use -o noextents to turn it off
1969 */
1970 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
1971 set_opt(sbi->s_mount_opt, EXTENTS);
1972 else
1973 ext4_warning(sb, __func__,
1974 "extents feature not enabled on this filesystem, "
1975 "use tune2fs.\n");
1976
1977 /*
1978 * enable delayed allocation by default 2106 * enable delayed allocation by default
1979 * Use -o nodelalloc to turn it off 2107 * Use -o nodelalloc to turn it off
1980 */ 2108 */
1981 set_opt(sbi->s_mount_opt, DELALLOC); 2109 set_opt(sbi->s_mount_opt, DELALLOC);
1982 2110
1983 2111
1984 if (!parse_options((char *) data, sb, &journal_inum, &journal_devnum, 2112 if (!parse_options((char *) data, sb, &journal_devnum,
1985 NULL, 0)) 2113 &journal_ioprio, NULL, 0))
1986 goto failed_mount; 2114 goto failed_mount;
1987 2115
1988 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | 2116 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
@@ -2004,15 +2132,17 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2004 features = EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP); 2132 features = EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP);
2005 if (features) { 2133 if (features) {
2006 printk(KERN_ERR "EXT4-fs: %s: couldn't mount because of " 2134 printk(KERN_ERR "EXT4-fs: %s: couldn't mount because of "
2007 "unsupported optional features (%x).\n", 2135 "unsupported optional features (%x).\n", sb->s_id,
2008 sb->s_id, le32_to_cpu(features)); 2136 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
2137 ~EXT4_FEATURE_INCOMPAT_SUPP));
2009 goto failed_mount; 2138 goto failed_mount;
2010 } 2139 }
2011 features = EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP); 2140 features = EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP);
2012 if (!(sb->s_flags & MS_RDONLY) && features) { 2141 if (!(sb->s_flags & MS_RDONLY) && features) {
2013 printk(KERN_ERR "EXT4-fs: %s: couldn't mount RDWR because of " 2142 printk(KERN_ERR "EXT4-fs: %s: couldn't mount RDWR because of "
2014 "unsupported optional features (%x).\n", 2143 "unsupported optional features (%x).\n", sb->s_id,
2015 sb->s_id, le32_to_cpu(features)); 2144 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
2145 ~EXT4_FEATURE_RO_COMPAT_SUPP));
2016 goto failed_mount; 2146 goto failed_mount;
2017 } 2147 }
2018 has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb, 2148 has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb,
@@ -2117,6 +2247,18 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2117 for (i = 0; i < 4; i++) 2247 for (i = 0; i < 4; i++)
2118 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); 2248 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
2119 sbi->s_def_hash_version = es->s_def_hash_version; 2249 sbi->s_def_hash_version = es->s_def_hash_version;
2250 i = le32_to_cpu(es->s_flags);
2251 if (i & EXT2_FLAGS_UNSIGNED_HASH)
2252 sbi->s_hash_unsigned = 3;
2253 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
2254#ifdef __CHAR_UNSIGNED__
2255 es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
2256 sbi->s_hash_unsigned = 3;
2257#else
2258 es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
2259#endif
2260 sb->s_dirt = 1;
2261 }
2120 2262
2121 if (sbi->s_blocks_per_group > blocksize * 8) { 2263 if (sbi->s_blocks_per_group > blocksize * 8) {
2122 printk(KERN_ERR 2264 printk(KERN_ERR
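The EXT2_FLAGS_SIGNED_HASH/EXT2_FLAGS_UNSIGNED_HASH logic above exists because the legacy htree hash folded filename bytes through plain char, whose signedness is architecture-dependent (__CHAR_UNSIGNED__ is GCC's predefine on unsigned-char targets). A small userspace sketch of the effect, illustrative only:

	#include <stdio.h>

	int main(void)
	{
		char c = (char)0xe9;	/* a high byte, e.g. from a Latin-1 name */

		/* Promotes to -23 where char is signed (x86) but to 233 where
		 * it is unsigned (ARM, PowerPC), so the same name would hash
		 * into different directory blocks; recording one variant in
		 * the superblock keeps directories portable across hosts. */
		printf("promoted value: %d\n", (int)c);
		return 0;
	}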
@@ -2144,20 +2286,30 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2144 if (EXT4_BLOCKS_PER_GROUP(sb) == 0) 2286 if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
2145 goto cantfind_ext4; 2287 goto cantfind_ext4;
2146 2288
2147 /* ensure blocks_count calculation below doesn't sign-extend */ 2289 /*
2148 if (ext4_blocks_count(es) + EXT4_BLOCKS_PER_GROUP(sb) < 2290 * It makes no sense for the first data block to be beyond the end
2149 le32_to_cpu(es->s_first_data_block) + 1) { 2291 * of the filesystem.
2150 printk(KERN_WARNING "EXT4-fs: bad geometry: block count %llu, " 2292 */
2151 "first data block %u, blocks per group %lu\n", 2293 if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
2152 ext4_blocks_count(es), 2294 printk(KERN_WARNING "EXT4-fs: bad geometry: first data"
2153 le32_to_cpu(es->s_first_data_block), 2295 "block %u is beyond end of filesystem (%llu)\n",
2154 EXT4_BLOCKS_PER_GROUP(sb)); 2296 le32_to_cpu(es->s_first_data_block),
2297 ext4_blocks_count(es));
2155 goto failed_mount; 2298 goto failed_mount;
2156 } 2299 }
2157 blocks_count = (ext4_blocks_count(es) - 2300 blocks_count = (ext4_blocks_count(es) -
2158 le32_to_cpu(es->s_first_data_block) + 2301 le32_to_cpu(es->s_first_data_block) +
2159 EXT4_BLOCKS_PER_GROUP(sb) - 1); 2302 EXT4_BLOCKS_PER_GROUP(sb) - 1);
2160 do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); 2303 do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
2304 if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
2305 printk(KERN_WARNING "EXT4-fs: groups count too large: %u "
2306 "(block count %llu, first data block %u, "
2307 "blocks per group %lu)\n", sbi->s_groups_count,
2308 ext4_blocks_count(es),
2309 le32_to_cpu(es->s_first_data_block),
2310 EXT4_BLOCKS_PER_GROUP(sb));
2311 goto failed_mount;
2312 }
2161 sbi->s_groups_count = blocks_count; 2313 sbi->s_groups_count = blocks_count;
2162 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / 2314 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
2163 EXT4_DESC_PER_BLOCK(sb); 2315 EXT4_DESC_PER_BLOCK(sb);
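A rough worked example behind the new groups-count guard above, assuming 4 KiB blocks, hence 32768 blocks per group and 128 descriptors per descriptor block (constants implied by the on-disk format, not by this hunk):

	/* groups = ceil((block_count - first_data_block) / blocks_per_group).
	 * Even a maximal (2^32 - 1)-block filesystem (~16 TiB at 4 KiB)
	 * yields only ~131072 groups, far under the
	 * 2^32 - EXT4_DESC_PER_BLOCK cap, so the check should only trip on
	 * a corrupt superblock whose geometry cannot fit the 32-bit
	 * s_groups_count. */
	static unsigned long long nr_groups(unsigned long long block_count,
					    unsigned first_data_block,
					    unsigned blocks_per_group)
	{
		return (block_count - first_data_block + blocks_per_group - 1) /
			blocks_per_group;
	}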
@@ -2269,27 +2421,26 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2269 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; 2421 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
2270 es->s_state |= cpu_to_le16(EXT4_ERROR_FS); 2422 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
2271 ext4_commit_super(sb, es, 1); 2423 ext4_commit_super(sb, es, 1);
2272 printk(KERN_CRIT
2273 "EXT4-fs (device %s): mount failed\n",
2274 sb->s_id);
2275 goto failed_mount4; 2424 goto failed_mount4;
2276 } 2425 }
2277 } 2426 }
2278 } else if (journal_inum) { 2427 } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
2279 if (ext4_create_journal(sb, es, journal_inum)) 2428 EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
2280 goto failed_mount3; 2429 printk(KERN_ERR "EXT4-fs: required journal recovery "
2430 "suppressed and not mounted read-only\n");
2431 goto failed_mount4;
2281 } else { 2432 } else {
2282 if (!silent) 2433 clear_opt(sbi->s_mount_opt, DATA_FLAGS);
2283 printk(KERN_ERR 2434 set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
2284 "ext4: No journal on filesystem on %s\n", 2435 sbi->s_journal = NULL;
2285 sb->s_id); 2436 needs_recovery = 0;
2286 goto failed_mount3; 2437 goto no_journal;
2287 } 2438 }
2288 2439
2289 if (ext4_blocks_count(es) > 0xffffffffULL && 2440 if (ext4_blocks_count(es) > 0xffffffffULL &&
2290 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 2441 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
2291 JBD2_FEATURE_INCOMPAT_64BIT)) { 2442 JBD2_FEATURE_INCOMPAT_64BIT)) {
2292 printk(KERN_ERR "ext4: Failed to set 64-bit journal feature\n"); 2443 printk(KERN_ERR "EXT4-fs: Failed to set 64-bit journal feature\n");
2293 goto failed_mount4; 2444 goto failed_mount4;
2294 } 2445 }
2295 2446
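Condensing the rewritten branches above, the mount-time journal policy is roughly (summary ours, not part of the patch):

	/* journal present      -> load and recover it; on error, failed_mount4.
	 * no journal, but INCOMPAT_RECOVER set, mounted read-write with
	 * -o noload            -> refuse: required recovery was suppressed.
	 * no journal otherwise -> run journal-less: force writeback data
	 *                         semantics, s_journal = NULL, and skip the
	 *                         journal setup via the no_journal label. */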
@@ -2334,6 +2485,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2334 default: 2485 default:
2335 break; 2486 break;
2336 } 2487 }
2488 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
2489
2490no_journal:
2337 2491
2338 if (test_opt(sb, NOBH)) { 2492 if (test_opt(sb, NOBH)) {
2339 if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) { 2493 if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
@@ -2419,13 +2573,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2419 EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; 2573 EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
2420 ext4_orphan_cleanup(sb, es); 2574 ext4_orphan_cleanup(sb, es);
2421 EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; 2575 EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
2422 if (needs_recovery) 2576 if (needs_recovery) {
2423 printk(KERN_INFO "EXT4-fs: recovery complete.\n"); 2577 printk(KERN_INFO "EXT4-fs: recovery complete.\n");
2424 ext4_mark_recovery_complete(sb, es); 2578 ext4_mark_recovery_complete(sb, es);
2425 printk(KERN_INFO "EXT4-fs: mounted filesystem with %s data mode.\n", 2579 }
2426 test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ? "journal": 2580 if (EXT4_SB(sb)->s_journal) {
2427 test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA ? "ordered": 2581 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
2428 "writeback"); 2582 descr = " journalled data mode";
2583 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
2584 descr = " ordered data mode";
2585 else
2586 descr = " writeback data mode";
2587 } else
2588 descr = "out journal";
2589
2590 printk(KERN_INFO "EXT4-fs: mounted filesystem %s with%s\n",
2591 sb->s_id, descr);
2429 2592
2430 lock_kernel(); 2593 lock_kernel();
2431 return 0; 2594 return 0;
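One subtlety in the mount banner above: descr either supplies a trailing clause or completes the word "with", so a single format string covers every case:

	printk(KERN_INFO "EXT4-fs: mounted filesystem %s with%s\n",
	       sb->s_id, descr);
	/* descr = " ordered data mode" -> "... with ordered data mode"
	 * descr = "out journal"        -> "... without journal"        */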
@@ -2437,8 +2600,11 @@ cantfind_ext4:
2437 goto failed_mount; 2600 goto failed_mount;
2438 2601
2439failed_mount4: 2602failed_mount4:
2440 jbd2_journal_destroy(sbi->s_journal); 2603 printk(KERN_ERR "EXT4-fs (device %s): mount failed\n", sb->s_id);
2441 sbi->s_journal = NULL; 2604 if (sbi->s_journal) {
2605 jbd2_journal_destroy(sbi->s_journal);
2606 sbi->s_journal = NULL;
2607 }
2442failed_mount3: 2608failed_mount3:
2443 percpu_counter_destroy(&sbi->s_freeblocks_counter); 2609 percpu_counter_destroy(&sbi->s_freeblocks_counter);
2444 percpu_counter_destroy(&sbi->s_freeinodes_counter); 2610 percpu_counter_destroy(&sbi->s_freeinodes_counter);
@@ -2475,11 +2641,9 @@ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
2475{ 2641{
2476 struct ext4_sb_info *sbi = EXT4_SB(sb); 2642 struct ext4_sb_info *sbi = EXT4_SB(sb);
2477 2643
2478 if (sbi->s_commit_interval) 2644 journal->j_commit_interval = sbi->s_commit_interval;
2479 journal->j_commit_interval = sbi->s_commit_interval; 2645 journal->j_min_batch_time = sbi->s_min_batch_time;
2480 /* We could also set up an ext4-specific default for the commit 2646 journal->j_max_batch_time = sbi->s_max_batch_time;
2481 * interval here, but for now we'll just fall back to the jbd
2482 * default. */
2483 2647
2484 spin_lock(&journal->j_state_lock); 2648 spin_lock(&journal->j_state_lock);
2485 if (test_opt(sb, BARRIER)) 2649 if (test_opt(sb, BARRIER))
@@ -2499,6 +2663,8 @@ static journal_t *ext4_get_journal(struct super_block *sb,
2499 struct inode *journal_inode; 2663 struct inode *journal_inode;
2500 journal_t *journal; 2664 journal_t *journal;
2501 2665
2666 BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
2667
2502 /* First, test for the existence of a valid inode on disk. Bad 2668 /* First, test for the existence of a valid inode on disk. Bad
2503 * things happen if we iget() an unused inode, as the subsequent 2669 * things happen if we iget() an unused inode, as the subsequent
2504 * iput() will try to delete it. */ 2670 * iput() will try to delete it. */
@@ -2547,13 +2713,15 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
2547 struct ext4_super_block *es; 2713 struct ext4_super_block *es;
2548 struct block_device *bdev; 2714 struct block_device *bdev;
2549 2715
2716 BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
2717
2550 bdev = ext4_blkdev_get(j_dev); 2718 bdev = ext4_blkdev_get(j_dev);
2551 if (bdev == NULL) 2719 if (bdev == NULL)
2552 return NULL; 2720 return NULL;
2553 2721
2554 if (bd_claim(bdev, sb)) { 2722 if (bd_claim(bdev, sb)) {
2555 printk(KERN_ERR 2723 printk(KERN_ERR
2556 "EXT4: failed to claim external journal device.\n"); 2724 "EXT4-fs: failed to claim external journal device.\n");
2557 blkdev_put(bdev, FMODE_READ|FMODE_WRITE); 2725 blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
2558 return NULL; 2726 return NULL;
2559 } 2727 }
@@ -2634,6 +2802,8 @@ static int ext4_load_journal(struct super_block *sb,
2634 int err = 0; 2802 int err = 0;
2635 int really_read_only; 2803 int really_read_only;
2636 2804
2805 BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
2806
2637 if (journal_devnum && 2807 if (journal_devnum &&
2638 journal_devnum != le32_to_cpu(es->s_journal_dev)) { 2808 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
2639 printk(KERN_INFO "EXT4-fs: external journal device major/minor " 2809 printk(KERN_INFO "EXT4-fs: external journal device major/minor "
@@ -2718,48 +2888,6 @@ static int ext4_load_journal(struct super_block *sb,
2718 return 0; 2888 return 0;
2719} 2889}
2720 2890
2721static int ext4_create_journal(struct super_block *sb,
2722 struct ext4_super_block *es,
2723 unsigned int journal_inum)
2724{
2725 journal_t *journal;
2726 int err;
2727
2728 if (sb->s_flags & MS_RDONLY) {
2729 printk(KERN_ERR "EXT4-fs: readonly filesystem when trying to "
2730 "create journal.\n");
2731 return -EROFS;
2732 }
2733
2734 journal = ext4_get_journal(sb, journal_inum);
2735 if (!journal)
2736 return -EINVAL;
2737
2738 printk(KERN_INFO "EXT4-fs: creating new journal on inode %u\n",
2739 journal_inum);
2740
2741 err = jbd2_journal_create(journal);
2742 if (err) {
2743 printk(KERN_ERR "EXT4-fs: error creating journal.\n");
2744 jbd2_journal_destroy(journal);
2745 return -EIO;
2746 }
2747
2748 EXT4_SB(sb)->s_journal = journal;
2749
2750 ext4_update_dynamic_rev(sb);
2751 EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
2752 EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL);
2753
2754 es->s_journal_inum = cpu_to_le32(journal_inum);
2755 sb->s_dirt = 1;
2756
2757 /* Make sure we flush the recovery flag to disk. */
2758 ext4_commit_super(sb, es, 1);
2759
2760 return 0;
2761}
2762
2763static void ext4_commit_super(struct super_block *sb, 2891static void ext4_commit_super(struct super_block *sb,
2764 struct ext4_super_block *es, int sync) 2892 struct ext4_super_block *es, int sync)
2765{ 2893{
@@ -2776,20 +2904,23 @@ static void ext4_commit_super(struct super_block *sb,
2776 * be remapped. Nothing we can do but to retry the 2904 * be remapped. Nothing we can do but to retry the
2777 * write and hope for the best. 2905 * write and hope for the best.
2778 */ 2906 */
2779 printk(KERN_ERR "ext4: previous I/O error to " 2907 printk(KERN_ERR "EXT4-fs: previous I/O error to "
2780 "superblock detected for %s.\n", sb->s_id); 2908 "superblock detected for %s.\n", sb->s_id);
2781 clear_buffer_write_io_error(sbh); 2909 clear_buffer_write_io_error(sbh);
2782 set_buffer_uptodate(sbh); 2910 set_buffer_uptodate(sbh);
2783 } 2911 }
2784 es->s_wtime = cpu_to_le32(get_seconds()); 2912 es->s_wtime = cpu_to_le32(get_seconds());
2785 ext4_free_blocks_count_set(es, ext4_count_free_blocks(sb)); 2913 ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
2786 es->s_free_inodes_count = cpu_to_le32(ext4_count_free_inodes(sb)); 2914 &EXT4_SB(sb)->s_freeblocks_counter));
2915 es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive(
2916 &EXT4_SB(sb)->s_freeinodes_counter));
2917
2787 BUFFER_TRACE(sbh, "marking dirty"); 2918 BUFFER_TRACE(sbh, "marking dirty");
2788 mark_buffer_dirty(sbh); 2919 mark_buffer_dirty(sbh);
2789 if (sync) { 2920 if (sync) {
2790 sync_dirty_buffer(sbh); 2921 sync_dirty_buffer(sbh);
2791 if (buffer_write_io_error(sbh)) { 2922 if (buffer_write_io_error(sbh)) {
2792 printk(KERN_ERR "ext4: I/O error while writing " 2923 printk(KERN_ERR "EXT4-fs: I/O error while writing "
2793 "superblock for %s.\n", sb->s_id); 2924 "superblock for %s.\n", sb->s_id);
2794 clear_buffer_write_io_error(sbh); 2925 clear_buffer_write_io_error(sbh);
2795 set_buffer_uptodate(sbh); 2926 set_buffer_uptodate(sbh);
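The switch above from ext4_count_free_blocks()/ext4_count_free_inodes() to percpu_counter_sum_positive() folds each CPU's local delta into the base count in O(nr_cpus) rather than re-reading every group descriptor — likely significant now that journal-less mounts commit the superblock on every sync. A sketch of the summation semantics (ours, not the kernel internals):

	/* Exact-at-that-instant total, clamped at zero. */
	static long long sum_positive(long long base,
				      const long long *cpu_delta, int ncpus)
	{
		long long v = base;
		int i;

		for (i = 0; i < ncpus; i++)
			v += cpu_delta[i];
		return v < 0 ? 0 : v;
	}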
@@ -2808,6 +2939,10 @@ static void ext4_mark_recovery_complete(struct super_block *sb,
2808{ 2939{
2809 journal_t *journal = EXT4_SB(sb)->s_journal; 2940 journal_t *journal = EXT4_SB(sb)->s_journal;
2810 2941
2942 if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
2943 BUG_ON(journal != NULL);
2944 return;
2945 }
2811 jbd2_journal_lock_updates(journal); 2946 jbd2_journal_lock_updates(journal);
2812 if (jbd2_journal_flush(journal) < 0) 2947 if (jbd2_journal_flush(journal) < 0)
2813 goto out; 2948 goto out;
@@ -2837,6 +2972,8 @@ static void ext4_clear_journal_err(struct super_block *sb,
2837 int j_errno; 2972 int j_errno;
2838 const char *errstr; 2973 const char *errstr;
2839 2974
2975 BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
2976
2840 journal = EXT4_SB(sb)->s_journal; 2977 journal = EXT4_SB(sb)->s_journal;
2841 2978
2842 /* 2979 /*
@@ -2869,14 +3006,17 @@ static void ext4_clear_journal_err(struct super_block *sb,
2869int ext4_force_commit(struct super_block *sb) 3006int ext4_force_commit(struct super_block *sb)
2870{ 3007{
2871 journal_t *journal; 3008 journal_t *journal;
2872 int ret; 3009 int ret = 0;
2873 3010
2874 if (sb->s_flags & MS_RDONLY) 3011 if (sb->s_flags & MS_RDONLY)
2875 return 0; 3012 return 0;
2876 3013
2877 journal = EXT4_SB(sb)->s_journal; 3014 journal = EXT4_SB(sb)->s_journal;
2878 sb->s_dirt = 0; 3015 if (journal) {
2879 ret = ext4_journal_force_commit(journal); 3016 sb->s_dirt = 0;
3017 ret = ext4_journal_force_commit(journal);
3018 }
3019
2880 return ret; 3020 return ret;
2881} 3021}
2882 3022
@@ -2888,9 +3028,13 @@ int ext4_force_commit(struct super_block *sb)
2888 */ 3028 */
2889static void ext4_write_super(struct super_block *sb) 3029static void ext4_write_super(struct super_block *sb)
2890{ 3030{
2891 if (mutex_trylock(&sb->s_lock) != 0) 3031 if (EXT4_SB(sb)->s_journal) {
2892 BUG(); 3032 if (mutex_trylock(&sb->s_lock) != 0)
2893 sb->s_dirt = 0; 3033 BUG();
3034 sb->s_dirt = 0;
3035 } else {
3036 ext4_commit_super(sb, EXT4_SB(sb)->s_es, 1);
3037 }
2894} 3038}
2895 3039
2896static int ext4_sync_fs(struct super_block *sb, int wait) 3040static int ext4_sync_fs(struct super_block *sb, int wait)
@@ -2899,10 +3043,14 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
2899 3043
2900 trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait); 3044 trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait);
2901 sb->s_dirt = 0; 3045 sb->s_dirt = 0;
2902 if (wait) 3046 if (EXT4_SB(sb)->s_journal) {
2903 ret = ext4_force_commit(sb); 3047 if (wait)
2904 else 3048 ret = ext4_force_commit(sb);
2905 jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, NULL); 3049 else
3050 jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, NULL);
3051 } else {
3052 ext4_commit_super(sb, EXT4_SB(sb)->s_es, wait);
3053 }
2906 return ret; 3054 return ret;
2907} 3055}
2908 3056
@@ -2917,15 +3065,17 @@ static void ext4_write_super_lockfs(struct super_block *sb)
2917 if (!(sb->s_flags & MS_RDONLY)) { 3065 if (!(sb->s_flags & MS_RDONLY)) {
2918 journal_t *journal = EXT4_SB(sb)->s_journal; 3066 journal_t *journal = EXT4_SB(sb)->s_journal;
2919 3067
2920 /* Now we set up the journal barrier. */ 3068 if (journal) {
2921 jbd2_journal_lock_updates(journal); 3069 /* Now we set up the journal barrier. */
3070 jbd2_journal_lock_updates(journal);
2922 3071
2923 /* 3072 /*
2924 * We don't want to clear needs_recovery flag when we failed 3073 * We don't want to clear needs_recovery flag when we
2925 * to flush the journal. 3074 * failed to flush the journal.
2926 */ 3075 */
2927 if (jbd2_journal_flush(journal) < 0) 3076 if (jbd2_journal_flush(journal) < 0)
2928 return; 3077 return;
3078 }
2929 3079
2930 /* Journal blocked and flushed, clear needs_recovery flag. */ 3080 /* Journal blocked and flushed, clear needs_recovery flag. */
2931 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 3081 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
@@ -2939,7 +3089,7 @@ static void ext4_write_super_lockfs(struct super_block *sb)
2939 */ 3089 */
2940static void ext4_unlockfs(struct super_block *sb) 3090static void ext4_unlockfs(struct super_block *sb)
2941{ 3091{
2942 if (!(sb->s_flags & MS_RDONLY)) { 3092 if (EXT4_SB(sb)->s_journal && !(sb->s_flags & MS_RDONLY)) {
2943 lock_super(sb); 3093 lock_super(sb);
2944 /* Reset the needs_recovery flag before the fs is unlocked. */ 3094 /* Reset the needs_recovery flag before the fs is unlocked. */
2945 EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 3095 EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
@@ -2957,6 +3107,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
2957 unsigned long old_sb_flags; 3107 unsigned long old_sb_flags;
2958 struct ext4_mount_options old_opts; 3108 struct ext4_mount_options old_opts;
2959 ext4_group_t g; 3109 ext4_group_t g;
3110 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
2960 int err; 3111 int err;
2961#ifdef CONFIG_QUOTA 3112#ifdef CONFIG_QUOTA
2962 int i; 3113 int i;
@@ -2968,16 +3119,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
2968 old_opts.s_resuid = sbi->s_resuid; 3119 old_opts.s_resuid = sbi->s_resuid;
2969 old_opts.s_resgid = sbi->s_resgid; 3120 old_opts.s_resgid = sbi->s_resgid;
2970 old_opts.s_commit_interval = sbi->s_commit_interval; 3121 old_opts.s_commit_interval = sbi->s_commit_interval;
3122 old_opts.s_min_batch_time = sbi->s_min_batch_time;
3123 old_opts.s_max_batch_time = sbi->s_max_batch_time;
2971#ifdef CONFIG_QUOTA 3124#ifdef CONFIG_QUOTA
2972 old_opts.s_jquota_fmt = sbi->s_jquota_fmt; 3125 old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
2973 for (i = 0; i < MAXQUOTAS; i++) 3126 for (i = 0; i < MAXQUOTAS; i++)
2974 old_opts.s_qf_names[i] = sbi->s_qf_names[i]; 3127 old_opts.s_qf_names[i] = sbi->s_qf_names[i];
2975#endif 3128#endif
3129 if (sbi->s_journal && sbi->s_journal->j_task->io_context)
3130 journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
2976 3131
2977 /* 3132 /*
2978 * Allow the "check" option to be passed as a remount option. 3133 * Allow the "check" option to be passed as a remount option.
2979 */ 3134 */
2980 if (!parse_options(data, sb, NULL, NULL, &n_blocks_count, 1)) { 3135 if (!parse_options(data, sb, NULL, &journal_ioprio,
3136 &n_blocks_count, 1)) {
2981 err = -EINVAL; 3137 err = -EINVAL;
2982 goto restore_opts; 3138 goto restore_opts;
2983 } 3139 }
@@ -2990,7 +3146,10 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
2990 3146
2991 es = sbi->s_es; 3147 es = sbi->s_es;
2992 3148
2993 ext4_init_journal_params(sb, sbi->s_journal); 3149 if (sbi->s_journal) {
3150 ext4_init_journal_params(sb, sbi->s_journal);
3151 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
3152 }
2994 3153
2995 if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) || 3154 if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
2996 n_blocks_count > ext4_blocks_count(es)) { 3155 n_blocks_count > ext4_blocks_count(es)) {
@@ -3019,17 +3178,20 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
3019 * We have to unlock super so that we can wait for 3178 * We have to unlock super so that we can wait for
3020 * transactions. 3179 * transactions.
3021 */ 3180 */
3022 unlock_super(sb); 3181 if (sbi->s_journal) {
3023 ext4_mark_recovery_complete(sb, es); 3182 unlock_super(sb);
3024 lock_super(sb); 3183 ext4_mark_recovery_complete(sb, es);
3184 lock_super(sb);
3185 }
3025 } else { 3186 } else {
3026 __le32 ret; 3187 int ret;
3027 if ((ret = EXT4_HAS_RO_COMPAT_FEATURE(sb, 3188 if ((ret = EXT4_HAS_RO_COMPAT_FEATURE(sb,
3028 ~EXT4_FEATURE_RO_COMPAT_SUPP))) { 3189 ~EXT4_FEATURE_RO_COMPAT_SUPP))) {
3029 printk(KERN_WARNING "EXT4-fs: %s: couldn't " 3190 printk(KERN_WARNING "EXT4-fs: %s: couldn't "
3030 "remount RDWR because of unsupported " 3191 "remount RDWR because of unsupported "
3031 "optional features (%x).\n", 3192 "optional features (%x).\n", sb->s_id,
3032 sb->s_id, le32_to_cpu(ret)); 3193 (le32_to_cpu(sbi->s_es->s_feature_ro_compat) &
3194 ~EXT4_FEATURE_RO_COMPAT_SUPP));
3033 err = -EROFS; 3195 err = -EROFS;
3034 goto restore_opts; 3196 goto restore_opts;
3035 } 3197 }
@@ -3046,7 +3208,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
3046 if (!ext4_group_desc_csum_verify(sbi, g, gdp)) { 3208 if (!ext4_group_desc_csum_verify(sbi, g, gdp)) {
3047 printk(KERN_ERR 3209 printk(KERN_ERR
3048 "EXT4-fs: ext4_remount: " 3210 "EXT4-fs: ext4_remount: "
3049 "Checksum for group %lu failed (%u!=%u)\n", 3211 "Checksum for group %u failed (%u!=%u)\n",
3050 g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)), 3212 g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)),
3051 le16_to_cpu(gdp->bg_checksum)); 3213 le16_to_cpu(gdp->bg_checksum));
3052 err = -EINVAL; 3214 err = -EINVAL;
@@ -3075,7 +3237,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
3075 * been changed by e2fsck since we originally mounted 3237 * been changed by e2fsck since we originally mounted
3076 * the partition.) 3238 * the partition.)
3077 */ 3239 */
3078 ext4_clear_journal_err(sb, es); 3240 if (sbi->s_journal)
3241 ext4_clear_journal_err(sb, es);
3079 sbi->s_mount_state = le16_to_cpu(es->s_state); 3242 sbi->s_mount_state = le16_to_cpu(es->s_state);
3080 if ((err = ext4_group_extend(sb, es, n_blocks_count))) 3243 if ((err = ext4_group_extend(sb, es, n_blocks_count)))
3081 goto restore_opts; 3244 goto restore_opts;
@@ -3083,6 +3246,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
3083 sb->s_flags &= ~MS_RDONLY; 3246 sb->s_flags &= ~MS_RDONLY;
3084 } 3247 }
3085 } 3248 }
3249 if (sbi->s_journal == NULL)
3250 ext4_commit_super(sb, es, 1);
3251
3086#ifdef CONFIG_QUOTA 3252#ifdef CONFIG_QUOTA
3087 /* Release old quota file names */ 3253 /* Release old quota file names */
3088 for (i = 0; i < MAXQUOTAS; i++) 3254 for (i = 0; i < MAXQUOTAS; i++)
@@ -3097,6 +3263,8 @@ restore_opts:
3097 sbi->s_resuid = old_opts.s_resuid; 3263 sbi->s_resuid = old_opts.s_resuid;
3098 sbi->s_resgid = old_opts.s_resgid; 3264 sbi->s_resgid = old_opts.s_resgid;
3099 sbi->s_commit_interval = old_opts.s_commit_interval; 3265 sbi->s_commit_interval = old_opts.s_commit_interval;
3266 sbi->s_min_batch_time = old_opts.s_min_batch_time;
3267 sbi->s_max_batch_time = old_opts.s_max_batch_time;
3100#ifdef CONFIG_QUOTA 3268#ifdef CONFIG_QUOTA
3101 sbi->s_jquota_fmt = old_opts.s_jquota_fmt; 3269 sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
3102 for (i = 0; i < MAXQUOTAS; i++) { 3270 for (i = 0; i < MAXQUOTAS; i++) {
@@ -3359,7 +3527,8 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
3359 * When we journal data on quota file, we have to flush journal to see 3527 * When we journal data on quota file, we have to flush journal to see
3360 * all updates to the file when we bypass pagecache... 3528 * all updates to the file when we bypass pagecache...
3361 */ 3529 */
3362 if (ext4_should_journal_data(path.dentry->d_inode)) { 3530 if (EXT4_SB(sb)->s_journal &&
3531 ext4_should_journal_data(path.dentry->d_inode)) {
3363 /* 3532 /*
3364 * We don't need to lock updates but journal_flush() could 3533 * We don't need to lock updates but journal_flush() could
3365 * otherwise be livelocked... 3534 * otherwise be livelocked...
@@ -3433,7 +3602,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
3433 struct buffer_head *bh; 3602 struct buffer_head *bh;
3434 handle_t *handle = journal_current_handle(); 3603 handle_t *handle = journal_current_handle();
3435 3604
3436 if (!handle) { 3605 if (EXT4_SB(sb)->s_journal && !handle) {
3437 printk(KERN_WARNING "EXT4-fs: Quota write (off=%llu, len=%llu)" 3606 printk(KERN_WARNING "EXT4-fs: Quota write (off=%llu, len=%llu)"
3438 " cancelled because transaction is not started.\n", 3607 " cancelled because transaction is not started.\n",
3439 (unsigned long long)off, (unsigned long long)len); 3608 (unsigned long long)off, (unsigned long long)len);
@@ -3458,7 +3627,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
3458 flush_dcache_page(bh->b_page); 3627 flush_dcache_page(bh->b_page);
3459 unlock_buffer(bh); 3628 unlock_buffer(bh);
3460 if (journal_quota) 3629 if (journal_quota)
3461 err = ext4_journal_dirty_metadata(handle, bh); 3630 err = ext4_handle_dirty_metadata(handle, NULL, bh);
3462 else { 3631 else {
3463 /* Always do at least ordered writes for quotas */ 3632 /* Always do at least ordered writes for quotas */
3464 err = ext4_jbd2_file_inode(handle, inode); 3633 err = ext4_jbd2_file_inode(handle, inode);
@@ -3512,18 +3681,15 @@ static int ext4_ui_proc_open(struct inode *inode, struct file *file)
3512static ssize_t ext4_ui_proc_write(struct file *file, const char __user *buf, 3681static ssize_t ext4_ui_proc_write(struct file *file, const char __user *buf,
3513 size_t cnt, loff_t *ppos) 3682 size_t cnt, loff_t *ppos)
3514{ 3683{
3515 unsigned int *p = PDE(file->f_path.dentry->d_inode)->data; 3684 unsigned long *p = PDE(file->f_path.dentry->d_inode)->data;
3516 char str[32]; 3685 char str[32];
3517 unsigned long value;
3518 3686
3519 if (cnt >= sizeof(str)) 3687 if (cnt >= sizeof(str))
3520 return -EINVAL; 3688 return -EINVAL;
3521 if (copy_from_user(str, buf, cnt)) 3689 if (copy_from_user(str, buf, cnt))
3522 return -EFAULT; 3690 return -EFAULT;
3523 value = simple_strtol(str, NULL, 0); 3691
3524 if (value < 0) 3692 *p = simple_strtoul(str, NULL, 0);
3525 return -ERANGE;
3526 *p = value;
3527 return cnt; 3693 return cnt;
3528} 3694}
3529 3695
@@ -3614,7 +3780,7 @@ static void __exit exit_ext4_fs(void)
3614} 3780}
3615 3781
3616MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); 3782MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
3617MODULE_DESCRIPTION("Fourth Extended Filesystem with extents"); 3783MODULE_DESCRIPTION("Fourth Extended Filesystem");
3618MODULE_LICENSE("GPL"); 3784MODULE_LICENSE("GPL");
3619module_init(init_ext4_fs) 3785module_init(init_ext4_fs)
3620module_exit(exit_ext4_fs) 3786module_exit(exit_ext4_fs)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 80626d516fee..157ce6589c54 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -457,7 +457,7 @@ static void ext4_xattr_update_super_block(handle_t *handle,
457 if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) { 457 if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
458 EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR); 458 EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
459 sb->s_dirt = 1; 459 sb->s_dirt = 1;
460 ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh); 460 ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
461 } 461 }
462} 462}
463 463
@@ -487,9 +487,9 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
487 ext4_forget(handle, 1, inode, bh, bh->b_blocknr); 487 ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
488 } else { 488 } else {
489 le32_add_cpu(&BHDR(bh)->h_refcount, -1); 489 le32_add_cpu(&BHDR(bh)->h_refcount, -1);
490 error = ext4_journal_dirty_metadata(handle, bh); 490 error = ext4_handle_dirty_metadata(handle, inode, bh);
491 if (IS_SYNC(inode)) 491 if (IS_SYNC(inode))
492 handle->h_sync = 1; 492 ext4_handle_sync(handle);
493 DQUOT_FREE_BLOCK(inode, 1); 493 DQUOT_FREE_BLOCK(inode, 1);
494 ea_bdebug(bh, "refcount now=%d; releasing", 494 ea_bdebug(bh, "refcount now=%d; releasing",
495 le32_to_cpu(BHDR(bh)->h_refcount)); 495 le32_to_cpu(BHDR(bh)->h_refcount));
@@ -724,8 +724,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
724 if (error == -EIO) 724 if (error == -EIO)
725 goto bad_block; 725 goto bad_block;
726 if (!error) 726 if (!error)
727 error = ext4_journal_dirty_metadata(handle, 727 error = ext4_handle_dirty_metadata(handle,
728 bs->bh); 728 inode,
729 bs->bh);
729 if (error) 730 if (error)
730 goto cleanup; 731 goto cleanup;
731 goto inserted; 732 goto inserted;
@@ -794,8 +795,9 @@ inserted:
794 ea_bdebug(new_bh, "reusing; refcount now=%d", 795 ea_bdebug(new_bh, "reusing; refcount now=%d",
795 le32_to_cpu(BHDR(new_bh)->h_refcount)); 796 le32_to_cpu(BHDR(new_bh)->h_refcount));
796 unlock_buffer(new_bh); 797 unlock_buffer(new_bh);
797 error = ext4_journal_dirty_metadata(handle, 798 error = ext4_handle_dirty_metadata(handle,
798 new_bh); 799 inode,
800 new_bh);
799 if (error) 801 if (error)
800 goto cleanup_dquot; 802 goto cleanup_dquot;
801 } 803 }
@@ -810,8 +812,8 @@ inserted:
810 /* We need to allocate a new block */ 812 /* We need to allocate a new block */
811 ext4_fsblk_t goal = ext4_group_first_block_no(sb, 813 ext4_fsblk_t goal = ext4_group_first_block_no(sb,
812 EXT4_I(inode)->i_block_group); 814 EXT4_I(inode)->i_block_group);
813 ext4_fsblk_t block = ext4_new_meta_block(handle, inode, 815 ext4_fsblk_t block = ext4_new_meta_blocks(handle, inode,
814 goal, &error); 816 goal, NULL, &error);
815 if (error) 817 if (error)
816 goto cleanup; 818 goto cleanup;
817 ea_idebug(inode, "creating block %d", block); 819 ea_idebug(inode, "creating block %d", block);
@@ -833,7 +835,8 @@ getblk_failed:
833 set_buffer_uptodate(new_bh); 835 set_buffer_uptodate(new_bh);
834 unlock_buffer(new_bh); 836 unlock_buffer(new_bh);
835 ext4_xattr_cache_insert(new_bh); 837 ext4_xattr_cache_insert(new_bh);
836 error = ext4_journal_dirty_metadata(handle, new_bh); 838 error = ext4_handle_dirty_metadata(handle,
839 inode, new_bh);
837 if (error) 840 if (error)
838 goto cleanup; 841 goto cleanup;
839 } 842 }
@@ -1040,7 +1043,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
1040 */ 1043 */
1041 is.iloc.bh = NULL; 1044 is.iloc.bh = NULL;
1042 if (IS_SYNC(inode)) 1045 if (IS_SYNC(inode))
1043 handle->h_sync = 1; 1046 ext4_handle_sync(handle);
1044 } 1047 }
1045 1048
1046cleanup: 1049cleanup:
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 3569e0ad86a2..1a39ac370942 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -27,7 +27,7 @@
27#include <linux/security.h> 27#include <linux/security.h>
28#include <linux/pid_namespace.h> 28#include <linux/pid_namespace.h>
29 29
30static int set_task_ioprio(struct task_struct *task, int ioprio) 30int set_task_ioprio(struct task_struct *task, int ioprio)
31{ 31{
32 int err; 32 int err;
33 struct io_context *ioc; 33 struct io_context *ioc;
@@ -70,6 +70,7 @@ static int set_task_ioprio(struct task_struct *task, int ioprio)
70 task_unlock(task); 70 task_unlock(task);
71 return err; 71 return err;
72} 72}
73EXPORT_SYMBOL_GPL(set_task_ioprio);
73 74
74asmlinkage long sys_ioprio_set(int which, int who, int ioprio) 75asmlinkage long sys_ioprio_set(int which, int who, int ioprio)
75{ 76{
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 25719d902c51..3fbffb1ea714 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -306,6 +306,8 @@ void journal_commit_transaction(journal_t *journal)
306 int flags; 306 int flags;
307 int err; 307 int err;
308 unsigned long blocknr; 308 unsigned long blocknr;
309 ktime_t start_time;
310 u64 commit_time;
309 char *tagp = NULL; 311 char *tagp = NULL;
310 journal_header_t *header; 312 journal_header_t *header;
311 journal_block_tag_t *tag = NULL; 313 journal_block_tag_t *tag = NULL;
@@ -418,6 +420,7 @@ void journal_commit_transaction(journal_t *journal)
418 commit_transaction->t_state = T_FLUSH; 420 commit_transaction->t_state = T_FLUSH;
419 journal->j_committing_transaction = commit_transaction; 421 journal->j_committing_transaction = commit_transaction;
420 journal->j_running_transaction = NULL; 422 journal->j_running_transaction = NULL;
423 start_time = ktime_get();
421 commit_transaction->t_log_start = journal->j_head; 424 commit_transaction->t_log_start = journal->j_head;
422 wake_up(&journal->j_wait_transaction_locked); 425 wake_up(&journal->j_wait_transaction_locked);
423 spin_unlock(&journal->j_state_lock); 426 spin_unlock(&journal->j_state_lock);
@@ -913,6 +916,18 @@ restart_loop:
913 J_ASSERT(commit_transaction == journal->j_committing_transaction); 916 J_ASSERT(commit_transaction == journal->j_committing_transaction);
914 journal->j_commit_sequence = commit_transaction->t_tid; 917 journal->j_commit_sequence = commit_transaction->t_tid;
915 journal->j_committing_transaction = NULL; 918 journal->j_committing_transaction = NULL;
919 commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
920
921 /*
922 * weight the commit time higher than the average time so we don't
923 * react too strongly to vast changes in commit time
924 */
925 if (likely(journal->j_average_commit_time))
926 journal->j_average_commit_time = (commit_time*3 +
927 journal->j_average_commit_time) / 4;
928 else
929 journal->j_average_commit_time = commit_time;
930
916 spin_unlock(&journal->j_state_lock); 931 spin_unlock(&journal->j_state_lock);
917 932
918 if (commit_transaction->t_checkpoint_list == NULL && 933 if (commit_transaction->t_checkpoint_list == NULL &&
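The j_average_commit_time update above is an exponentially weighted moving average; jbd gives the newest sample 3/4 of the weight (the jbd2 hunk later in this patch puts the 3/4 weight on the running average instead). A standalone sketch with a worked number, helper name ours:

	/* With avg = 8 ms and a 4 ms commit: (3*4 + 8) / 4 = 5 ms, so the
	 * estimate tracks a faster disk quickly while damping outliers. */
	static u64 update_avg_commit_time(u64 avg_ns, u64 sample_ns)
	{
		if (!avg_ns)		/* first sample seeds the average */
			return sample_ns;
		return (sample_ns * 3 + avg_ns) / 4;
	}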
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 60d4c32c8808..e6a117431277 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -25,6 +25,7 @@
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/highmem.h> 27#include <linux/highmem.h>
28#include <linux/hrtimer.h>
28 29
29static void __journal_temp_unlink_buffer(struct journal_head *jh); 30static void __journal_temp_unlink_buffer(struct journal_head *jh);
30 31
@@ -49,6 +50,7 @@ get_transaction(journal_t *journal, transaction_t *transaction)
49{ 50{
50 transaction->t_journal = journal; 51 transaction->t_journal = journal;
51 transaction->t_state = T_RUNNING; 52 transaction->t_state = T_RUNNING;
53 transaction->t_start_time = ktime_get();
52 transaction->t_tid = journal->j_transaction_sequence++; 54 transaction->t_tid = journal->j_transaction_sequence++;
53 transaction->t_expires = jiffies + journal->j_commit_interval; 55 transaction->t_expires = jiffies + journal->j_commit_interval;
54 spin_lock_init(&transaction->t_handle_lock); 56 spin_lock_init(&transaction->t_handle_lock);
@@ -752,7 +754,6 @@ out:
752 * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update. 754 * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
753 * @handle: transaction to add buffer modifications to 755 * @handle: transaction to add buffer modifications to
754 * @bh: bh to be used for metadata writes 756 * @bh: bh to be used for metadata writes
755 * @credits: variable that will receive credits for the buffer
756 * 757 *
757 * Returns an error code or 0 on success. 758 * Returns an error code or 0 on success.
758 * 759 *
@@ -1370,7 +1371,7 @@ int journal_stop(handle_t *handle)
1370{ 1371{
1371 transaction_t *transaction = handle->h_transaction; 1372 transaction_t *transaction = handle->h_transaction;
1372 journal_t *journal = transaction->t_journal; 1373 journal_t *journal = transaction->t_journal;
1373 int old_handle_count, err; 1374 int err;
1374 pid_t pid; 1375 pid_t pid;
1375 1376
1376 J_ASSERT(journal_current_handle() == handle); 1377 J_ASSERT(journal_current_handle() == handle);
@@ -1399,6 +1400,17 @@ int journal_stop(handle_t *handle)
1399 * on IO anyway. Speeds up many-threaded, many-dir operations 1400 * on IO anyway. Speeds up many-threaded, many-dir operations
1400 * by 30x or more... 1401 * by 30x or more...
1401 * 1402 *
1403 * We try to optimize the sleep time against what the underlying disk
1404 * can do, instead of having a static sleep time. This is useful for
1405 * the case where our storage is so fast that it is more optimal to go
1406 * ahead and force a flush and wait for the transaction to be committed
1407 * than it is to wait for an arbitrary amount of time for new writers to
1408 * join the transaction. We achieve this by measuring how long it takes
1409 * to commit a transaction, and comparing it with how long this
1410 * transaction has been running; if the run time is less than the commit
1411 * time, we sleep briefly and then commit. This greatly helps super fast
1412 * disks that would see slowdowns as more threads started doing fsyncs.
1413 *
1402 * But don't do this if this process was the most recent one to 1414 * But don't do this if this process was the most recent one to
1403 * perform a synchronous write. We do this to detect the case where a 1415 * perform a synchronous write. We do this to detect the case where a
1404 * single process is doing a stream of sync writes. No point in waiting 1416 * single process is doing a stream of sync writes. No point in waiting
@@ -1406,11 +1418,26 @@ int journal_stop(handle_t *handle)
1406 */ 1418 */
1407 pid = current->pid; 1419 pid = current->pid;
1408 if (handle->h_sync && journal->j_last_sync_writer != pid) { 1420 if (handle->h_sync && journal->j_last_sync_writer != pid) {
1421 u64 commit_time, trans_time;
1422
1409 journal->j_last_sync_writer = pid; 1423 journal->j_last_sync_writer = pid;
1410 do { 1424
1411 old_handle_count = transaction->t_handle_count; 1425 spin_lock(&journal->j_state_lock);
1412 schedule_timeout_uninterruptible(1); 1426 commit_time = journal->j_average_commit_time;
1413 } while (old_handle_count != transaction->t_handle_count); 1427 spin_unlock(&journal->j_state_lock);
1428
1429 trans_time = ktime_to_ns(ktime_sub(ktime_get(),
1430 transaction->t_start_time));
1431
1432 commit_time = min_t(u64, commit_time,
1433 1000*jiffies_to_usecs(1));
1434
1435 if (trans_time < commit_time) {
1436 ktime_t expires = ktime_add_ns(ktime_get(),
1437 commit_time);
1438 set_current_state(TASK_UNINTERRUPTIBLE);
1439 schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
1440 }
1414 } 1441 }
1415 1442
1416 current->journal_info = NULL; 1443 current->journal_info = NULL;
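To put numbers on the adaptive batching above (HZ = 1000 assumed for the example; the clamp expression is the one in the hunk):

	/* 1000 * jiffies_to_usecs(1) = 1,000,000 ns, i.e. one tick, so the
	 * wait can never exceed what the old 1-jiffy polling loop cost.
	 * A disk averaging 200,000 ns per commit batches new writers for
	 * at most 0.2 ms instead of a full tick per contended fsync. */
	static u64 batching_wait_ns(u64 avg_commit_ns)
	{
		return min_t(u64, avg_commit_ns, 1000ULL * jiffies_to_usecs(1));
	}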
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 9497718fe920..17159cacbd9e 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -249,16 +249,14 @@ restart:
249 return ret; 249 return ret;
250} 250}
251 251
252#define NR_BATCH 64
253
254static void 252static void
255__flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count) 253__flush_batch(journal_t *journal, int *batch_count)
256{ 254{
257 int i; 255 int i;
258 256
259 ll_rw_block(SWRITE, *batch_count, bhs); 257 ll_rw_block(SWRITE, *batch_count, journal->j_chkpt_bhs);
260 for (i = 0; i < *batch_count; i++) { 258 for (i = 0; i < *batch_count; i++) {
261 struct buffer_head *bh = bhs[i]; 259 struct buffer_head *bh = journal->j_chkpt_bhs[i];
262 clear_buffer_jwrite(bh); 260 clear_buffer_jwrite(bh);
263 BUFFER_TRACE(bh, "brelse"); 261 BUFFER_TRACE(bh, "brelse");
264 __brelse(bh); 262 __brelse(bh);
@@ -277,8 +275,7 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
277 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it 275 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
278 */ 276 */
279static int __process_buffer(journal_t *journal, struct journal_head *jh, 277static int __process_buffer(journal_t *journal, struct journal_head *jh,
280 struct buffer_head **bhs, int *batch_count, 278 int *batch_count, transaction_t *transaction)
281 transaction_t *transaction)
282{ 279{
283 struct buffer_head *bh = jh2bh(jh); 280 struct buffer_head *bh = jh2bh(jh);
284 int ret = 0; 281 int ret = 0;
@@ -325,14 +322,14 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
325 get_bh(bh); 322 get_bh(bh);
326 J_ASSERT_BH(bh, !buffer_jwrite(bh)); 323 J_ASSERT_BH(bh, !buffer_jwrite(bh));
327 set_buffer_jwrite(bh); 324 set_buffer_jwrite(bh);
328 bhs[*batch_count] = bh; 325 journal->j_chkpt_bhs[*batch_count] = bh;
329 __buffer_relink_io(jh); 326 __buffer_relink_io(jh);
330 jbd_unlock_bh_state(bh); 327 jbd_unlock_bh_state(bh);
331 transaction->t_chp_stats.cs_written++; 328 transaction->t_chp_stats.cs_written++;
332 (*batch_count)++; 329 (*batch_count)++;
333 if (*batch_count == NR_BATCH) { 330 if (*batch_count == JBD2_NR_BATCH) {
334 spin_unlock(&journal->j_list_lock); 331 spin_unlock(&journal->j_list_lock);
335 __flush_batch(journal, bhs, batch_count); 332 __flush_batch(journal, batch_count);
336 ret = 1; 333 ret = 1;
337 } 334 }
338 } 335 }
@@ -388,7 +385,6 @@ restart:
388 if (journal->j_checkpoint_transactions == transaction && 385 if (journal->j_checkpoint_transactions == transaction &&
389 transaction->t_tid == this_tid) { 386 transaction->t_tid == this_tid) {
390 int batch_count = 0; 387 int batch_count = 0;
391 struct buffer_head *bhs[NR_BATCH];
392 struct journal_head *jh; 388 struct journal_head *jh;
393 int retry = 0, err; 389 int retry = 0, err;
394 390
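The on-stack array deleted above is the point of the j_chkpt_bhs change; with JBD2_NR_BATCH keeping the old NR_BATCH value of 64, the saving is easy to estimate (64-bit pointers assumed):

	/* struct buffer_head *bhs[64] = 64 * sizeof(void *) = 512 bytes of
	 * kernel stack per checkpoint call chain; parking the array in
	 * journal_t, where the batching logic already serializes access,
	 * removes that footprint. */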
@@ -402,7 +398,7 @@ restart:
402 retry = 1; 398 retry = 1;
403 break; 399 break;
404 } 400 }
405 retry = __process_buffer(journal, jh, bhs, &batch_count, 401 retry = __process_buffer(journal, jh, &batch_count,
406 transaction); 402 transaction);
407 if (retry < 0 && !result) 403 if (retry < 0 && !result)
408 result = retry; 404 result = retry;
@@ -419,7 +415,7 @@ restart:
419 spin_unlock(&journal->j_list_lock); 415 spin_unlock(&journal->j_list_lock);
420 retry = 1; 416 retry = 1;
421 } 417 }
422 __flush_batch(journal, bhs, &batch_count); 418 __flush_batch(journal, &batch_count);
423 } 419 }
424 420
425 if (retry) { 421 if (retry) {
@@ -686,6 +682,7 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
686 safely remove this transaction from the log */ 682 safely remove this transaction from the log */
687 683
688 __jbd2_journal_drop_transaction(journal, transaction); 684 __jbd2_journal_drop_transaction(journal, transaction);
685 kfree(transaction);
689 686
690 /* Just in case anybody was waiting for more transactions to be 687 /* Just in case anybody was waiting for more transactions to be
691 checkpointed... */ 688 checkpointed... */
@@ -760,5 +757,4 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact
760 J_ASSERT(journal->j_running_transaction != transaction); 757 J_ASSERT(journal->j_running_transaction != transaction);
761 758
762 jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid); 759 jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
763 kfree(transaction);
764} 760}
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index c8a1bace685a..62804e57a44c 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -25,6 +25,7 @@
25#include <linux/crc32.h> 25#include <linux/crc32.h>
26#include <linux/writeback.h> 26#include <linux/writeback.h>
27#include <linux/backing-dev.h> 27#include <linux/backing-dev.h>
28#include <linux/bio.h>
28 29
29/* 30/*
30 * Default IO end handler for temporary BJ_IO buffer_heads. 31 * Default IO end handler for temporary BJ_IO buffer_heads.
@@ -137,7 +138,7 @@ static int journal_submit_commit_record(journal_t *journal,
137 set_buffer_ordered(bh); 138 set_buffer_ordered(bh);
138 barrier_done = 1; 139 barrier_done = 1;
139 } 140 }
140 ret = submit_bh(WRITE, bh); 141 ret = submit_bh(WRITE_SYNC, bh);
141 if (barrier_done) 142 if (barrier_done)
142 clear_buffer_ordered(bh); 143 clear_buffer_ordered(bh);
143 144
@@ -158,7 +159,7 @@ static int journal_submit_commit_record(journal_t *journal,
158 lock_buffer(bh); 159 lock_buffer(bh);
159 set_buffer_uptodate(bh); 160 set_buffer_uptodate(bh);
160 clear_buffer_dirty(bh); 161 clear_buffer_dirty(bh);
161 ret = submit_bh(WRITE, bh); 162 ret = submit_bh(WRITE_SYNC, bh);
162 } 163 }
163 *cbh = bh; 164 *cbh = bh;
164 return ret; 165 return ret;
@@ -168,12 +169,34 @@ static int journal_submit_commit_record(journal_t *journal,
168 * This function along with journal_submit_commit_record 169 * This function along with journal_submit_commit_record
169 * allows to write the commit record asynchronously. 170 * allows to write the commit record asynchronously.
170 */ 171 */
171static int journal_wait_on_commit_record(struct buffer_head *bh) 172static int journal_wait_on_commit_record(journal_t *journal,
173 struct buffer_head *bh)
172{ 174{
173 int ret = 0; 175 int ret = 0;
174 176
177retry:
175 clear_buffer_dirty(bh); 178 clear_buffer_dirty(bh);
176 wait_on_buffer(bh); 179 wait_on_buffer(bh);
180 if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
181 printk(KERN_WARNING
182 "JBD2: wait_on_commit_record: sync failed on %s - "
183 "disabling barriers\n", journal->j_devname);
184 spin_lock(&journal->j_state_lock);
185 journal->j_flags &= ~JBD2_BARRIER;
186 spin_unlock(&journal->j_state_lock);
187
188 lock_buffer(bh);
189 clear_buffer_dirty(bh);
190 set_buffer_uptodate(bh);
191 bh->b_end_io = journal_end_buffer_io_sync;
192
193 ret = submit_bh(WRITE_SYNC, bh);
194 if (ret) {
195 unlock_buffer(bh);
196 return ret;
197 }
198 goto retry;
199 }
177 200
178 if (unlikely(!buffer_uptodate(bh))) 201 if (unlikely(!buffer_uptodate(bh)))
179 ret = -EIO; 202 ret = -EIO;
@@ -332,13 +355,15 @@ void jbd2_journal_commit_transaction(journal_t *journal)
332 int flags; 355 int flags;
333 int err; 356 int err;
334 unsigned long long blocknr; 357 unsigned long long blocknr;
358 ktime_t start_time;
359 u64 commit_time;
335 char *tagp = NULL; 360 char *tagp = NULL;
336 journal_header_t *header; 361 journal_header_t *header;
337 journal_block_tag_t *tag = NULL; 362 journal_block_tag_t *tag = NULL;
338 int space_left = 0; 363 int space_left = 0;
339 int first_tag = 0; 364 int first_tag = 0;
340 int tag_flag; 365 int tag_flag;
341 int i; 366 int i, to_free = 0;
342 int tag_bytes = journal_tag_bytes(journal); 367 int tag_bytes = journal_tag_bytes(journal);
343 struct buffer_head *cbh = NULL; /* For transactional checksums */ 368 struct buffer_head *cbh = NULL; /* For transactional checksums */
344 __u32 crc32_sum = ~0; 369 __u32 crc32_sum = ~0;
@@ -458,6 +483,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
458 commit_transaction->t_state = T_FLUSH; 483 commit_transaction->t_state = T_FLUSH;
459 journal->j_committing_transaction = commit_transaction; 484 journal->j_committing_transaction = commit_transaction;
460 journal->j_running_transaction = NULL; 485 journal->j_running_transaction = NULL;
486 start_time = ktime_get();
461 commit_transaction->t_log_start = journal->j_head; 487 commit_transaction->t_log_start = journal->j_head;
462 wake_up(&journal->j_wait_transaction_locked); 488 wake_up(&journal->j_wait_transaction_locked);
463 spin_unlock(&journal->j_state_lock); 489 spin_unlock(&journal->j_state_lock);
@@ -803,7 +829,7 @@ wait_for_iobuf:
803 __jbd2_journal_abort_hard(journal); 829 __jbd2_journal_abort_hard(journal);
804 } 830 }
805 if (!err && !is_journal_aborted(journal)) 831 if (!err && !is_journal_aborted(journal))
806 err = journal_wait_on_commit_record(cbh); 832 err = journal_wait_on_commit_record(journal, cbh);
807 833
808 if (err) 834 if (err)
809 jbd2_journal_abort(journal, err); 835 jbd2_journal_abort(journal, err);
@@ -981,14 +1007,23 @@ restart_loop:
981 J_ASSERT(commit_transaction == journal->j_committing_transaction); 1007 J_ASSERT(commit_transaction == journal->j_committing_transaction);
982 journal->j_commit_sequence = commit_transaction->t_tid; 1008 journal->j_commit_sequence = commit_transaction->t_tid;
983 journal->j_committing_transaction = NULL; 1009 journal->j_committing_transaction = NULL;
984 spin_unlock(&journal->j_state_lock); 1010 commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
985 1011
986 if (journal->j_commit_callback) 1012 /*
987 journal->j_commit_callback(journal, commit_transaction); 1013 * weight the commit time higher than the average time so we don't
1014 * react too strongly to vast changes in the commit time
1015 */
1016 if (likely(journal->j_average_commit_time))
1017 journal->j_average_commit_time = (commit_time +
1018 journal->j_average_commit_time*3) / 4;
1019 else
1020 journal->j_average_commit_time = commit_time;
1021 spin_unlock(&journal->j_state_lock);
988 1022
989 if (commit_transaction->t_checkpoint_list == NULL && 1023 if (commit_transaction->t_checkpoint_list == NULL &&
990 commit_transaction->t_checkpoint_io_list == NULL) { 1024 commit_transaction->t_checkpoint_io_list == NULL) {
991 __jbd2_journal_drop_transaction(journal, commit_transaction); 1025 __jbd2_journal_drop_transaction(journal, commit_transaction);
1026 to_free = 1;
992 } else { 1027 } else {
993 if (journal->j_checkpoint_transactions == NULL) { 1028 if (journal->j_checkpoint_transactions == NULL) {
994 journal->j_checkpoint_transactions = commit_transaction; 1029 journal->j_checkpoint_transactions = commit_transaction;
@@ -1007,11 +1042,16 @@ restart_loop:
1007 } 1042 }
1008 spin_unlock(&journal->j_list_lock); 1043 spin_unlock(&journal->j_list_lock);
1009 1044
1045 if (journal->j_commit_callback)
1046 journal->j_commit_callback(journal, commit_transaction);
1047
1010 trace_mark(jbd2_end_commit, "dev %s transaction %d head %d", 1048 trace_mark(jbd2_end_commit, "dev %s transaction %d head %d",
1011 journal->j_devname, journal->j_commit_sequence, 1049 journal->j_devname, commit_transaction->t_tid,
1012 journal->j_tail_sequence); 1050 journal->j_tail_sequence);
1013 jbd_debug(1, "JBD: commit %d complete, head %d\n", 1051 jbd_debug(1, "JBD: commit %d complete, head %d\n",
1014 journal->j_commit_sequence, journal->j_tail_sequence); 1052 journal->j_commit_sequence, journal->j_tail_sequence);
1053 if (to_free)
1054 kfree(commit_transaction);
1015 1055
1016 wake_up(&journal->j_wait_done_commit); 1056 wake_up(&journal->j_wait_done_commit);
1017} 1057}
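The commit-time bookkeeping introduced across the hunks above keeps a running average that weights history 3:1 against the newest sample, so a single outlier commit only moves the estimate by a quarter of the difference. A self-contained sketch of that update rule (sample values invented):

#include <stdint.h>
#include <stdio.h>

/*
 * The averaging rule from the hunk above: weight the existing
 * average three times as heavily as the new sample; the first
 * sample simply seeds the average.
 */
static uint64_t update_average(uint64_t avg, uint64_t sample)
{
        if (!avg)
                return sample;
        return (sample + 3 * avg) / 4;
}

int main(void)
{
        uint64_t avg = 0;
        uint64_t samples[] = { 4000, 4000, 16000, 4000 };  /* ns, made up */

        for (int i = 0; i < 4; i++) {
                avg = update_average(avg, samples[i]);
                printf("sample %llu -> avg %llu\n",
                       (unsigned long long)samples[i],
                       (unsigned long long)avg);
        }
        return 0;
}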
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f6bff9d6f8df..56675306ed81 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -40,6 +40,7 @@
40 40
41#include <asm/uaccess.h> 41#include <asm/uaccess.h>
42#include <asm/page.h> 42#include <asm/page.h>
43#include <asm/div64.h>
43 44
44EXPORT_SYMBOL(jbd2_journal_start); 45EXPORT_SYMBOL(jbd2_journal_start);
45EXPORT_SYMBOL(jbd2_journal_restart); 46EXPORT_SYMBOL(jbd2_journal_restart);
@@ -66,7 +67,6 @@ EXPORT_SYMBOL(jbd2_journal_update_format);
66EXPORT_SYMBOL(jbd2_journal_check_used_features); 67EXPORT_SYMBOL(jbd2_journal_check_used_features);
67EXPORT_SYMBOL(jbd2_journal_check_available_features); 68EXPORT_SYMBOL(jbd2_journal_check_available_features);
68EXPORT_SYMBOL(jbd2_journal_set_features); 69EXPORT_SYMBOL(jbd2_journal_set_features);
69EXPORT_SYMBOL(jbd2_journal_create);
70EXPORT_SYMBOL(jbd2_journal_load); 70EXPORT_SYMBOL(jbd2_journal_load);
71EXPORT_SYMBOL(jbd2_journal_destroy); 71EXPORT_SYMBOL(jbd2_journal_destroy);
72EXPORT_SYMBOL(jbd2_journal_abort); 72EXPORT_SYMBOL(jbd2_journal_abort);
@@ -132,8 +132,9 @@ static int kjournald2(void *arg)
132 journal->j_task = current; 132 journal->j_task = current;
133 wake_up(&journal->j_wait_done_commit); 133 wake_up(&journal->j_wait_done_commit);
134 134
135 printk(KERN_INFO "kjournald2 starting. Commit interval %ld seconds\n", 135 printk(KERN_INFO "kjournald2 starting: pid %d, dev %s, "
136 journal->j_commit_interval / HZ); 136 "commit interval %ld seconds\n", current->pid,
137 journal->j_devname, journal->j_commit_interval / HZ);
137 138
138 /* 139 /*
139 * And now, wait forever for commit wakeup events. 140 * And now, wait forever for commit wakeup events.
@@ -650,6 +651,8 @@ struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
650 return NULL; 651 return NULL;
651 652
652 bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); 653 bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
654 if (!bh)
655 return NULL;
653 lock_buffer(bh); 656 lock_buffer(bh);
654 memset(bh->b_data, 0, journal->j_blocksize); 657 memset(bh->b_data, 0, journal->j_blocksize);
655 set_buffer_uptodate(bh); 658 set_buffer_uptodate(bh);
@@ -843,6 +846,8 @@ static int jbd2_seq_info_show(struct seq_file *seq, void *v)
843 jiffies_to_msecs(s->stats->u.run.rs_flushing / s->stats->ts_tid)); 846 jiffies_to_msecs(s->stats->u.run.rs_flushing / s->stats->ts_tid));
844 seq_printf(seq, " %ums logging transaction\n", 847 seq_printf(seq, " %ums logging transaction\n",
845 jiffies_to_msecs(s->stats->u.run.rs_logging / s->stats->ts_tid)); 848 jiffies_to_msecs(s->stats->u.run.rs_logging / s->stats->ts_tid));
849 seq_printf(seq, " %luus average transaction commit time\n",
850 do_div(s->journal->j_average_commit_time, 1000));
846 seq_printf(seq, " %lu handles per transaction\n", 851 seq_printf(seq, " %lu handles per transaction\n",
847 s->stats->u.run.rs_handle_count / s->stats->ts_tid); 852 s->stats->u.run.rs_handle_count / s->stats->ts_tid);
848 seq_printf(seq, " %lu blocks per transaction\n", 853 seq_printf(seq, " %lu blocks per transaction\n",
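One caveat on the new stats line: in the kernel, do_div(n, base) divides n in place and evaluates to the remainder, not the quotient, so the seq_printf() above would print j_average_commit_time % 1000 and truncate the stored average as a side effect; if memory serves, later kernels switched this call site to div_u64(). A userspace mimic showing the semantics (the macro below is an illustrative re-implementation, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/*
 * Userspace stand-in for the kernel's do_div(): divide n by base
 * in place and hand back the remainder.
 */
#define do_div(n, base) ({              \
        uint32_t __rem = (n) % (base);  \
        (n) /= (base);                  \
        __rem;                          \
})

int main(void)
{
        uint64_t avg = 123456789;       /* nanoseconds */

        /* Prints the remainder (789), not the microsecond value. */
        printf("%u\n", do_div(avg, 1000));
        /* avg now holds the quotient: 123456 microseconds. */
        printf("%llu\n", (unsigned long long)avg);
        return 0;
}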
@@ -980,6 +985,8 @@ static journal_t * journal_init_common (void)
980 spin_lock_init(&journal->j_state_lock); 985 spin_lock_init(&journal->j_state_lock);
981 986
982 journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE); 987 journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE);
988 journal->j_min_batch_time = 0;
989 journal->j_max_batch_time = 15000; /* 15ms */
983 990
984 /* The journal is marked for error until we succeed with recovery! */ 991 /* The journal is marked for error until we succeed with recovery! */
985 journal->j_flags = JBD2_ABORT; 992 journal->j_flags = JBD2_ABORT;
@@ -1035,15 +1042,14 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
1035 1042
1036 /* journal descriptor can store up to n blocks -bzzz */ 1043 /* journal descriptor can store up to n blocks -bzzz */
1037 journal->j_blocksize = blocksize; 1044 journal->j_blocksize = blocksize;
1045 jbd2_stats_proc_init(journal);
1038 n = journal->j_blocksize / sizeof(journal_block_tag_t); 1046 n = journal->j_blocksize / sizeof(journal_block_tag_t);
1039 journal->j_wbufsize = n; 1047 journal->j_wbufsize = n;
1040 journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL); 1048 journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
1041 if (!journal->j_wbuf) { 1049 if (!journal->j_wbuf) {
1042 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n", 1050 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
1043 __func__); 1051 __func__);
1044 kfree(journal); 1052 goto out_err;
1045 journal = NULL;
1046 goto out;
1047 } 1053 }
1048 journal->j_dev = bdev; 1054 journal->j_dev = bdev;
1049 journal->j_fs_dev = fs_dev; 1055 journal->j_fs_dev = fs_dev;
@@ -1053,14 +1059,22 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
1053 p = journal->j_devname; 1059 p = journal->j_devname;
1054 while ((p = strchr(p, '/'))) 1060 while ((p = strchr(p, '/')))
1055 *p = '!'; 1061 *p = '!';
1056 jbd2_stats_proc_init(journal);
1057 1062
1058 bh = __getblk(journal->j_dev, start, journal->j_blocksize); 1063 bh = __getblk(journal->j_dev, start, journal->j_blocksize);
1059 J_ASSERT(bh != NULL); 1064 if (!bh) {
1065 printk(KERN_ERR
1066 "%s: Cannot get buffer for journal superblock\n",
1067 __func__);
1068 goto out_err;
1069 }
1060 journal->j_sb_buffer = bh; 1070 journal->j_sb_buffer = bh;
1061 journal->j_superblock = (journal_superblock_t *)bh->b_data; 1071 journal->j_superblock = (journal_superblock_t *)bh->b_data;
1062out: 1072
1063 return journal; 1073 return journal;
1074out_err:
1075 jbd2_stats_proc_exit(journal);
1076 kfree(journal);
1077 return NULL;
1064} 1078}
1065 1079
1066/** 1080/**
@@ -1108,9 +1122,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
1108 if (!journal->j_wbuf) { 1122 if (!journal->j_wbuf) {
1109 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n", 1123 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
1110 __func__); 1124 __func__);
1111 jbd2_stats_proc_exit(journal); 1125 goto out_err;
1112 kfree(journal);
1113 return NULL;
1114 } 1126 }
1115 1127
1116 err = jbd2_journal_bmap(journal, 0, &blocknr); 1128 err = jbd2_journal_bmap(journal, 0, &blocknr);
@@ -1118,17 +1130,24 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
1118 if (err) { 1130 if (err) {
1119 printk(KERN_ERR "%s: Cannnot locate journal superblock\n", 1131 printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
1120 __func__); 1132 __func__);
1121 jbd2_stats_proc_exit(journal); 1133 goto out_err;
1122 kfree(journal);
1123 return NULL;
1124 } 1134 }
1125 1135
1126 bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); 1136 bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
1127 J_ASSERT(bh != NULL); 1137 if (!bh) {
1138 printk(KERN_ERR
1139 "%s: Cannot get buffer for journal superblock\n",
1140 __func__);
1141 goto out_err;
1142 }
1128 journal->j_sb_buffer = bh; 1143 journal->j_sb_buffer = bh;
1129 journal->j_superblock = (journal_superblock_t *)bh->b_data; 1144 journal->j_superblock = (journal_superblock_t *)bh->b_data;
1130 1145
1131 return journal; 1146 return journal;
1147out_err:
1148 jbd2_stats_proc_exit(journal);
1149 kfree(journal);
1150 return NULL;
1132} 1151}
1133 1152
1134/* 1153/*
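Both init-path hunks above replace duplicated jbd2_stats_proc_exit()/kfree()/return NULL sequences with a single out_err label, the usual kernel idiom of one cleanup path that unwinds in reverse allocation order. A toy version of that shape, with invented structure and field names:

#include <stdio.h>
#include <stdlib.h>

struct journal {
        void *wbuf;
        void *sb_buffer;
};

/*
 * Every failure after the first allocation jumps to one label
 * that frees whatever was acquired so far; free(NULL) is a no-op,
 * so the unwind code stays a straight line.
 */
static struct journal *journal_init(void)
{
        struct journal *j = calloc(1, sizeof(*j));

        if (!j)
                return NULL;

        j->wbuf = malloc(4096);
        if (!j->wbuf)
                goto out_err;

        j->sb_buffer = malloc(4096);
        if (!j->sb_buffer)
                goto out_err;

        return j;

out_err:
        free(j->wbuf);
        free(j);
        return NULL;
}

int main(void)
{
        struct journal *j = journal_init();

        printf(j ? "ok\n" : "failed\n");
        if (j) {
                free(j->sb_buffer);
                free(j->wbuf);
                free(j);
        }
        return 0;
}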
@@ -1177,77 +1196,6 @@ static int journal_reset(journal_t *journal)
1177} 1196}
1178 1197
1179/** 1198/**
1180 * int jbd2_journal_create() - Initialise the new journal file
1181 * @journal: Journal to create. This structure must have been initialised
1182 *
1183 * Given a journal_t structure which tells us which disk blocks we can
1184 * use, create a new journal superblock and initialise all of the
1185 * journal fields from scratch.
1186 **/
1187int jbd2_journal_create(journal_t *journal)
1188{
1189 unsigned long long blocknr;
1190 struct buffer_head *bh;
1191 journal_superblock_t *sb;
1192 int i, err;
1193
1194 if (journal->j_maxlen < JBD2_MIN_JOURNAL_BLOCKS) {
1195 printk (KERN_ERR "Journal length (%d blocks) too short.\n",
1196 journal->j_maxlen);
1197 journal_fail_superblock(journal);
1198 return -EINVAL;
1199 }
1200
1201 if (journal->j_inode == NULL) {
1202 /*
1203 * We don't know what block to start at!
1204 */
1205 printk(KERN_EMERG
1206 "%s: creation of journal on external device!\n",
1207 __func__);
1208 BUG();
1209 }
1210
1211 /* Zero out the entire journal on disk. We cannot afford to
1212 have any blocks on disk beginning with JBD2_MAGIC_NUMBER. */
1213 jbd_debug(1, "JBD: Zeroing out journal blocks...\n");
1214 for (i = 0; i < journal->j_maxlen; i++) {
1215 err = jbd2_journal_bmap(journal, i, &blocknr);
1216 if (err)
1217 return err;
1218 bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
1219 lock_buffer(bh);
1220 memset (bh->b_data, 0, journal->j_blocksize);
1221 BUFFER_TRACE(bh, "marking dirty");
1222 mark_buffer_dirty(bh);
1223 BUFFER_TRACE(bh, "marking uptodate");
1224 set_buffer_uptodate(bh);
1225 unlock_buffer(bh);
1226 __brelse(bh);
1227 }
1228
1229 sync_blockdev(journal->j_dev);
1230 jbd_debug(1, "JBD: journal cleared.\n");
1231
1232 /* OK, fill in the initial static fields in the new superblock */
1233 sb = journal->j_superblock;
1234
1235 sb->s_header.h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
1236 sb->s_header.h_blocktype = cpu_to_be32(JBD2_SUPERBLOCK_V2);
1237
1238 sb->s_blocksize = cpu_to_be32(journal->j_blocksize);
1239 sb->s_maxlen = cpu_to_be32(journal->j_maxlen);
1240 sb->s_first = cpu_to_be32(1);
1241
1242 journal->j_transaction_sequence = 1;
1243
1244 journal->j_flags &= ~JBD2_ABORT;
1245 journal->j_format_version = 2;
1246
1247 return journal_reset(journal);
1248}
1249
1250/**
1251 * void jbd2_journal_update_superblock() - Update journal sb on disk. 1199 * void jbd2_journal_update_superblock() - Update journal sb on disk.
1252 * @journal: The journal to update. 1200 * @journal: The journal to update.
1253 * @wait: Set to '0' if you don't want to wait for IO completion. 1201 * @wait: Set to '0' if you don't want to wait for IO completion.
@@ -1491,7 +1439,9 @@ int jbd2_journal_destroy(journal_t *journal)
1491 spin_lock(&journal->j_list_lock); 1439 spin_lock(&journal->j_list_lock);
1492 while (journal->j_checkpoint_transactions != NULL) { 1440 while (journal->j_checkpoint_transactions != NULL) {
1493 spin_unlock(&journal->j_list_lock); 1441 spin_unlock(&journal->j_list_lock);
1442 mutex_lock(&journal->j_checkpoint_mutex);
1494 jbd2_log_do_checkpoint(journal); 1443 jbd2_log_do_checkpoint(journal);
1444 mutex_unlock(&journal->j_checkpoint_mutex);
1495 spin_lock(&journal->j_list_lock); 1445 spin_lock(&journal->j_list_lock);
1496 } 1446 }
1497 1447
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 4f925a4f3d05..46b4e347ed7d 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -25,6 +25,7 @@
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/highmem.h> 27#include <linux/highmem.h>
28#include <linux/hrtimer.h>
28 29
29static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh); 30static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
30 31
@@ -48,6 +49,7 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
48{ 49{
49 transaction->t_journal = journal; 50 transaction->t_journal = journal;
50 transaction->t_state = T_RUNNING; 51 transaction->t_state = T_RUNNING;
52 transaction->t_start_time = ktime_get();
51 transaction->t_tid = journal->j_transaction_sequence++; 53 transaction->t_tid = journal->j_transaction_sequence++;
52 transaction->t_expires = jiffies + journal->j_commit_interval; 54 transaction->t_expires = jiffies + journal->j_commit_interval;
53 spin_lock_init(&transaction->t_handle_lock); 55 spin_lock_init(&transaction->t_handle_lock);
@@ -1240,7 +1242,7 @@ int jbd2_journal_stop(handle_t *handle)
1240{ 1242{
1241 transaction_t *transaction = handle->h_transaction; 1243 transaction_t *transaction = handle->h_transaction;
1242 journal_t *journal = transaction->t_journal; 1244 journal_t *journal = transaction->t_journal;
1243 int old_handle_count, err; 1245 int err;
1244 pid_t pid; 1246 pid_t pid;
1245 1247
1246 J_ASSERT(journal_current_handle() == handle); 1248 J_ASSERT(journal_current_handle() == handle);
@@ -1263,24 +1265,54 @@ int jbd2_journal_stop(handle_t *handle)
1263 /* 1265 /*
1264 * Implement synchronous transaction batching. If the handle 1266 * Implement synchronous transaction batching. If the handle
1265 * was synchronous, don't force a commit immediately. Let's 1267 * was synchronous, don't force a commit immediately. Let's
1266 * yield and let another thread piggyback onto this transaction. 1268 * yield and let another thread piggyback onto this
1267 * Keep doing that while new threads continue to arrive. 1269 * transaction. Keep doing that while new threads continue to
1268 * It doesn't cost much - we're about to run a commit and sleep 1270 * arrive. It doesn't cost much - we're about to run a commit
1269 * on IO anyway. Speeds up many-threaded, many-dir operations 1271 * and sleep on IO anyway. Speeds up many-threaded, many-dir
1270 * by 30x or more... 1272 * operations by 30x or more...
1273 *
1274 * We try and optimize the sleep time against what the
1275 * underlying disk can do, instead of having a static sleep
1276 * time. This is useful for the case where our storage is so
1277 * fast that it is more optimal to go ahead and force a flush
1278 * and wait for the transaction to be committed than it is to
1279 * wait for an arbitrary amount of time for new writers to
1280 * join the transaction. We achieve this by measuring how
1281 * long it takes to commit a transaction, and compare it with
1282 * how long this transaction has been running, and if run time
1283 * < commit time then we sleep for the delta and commit. This
1284 * greatly helps super fast disks that would see slowdowns as
1285 * more threads started doing fsyncs.
1271 * 1286 *
1272 * But don't do this if this process was the most recent one to 1287 * But don't do this if this process was the most recent one
1273 * perform a synchronous write. We do this to detect the case where a 1288 * to perform a synchronous write. We do this to detect the
1274 * single process is doing a stream of sync writes. No point in waiting 1289 * case where a single process is doing a stream of sync
1275 * for joiners in that case. 1290 * writes. No point in waiting for joiners in that case.
1276 */ 1291 */
1277 pid = current->pid; 1292 pid = current->pid;
1278 if (handle->h_sync && journal->j_last_sync_writer != pid) { 1293 if (handle->h_sync && journal->j_last_sync_writer != pid) {
1294 u64 commit_time, trans_time;
1295
1279 journal->j_last_sync_writer = pid; 1296 journal->j_last_sync_writer = pid;
1280 do { 1297
1281 old_handle_count = transaction->t_handle_count; 1298 spin_lock(&journal->j_state_lock);
1282 schedule_timeout_uninterruptible(1); 1299 commit_time = journal->j_average_commit_time;
1283 } while (old_handle_count != transaction->t_handle_count); 1300 spin_unlock(&journal->j_state_lock);
1301
1302 trans_time = ktime_to_ns(ktime_sub(ktime_get(),
1303 transaction->t_start_time));
1304
1305 commit_time = max_t(u64, commit_time,
1306 1000*journal->j_min_batch_time);
1307 commit_time = min_t(u64, commit_time,
1308 1000*journal->j_max_batch_time);
1309
1310 if (trans_time < commit_time) {
1311 ktime_t expires = ktime_add_ns(ktime_get(),
1312 commit_time);
1313 set_current_state(TASK_UNINTERRUPTIBLE);
1314 schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
1315 }
1284 } 1316 }
1285 1317
1286 current->journal_info = NULL; 1318 current->journal_info = NULL;
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
index c73fa89b5f8a..170d289ac785 100644
--- a/fs/jffs2/compr_rubin.c
+++ b/fs/jffs2/compr_rubin.c
@@ -22,9 +22,7 @@
22 22
23 23
24#define BIT_DIVIDER_MIPS 1043 24#define BIT_DIVIDER_MIPS 1043
25static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */ 25static int bits_mips[8] = { 277, 249, 290, 267, 229, 341, 212, 241};
26
27#include <linux/errno.h>
28 26
29struct pushpull { 27struct pushpull {
30 unsigned char *buf; 28 unsigned char *buf;
@@ -43,7 +41,9 @@ struct rubin_state {
43 int bits[8]; 41 int bits[8];
44}; 42};
45 43
46static inline void init_pushpull(struct pushpull *pp, char *buf, unsigned buflen, unsigned ofs, unsigned reserve) 44static inline void init_pushpull(struct pushpull *pp, char *buf,
45 unsigned buflen, unsigned ofs,
46 unsigned reserve)
47{ 47{
48 pp->buf = buf; 48 pp->buf = buf;
49 pp->buflen = buflen; 49 pp->buflen = buflen;
@@ -53,16 +53,14 @@ static inline void init_pushpull(struct pushpull *pp, char *buf, unsigned buflen
53 53
54static inline int pushbit(struct pushpull *pp, int bit, int use_reserved) 54static inline int pushbit(struct pushpull *pp, int bit, int use_reserved)
55{ 55{
56 if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve)) { 56 if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve))
57 return -ENOSPC; 57 return -ENOSPC;
58 }
59 58
60 if (bit) { 59 if (bit)
61 pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs &7))); 60 pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs & 7)));
62 } 61 else
63 else { 62 pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs & 7)));
64 pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs &7))); 63
65 }
66 pp->ofs++; 64 pp->ofs++;
67 65
68 return 0; 66 return 0;
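pushbit() above packs the coder's output MSB-first: bit n of the stream lands in byte n>>3 at bit position 7-(n&7) within that byte. A small runnable illustration of the same packing:

#include <stdio.h>

/* MSB-first bit packing as in pushbit() above. */
static void put_bit(unsigned char *buf, unsigned ofs, int bit)
{
        if (bit)
                buf[ofs >> 3] |= 1 << (7 - (ofs & 7));
        else
                buf[ofs >> 3] &= ~(1 << (7 - (ofs & 7)));
}

int main(void)
{
        unsigned char buf[2] = { 0, 0 };
        int stream[] = { 1, 0, 1, 1, 0, 0, 1, 0, 1 };   /* 9 bits */

        for (unsigned i = 0; i < 9; i++)
                put_bit(buf, i, stream[i]);

        /* First 8 bits fill buf[0] top-down: 10110010 = 0xb2;
         * the ninth bit lands in the top of buf[1]: 0x80. */
        printf("%02x %02x\n", buf[0], buf[1]);
        return 0;
}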
@@ -97,6 +95,7 @@ static void init_rubin(struct rubin_state *rs, int div, int *bits)
97 rs->p = (long) (2 * UPPER_BIT_RUBIN); 95 rs->p = (long) (2 * UPPER_BIT_RUBIN);
98 rs->bit_number = (long) 0; 96 rs->bit_number = (long) 0;
99 rs->bit_divider = div; 97 rs->bit_divider = div;
98
100 for (c=0; c<8; c++) 99 for (c=0; c<8; c++)
101 rs->bits[c] = bits[c]; 100 rs->bits[c] = bits[c];
102} 101}
@@ -108,7 +107,8 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol)
108 long i0, i1; 107 long i0, i1;
109 int ret; 108 int ret;
110 109
111 while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) { 110 while ((rs->q >= UPPER_BIT_RUBIN) ||
111 ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
112 rs->bit_number++; 112 rs->bit_number++;
113 113
114 ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0); 114 ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0);
@@ -119,12 +119,12 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol)
119 rs->p <<= 1; 119 rs->p <<= 1;
120 } 120 }
121 i0 = A * rs->p / (A + B); 121 i0 = A * rs->p / (A + B);
122 if (i0 <= 0) { 122 if (i0 <= 0)
123 i0 = 1; 123 i0 = 1;
124 } 124
125 if (i0 >= rs->p) { 125 if (i0 >= rs->p)
126 i0 = rs->p - 1; 126 i0 = rs->p - 1;
127 } 127
128 i1 = rs->p - i0; 128 i1 = rs->p - i0;
129 129
130 if (symbol == 0) 130 if (symbol == 0)
@@ -157,11 +157,13 @@ static void init_decode(struct rubin_state *rs, int div, int *bits)
157 /* behalve lower */ 157 /* behalve lower */
158 rs->rec_q = 0; 158 rs->rec_q = 0;
159 159
160 for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE; rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp))) 160 for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE;
161 rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp)))
161 ; 162 ;
162} 163}
163 164
164static void __do_decode(struct rubin_state *rs, unsigned long p, unsigned long q) 165static void __do_decode(struct rubin_state *rs, unsigned long p,
166 unsigned long q)
165{ 167{
166 register unsigned long lower_bits_rubin = LOWER_BITS_RUBIN; 168 register unsigned long lower_bits_rubin = LOWER_BITS_RUBIN;
167 unsigned long rec_q; 169 unsigned long rec_q;
@@ -207,12 +209,11 @@ static int decode(struct rubin_state *rs, long A, long B)
207 __do_decode(rs, p, q); 209 __do_decode(rs, p, q);
208 210
209 i0 = A * rs->p / (A + B); 211 i0 = A * rs->p / (A + B);
210 if (i0 <= 0) { 212 if (i0 <= 0)
211 i0 = 1; 213 i0 = 1;
212 } 214
213 if (i0 >= rs->p) { 215 if (i0 >= rs->p)
214 i0 = rs->p - 1; 216 i0 = rs->p - 1;
215 }
216 217
217 threshold = rs->q + i0; 218 threshold = rs->q + i0;
218 symbol = rs->rec_q >= threshold; 219 symbol = rs->rec_q >= threshold;
@@ -234,14 +235,15 @@ static int out_byte(struct rubin_state *rs, unsigned char byte)
234 struct rubin_state rs_copy; 235 struct rubin_state rs_copy;
235 rs_copy = *rs; 236 rs_copy = *rs;
236 237
237 for (i=0;i<8;i++) { 238 for (i=0; i<8; i++) {
238 ret = encode(rs, rs->bit_divider-rs->bits[i],rs->bits[i],byte&1); 239 ret = encode(rs, rs->bit_divider-rs->bits[i],
240 rs->bits[i], byte & 1);
239 if (ret) { 241 if (ret) {
240 /* Failed. Restore old state */ 242 /* Failed. Restore old state */
241 *rs = rs_copy; 243 *rs = rs_copy;
242 return ret; 244 return ret;
243 } 245 }
244 byte=byte>>1; 246 byte >>= 1 ;
245 } 247 }
246 return 0; 248 return 0;
247} 249}
@@ -251,7 +253,8 @@ static int in_byte(struct rubin_state *rs)
251 int i, result = 0, bit_divider = rs->bit_divider; 253 int i, result = 0, bit_divider = rs->bit_divider;
252 254
253 for (i = 0; i < 8; i++) 255 for (i = 0; i < 8; i++)
254 result |= decode(rs, bit_divider - rs->bits[i], rs->bits[i]) << i; 256 result |= decode(rs, bit_divider - rs->bits[i],
257 rs->bits[i]) << i;
255 258
256 return result; 259 return result;
257} 260}
@@ -259,7 +262,8 @@ static int in_byte(struct rubin_state *rs)
259 262
260 263
261static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, 264static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in,
262 unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) 265 unsigned char *cpage_out, uint32_t *sourcelen,
266 uint32_t *dstlen)
263 { 267 {
264 int outpos = 0; 268 int outpos = 0;
265 int pos=0; 269 int pos=0;
@@ -295,7 +299,8 @@ static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in,
295int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, 299int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out,
296 uint32_t *sourcelen, uint32_t *dstlen, void *model) 300 uint32_t *sourcelen, uint32_t *dstlen, void *model)
297{ 301{
298 return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen); 302 return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in,
303 cpage_out, sourcelen, dstlen);
299} 304}
300#endif 305#endif
301static int jffs2_dynrubin_compress(unsigned char *data_in, 306static int jffs2_dynrubin_compress(unsigned char *data_in,
@@ -316,9 +321,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
316 return -1; 321 return -1;
317 322
318 memset(histo, 0, 256); 323 memset(histo, 0, 256);
319 for (i=0; i<mysrclen; i++) { 324 for (i=0; i<mysrclen; i++)
320 histo[data_in[i]]++; 325 histo[data_in[i]]++;
321 }
322 memset(bits, 0, sizeof(int)*8); 326 memset(bits, 0, sizeof(int)*8);
323 for (i=0; i<256; i++) { 327 for (i=0; i<256; i++) {
324 if (i&128) 328 if (i&128)
@@ -346,7 +350,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
346 cpage_out[i] = bits[i]; 350 cpage_out[i] = bits[i];
347 } 351 }
348 352
349 ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen); 353 ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen,
354 &mydstlen);
350 if (ret) 355 if (ret)
351 return ret; 356 return ret;
352 357
@@ -363,8 +368,10 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
363 return 0; 368 return 0;
364} 369}
365 370
366static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in, 371static void rubin_do_decompress(int bit_divider, int *bits,
367 unsigned char *page_out, uint32_t srclen, uint32_t destlen) 372 unsigned char *cdata_in,
373 unsigned char *page_out, uint32_t srclen,
374 uint32_t destlen)
368{ 375{
369 int outpos = 0; 376 int outpos = 0;
370 struct rubin_state rs; 377 struct rubin_state rs;
@@ -372,9 +379,8 @@ static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata
372 init_pushpull(&rs.pp, cdata_in, srclen, 0, 0); 379 init_pushpull(&rs.pp, cdata_in, srclen, 0, 0);
373 init_decode(&rs, bit_divider, bits); 380 init_decode(&rs, bit_divider, bits);
374 381
375 while (outpos < destlen) { 382 while (outpos < destlen)
376 page_out[outpos++] = in_byte(&rs); 383 page_out[outpos++] = in_byte(&rs);
377 }
378} 384}
379 385
380 386
@@ -383,7 +389,8 @@ static int jffs2_rubinmips_decompress(unsigned char *data_in,
383 uint32_t sourcelen, uint32_t dstlen, 389 uint32_t sourcelen, uint32_t dstlen,
384 void *model) 390 void *model)
385{ 391{
386 rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen); 392 rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in,
393 cpage_out, sourcelen, dstlen);
387 return 0; 394 return 0;
388} 395}
389 396
@@ -398,52 +405,53 @@ static int jffs2_dynrubin_decompress(unsigned char *data_in,
398 for (c=0; c<8; c++) 405 for (c=0; c<8; c++)
399 bits[c] = data_in[c]; 406 bits[c] = data_in[c];
400 407
401 rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8, dstlen); 408 rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8,
409 dstlen);
402 return 0; 410 return 0;
403} 411}
404 412
405static struct jffs2_compressor jffs2_rubinmips_comp = { 413static struct jffs2_compressor jffs2_rubinmips_comp = {
406 .priority = JFFS2_RUBINMIPS_PRIORITY, 414 .priority = JFFS2_RUBINMIPS_PRIORITY,
407 .name = "rubinmips", 415 .name = "rubinmips",
408 .compr = JFFS2_COMPR_DYNRUBIN, 416 .compr = JFFS2_COMPR_DYNRUBIN,
409 .compress = NULL, /*&jffs2_rubinmips_compress,*/ 417 .compress = NULL, /*&jffs2_rubinmips_compress,*/
410 .decompress = &jffs2_rubinmips_decompress, 418 .decompress = &jffs2_rubinmips_decompress,
411#ifdef JFFS2_RUBINMIPS_DISABLED 419#ifdef JFFS2_RUBINMIPS_DISABLED
412 .disabled = 1, 420 .disabled = 1,
413#else 421#else
414 .disabled = 0, 422 .disabled = 0,
415#endif 423#endif
416}; 424};
417 425
418int jffs2_rubinmips_init(void) 426int jffs2_rubinmips_init(void)
419{ 427{
420 return jffs2_register_compressor(&jffs2_rubinmips_comp); 428 return jffs2_register_compressor(&jffs2_rubinmips_comp);
421} 429}
422 430
423void jffs2_rubinmips_exit(void) 431void jffs2_rubinmips_exit(void)
424{ 432{
425 jffs2_unregister_compressor(&jffs2_rubinmips_comp); 433 jffs2_unregister_compressor(&jffs2_rubinmips_comp);
426} 434}
427 435
428static struct jffs2_compressor jffs2_dynrubin_comp = { 436static struct jffs2_compressor jffs2_dynrubin_comp = {
429 .priority = JFFS2_DYNRUBIN_PRIORITY, 437 .priority = JFFS2_DYNRUBIN_PRIORITY,
430 .name = "dynrubin", 438 .name = "dynrubin",
431 .compr = JFFS2_COMPR_RUBINMIPS, 439 .compr = JFFS2_COMPR_RUBINMIPS,
432 .compress = jffs2_dynrubin_compress, 440 .compress = jffs2_dynrubin_compress,
433 .decompress = &jffs2_dynrubin_decompress, 441 .decompress = &jffs2_dynrubin_decompress,
434#ifdef JFFS2_DYNRUBIN_DISABLED 442#ifdef JFFS2_DYNRUBIN_DISABLED
435 .disabled = 1, 443 .disabled = 1,
436#else 444#else
437 .disabled = 0, 445 .disabled = 0,
438#endif 446#endif
439}; 447};
440 448
441int jffs2_dynrubin_init(void) 449int jffs2_dynrubin_init(void)
442{ 450{
443 return jffs2_register_compressor(&jffs2_dynrubin_comp); 451 return jffs2_register_compressor(&jffs2_dynrubin_comp);
444} 452}
445 453
446void jffs2_dynrubin_exit(void) 454void jffs2_dynrubin_exit(void)
447{ 455{
448 jffs2_unregister_compressor(&jffs2_dynrubin_comp); 456 jffs2_unregister_compressor(&jffs2_dynrubin_comp);
449} 457}
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 259461b910af..c32b4a1ad6cf 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -175,7 +175,7 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock
175{ 175{
176 /* For NAND, if the failure did not occur at the device level for a 176 /* For NAND, if the failure did not occur at the device level for a
177 specific physical page, don't bother updating the bad block table. */ 177 specific physical page, don't bother updating the bad block table. */
178 if (jffs2_cleanmarker_oob(c) && (bad_offset != MTD_FAIL_ADDR_UNKNOWN)) { 178 if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) {
179 /* We had a device-level failure to erase. Let's see if we've 179 /* We had a device-level failure to erase. Let's see if we've
180 failed too many times. */ 180 failed too many times. */
181 if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) { 181 if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
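The cast added above matters because MTD's fail_addr was widened to 64 bits in the same series (note the %08llx in the next hunk): once the sentinel is a 64-bit all-ones value, a 32-bit bad_offset promoted for comparison can never equal it, so the uncast test was always true. A sketch of the promotion pitfall, with the sentinel name invented:

#include <stdint.h>
#include <stdio.h>

/* 64-bit "unknown address" sentinel, as in the MTD API. */
#define FAIL_ADDR_UNKNOWN (~0ULL)

int main(void)
{
        uint32_t bad_offset = (uint32_t)FAIL_ADDR_UNKNOWN; /* truncated copy */

        /*
         * Promoted to 64 bits, 0x00000000ffffffff can never equal
         * 0xffffffffffffffff, so this test is always true...
         */
        if (bad_offset != FAIL_ADDR_UNKNOWN)
                printf("without cast: sentinel missed\n");

        /* ...while truncating the sentinel first makes it match. */
        if (bad_offset == (uint32_t)FAIL_ADDR_UNKNOWN)
                printf("with cast: sentinel detected\n");
        return 0;
}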
@@ -209,7 +209,8 @@ static void jffs2_erase_callback(struct erase_info *instr)
209 struct erase_priv_struct *priv = (void *)instr->priv; 209 struct erase_priv_struct *priv = (void *)instr->priv;
210 210
211 if(instr->state != MTD_ERASE_DONE) { 211 if(instr->state != MTD_ERASE_DONE) {
212 printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state); 212 printk(KERN_WARNING "Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n",
213 (unsigned long long)instr->addr, instr->state);
213 jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); 214 jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
214 } else { 215 } else {
215 jffs2_erase_succeeded(priv->c, priv->jeb); 216 jffs2_erase_succeeded(priv->c, priv->jeb);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 54ff4c77aaa3..d861096c9d81 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -3868,7 +3868,7 @@ static void ocfs2_split_record(struct inode *inode,
3868 struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el; 3868 struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el;
3869 struct ocfs2_extent_rec *rec, *tmprec; 3869 struct ocfs2_extent_rec *rec, *tmprec;
3870 3870
3871 right_el = path_leaf_el(right_path);; 3871 right_el = path_leaf_el(right_path);
3872 if (left_path) 3872 if (left_path)
3873 left_el = path_leaf_el(left_path); 3873 left_el = path_leaf_el(left_path);
3874 3874
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index f731ab491795..b0c4cadd4c45 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1324,7 +1324,7 @@ again:
1324 goto out; 1324 goto out;
1325 } 1325 }
1326 1326
1327 mlog(0, "lock %s, successfull return from ocfs2_dlm_lock\n", 1327 mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
1328 lockres->l_name); 1328 lockres->l_name);
1329 1329
1330 /* At this point we've gone inside the dlm and need to 1330 /* At this point we've gone inside the dlm and need to
@@ -2951,7 +2951,7 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb,
2951 ocfs2_dlm_dump_lksb(&lockres->l_lksb); 2951 ocfs2_dlm_dump_lksb(&lockres->l_lksb);
2952 BUG(); 2952 BUG();
2953 } 2953 }
2954 mlog(0, "lock %s, successfull return from ocfs2_dlm_unlock\n", 2954 mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
2955 lockres->l_name); 2955 lockres->l_name);
2956 2956
2957 ocfs2_wait_on_busy_lock(lockres); 2957 ocfs2_wait_on_busy_lock(lockres);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index e8f795f978aa..a5887df2cd8a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1605,7 +1605,7 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd,
1605 struct ocfs2_space_resv *sr) 1605 struct ocfs2_space_resv *sr)
1606{ 1606{
1607 struct inode *inode = file->f_path.dentry->d_inode; 1607 struct inode *inode = file->f_path.dentry->d_inode;
1608 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);; 1608 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1609 1609
1610 if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) && 1610 if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
1611 !ocfs2_writes_unwritten_extents(osb)) 1611 !ocfs2_writes_unwritten_extents(osb))
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 5198ada67398..6d720243f5f4 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -334,6 +334,7 @@ void delete_partition(struct gendisk *disk, int partno)
334 334
335 blk_free_devt(part_devt(part)); 335 blk_free_devt(part_devt(part));
336 rcu_assign_pointer(ptbl->part[partno], NULL); 336 rcu_assign_pointer(ptbl->part[partno], NULL);
337 rcu_assign_pointer(ptbl->last_lookup, NULL);
337 kobject_put(part->holder_dir); 338 kobject_put(part->holder_dir);
338 device_del(part_to_dev(part)); 339 device_del(part_to_dev(part));
339 340
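The one-liner above clears the partition table's one-entry lookup cache (last_lookup) alongside the slot itself, so a later lookup cannot be handed a pointer to the just-freed partition through the stale cache. A toy, non-RCU rendition of the bug and the fix (the kernel uses rcu_assign_pointer(); plain pointers here):

#include <stdio.h>
#include <stdlib.h>

struct part { int partno; };

static struct part *table[8];
static struct part *last_lookup;        /* one-entry cache */

static struct part *lookup(int partno)
{
        struct part *p = last_lookup;

        if (p && p->partno == partno)
                return p;               /* cache hit */
        p = table[partno];
        if (p)
                last_lookup = p;
        return p;
}

static void delete_part(int partno)
{
        free(table[partno]);
        table[partno] = NULL;
        last_lookup = NULL;             /* the fix: drop the cache too */
}

int main(void)
{
        table[1] = malloc(sizeof(struct part));
        table[1]->partno = 1;

        lookup(1);                      /* primes the cache */
        delete_part(1);
        printf("%s\n", lookup(1) ? "dangling!" : "gone, as expected");
        return 0;
}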
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 03ec59504906..5edcc3f92ba7 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -47,8 +47,6 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
47 47
48 offset = (unsigned long)(*ppos % PAGE_SIZE); 48 offset = (unsigned long)(*ppos % PAGE_SIZE);
49 pfn = (unsigned long)(*ppos / PAGE_SIZE); 49 pfn = (unsigned long)(*ppos / PAGE_SIZE);
50 if (pfn > saved_max_pfn)
51 return -EINVAL;
52 50
53 do { 51 do {
54 if (count > (PAGE_SIZE - offset)) 52 if (count > (PAGE_SIZE - offset))
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index c97d4c931715..98a232f7196b 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -490,7 +490,7 @@ static mode_t romfs_modemap[] =
490static struct inode * 490static struct inode *
491romfs_iget(struct super_block *sb, unsigned long ino) 491romfs_iget(struct super_block *sb, unsigned long ino)
492{ 492{
493 int nextfh; 493 int nextfh, ret;
494 struct romfs_inode ri; 494 struct romfs_inode ri;
495 struct inode *i; 495 struct inode *i;
496 496
@@ -526,11 +526,11 @@ romfs_iget(struct super_block *sb, unsigned long ino)
526 i->i_mtime.tv_nsec = i->i_atime.tv_nsec = i->i_ctime.tv_nsec = 0; 526 i->i_mtime.tv_nsec = i->i_atime.tv_nsec = i->i_ctime.tv_nsec = 0;
527 527
528 /* Precalculate the data offset */ 528 /* Precalculate the data offset */
529 ino = romfs_strnlen(i, ino+ROMFH_SIZE, ROMFS_MAXFN); 529 ret = romfs_strnlen(i, ino + ROMFH_SIZE, ROMFS_MAXFN);
530 if (ino >= 0) 530 if (ret >= 0)
531 ino = ((ROMFH_SIZE+ino+1+ROMFH_PAD)&ROMFH_MASK); 531 ino = (ROMFH_SIZE + ret + 1 + ROMFH_PAD) & ROMFH_MASK;
532 else 532 else
533 ino = 0; 533 ino = 0;
534 534
535 ROMFS_I(i)->i_metasize = ino; 535 ROMFS_I(i)->i_metasize = ino;
536 ROMFS_I(i)->i_dataoffset = ino+(i->i_ino&ROMFH_MASK); 536 ROMFS_I(i)->i_dataoffset = ino+(i->i_ino&ROMFH_MASK);
diff --git a/fs/splice.c b/fs/splice.c
index 1abab5cee4ba..a54b3e3f10a7 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -21,6 +21,7 @@
21#include <linux/file.h> 21#include <linux/file.h>
22#include <linux/pagemap.h> 22#include <linux/pagemap.h>
23#include <linux/splice.h> 23#include <linux/splice.h>
24#include <linux/memcontrol.h>
24#include <linux/mm_inline.h> 25#include <linux/mm_inline.h>
25#include <linux/swap.h> 26#include <linux/swap.h>
26#include <linux/writeback.h> 27#include <linux/writeback.h>
diff --git a/fs/super.c b/fs/super.c
index cb20744ec789..ed080c417167 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -458,7 +458,6 @@ void sync_filesystems(int wait)
458 if (sb->s_flags & MS_RDONLY) 458 if (sb->s_flags & MS_RDONLY)
459 continue; 459 continue;
460 sb->s_need_sync_fs = 1; 460 sb->s_need_sync_fs = 1;
461 async_synchronize_full_special(&sb->s_async_list);
462 } 461 }
463 462
464restart: 463restart:
@@ -471,6 +470,7 @@ restart:
471 sb->s_count++; 470 sb->s_count++;
472 spin_unlock(&sb_lock); 471 spin_unlock(&sb_lock);
473 down_read(&sb->s_umount); 472 down_read(&sb->s_umount);
473 async_synchronize_full_special(&sb->s_async_list);
474 if (sb->s_root && (wait || sb->s_dirt)) 474 if (sb->s_root && (wait || sb->s_dirt))
475 sb->s_op->sync_fs(sb, wait); 475 sb->s_op->sync_fs(sb, wait);
476 up_read(&sb->s_umount); 476 up_read(&sb->s_umount);
@@ -810,6 +810,7 @@ int get_sb_bdev(struct file_system_type *fs_type,
810 } 810 }
811 811
812 s->s_flags |= MS_ACTIVE; 812 s->s_flags |= MS_ACTIVE;
813 bdev->bd_super = s;
813 } 814 }
814 815
815 return simple_set_mnt(mnt, s); 816 return simple_set_mnt(mnt, s);
@@ -829,6 +830,7 @@ void kill_block_super(struct super_block *sb)
829 struct block_device *bdev = sb->s_bdev; 830 struct block_device *bdev = sb->s_bdev;
830 fmode_t mode = sb->s_mode; 831 fmode_t mode = sb->s_mode;
831 832
833 bdev->bd_super = 0;
832 generic_shutdown_super(sb); 834 generic_shutdown_super(sb);
833 sync_blockdev(bdev); 835 sync_blockdev(bdev);
834 close_bdev_exclusive(bdev, mode); 836 close_bdev_exclusive(bdev, mode);
diff --git a/include/acpi/acdisasm.h b/include/acpi/acdisasm.h
deleted file mode 100644
index 0c1ed387073c..000000000000
--- a/include/acpi/acdisasm.h
+++ /dev/null
@@ -1,445 +0,0 @@
1/******************************************************************************
2 *
3 * Name: acdisasm.h - AML disassembler
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#ifndef __ACDISASM_H__
45#define __ACDISASM_H__
46
47#include "amlresrc.h"
48
49#define BLOCK_NONE 0
50#define BLOCK_PAREN 1
51#define BLOCK_BRACE 2
52#define BLOCK_COMMA_LIST 4
53#define ACPI_DEFAULT_RESNAME *(u32 *) "__RD"
54
55struct acpi_external_list {
56 char *path;
57 char *internal_path;
58 struct acpi_external_list *next;
59 u32 value;
60 u16 length;
61 u8 type;
62};
63
64extern struct acpi_external_list *acpi_gbl_external_list;
65
66typedef const struct acpi_dmtable_info {
67 u8 opcode;
68 u8 offset;
69 char *name;
70
71} acpi_dmtable_info;
72
73/*
74 * Values for Opcode above.
75 * Note: 0-7 must not change, used as a flag shift value
76 */
77#define ACPI_DMT_FLAG0 0
78#define ACPI_DMT_FLAG1 1
79#define ACPI_DMT_FLAG2 2
80#define ACPI_DMT_FLAG3 3
81#define ACPI_DMT_FLAG4 4
82#define ACPI_DMT_FLAG5 5
83#define ACPI_DMT_FLAG6 6
84#define ACPI_DMT_FLAG7 7
85#define ACPI_DMT_FLAGS0 8
86#define ACPI_DMT_FLAGS2 9
87#define ACPI_DMT_UINT8 10
88#define ACPI_DMT_UINT16 11
89#define ACPI_DMT_UINT24 12
90#define ACPI_DMT_UINT32 13
91#define ACPI_DMT_UINT56 14
92#define ACPI_DMT_UINT64 15
93#define ACPI_DMT_STRING 16
94#define ACPI_DMT_NAME4 17
95#define ACPI_DMT_NAME6 18
96#define ACPI_DMT_NAME8 19
97#define ACPI_DMT_CHKSUM 20
98#define ACPI_DMT_SPACEID 21
99#define ACPI_DMT_GAS 22
100#define ACPI_DMT_ASF 23
101#define ACPI_DMT_DMAR 24
102#define ACPI_DMT_HEST 25
103#define ACPI_DMT_HESTNTFY 26
104#define ACPI_DMT_HESTNTYP 27
105#define ACPI_DMT_MADT 28
106#define ACPI_DMT_SRAT 29
107#define ACPI_DMT_EXIT 30
108#define ACPI_DMT_SIG 31
109
110typedef
111void (*acpi_dmtable_handler) (struct acpi_table_header * table);
112
113struct acpi_dmtable_data {
114 char *signature;
115 struct acpi_dmtable_info *table_info;
116 acpi_dmtable_handler table_handler;
117 char *name;
118};
119
120struct acpi_op_walk_info {
121 u32 level;
122 u32 last_level;
123 u32 count;
124 u32 bit_offset;
125 u32 flags;
126 struct acpi_walk_state *walk_state;
127};
128
129typedef
130acpi_status(*asl_walk_callback) (union acpi_parse_object * op,
131 u32 level, void *context);
132
133struct acpi_resource_tag {
134 u32 bit_index;
135 char *tag;
136};
137
138/* Strings used for decoding flags to ASL keywords */
139
140extern const char *acpi_gbl_word_decode[];
141extern const char *acpi_gbl_irq_decode[];
142extern const char *acpi_gbl_lock_rule[];
143extern const char *acpi_gbl_access_types[];
144extern const char *acpi_gbl_update_rules[];
145extern const char *acpi_gbl_match_ops[];
146
147extern struct acpi_dmtable_info acpi_dm_table_info_asf0[];
148extern struct acpi_dmtable_info acpi_dm_table_info_asf1[];
149extern struct acpi_dmtable_info acpi_dm_table_info_asf1a[];
150extern struct acpi_dmtable_info acpi_dm_table_info_asf2[];
151extern struct acpi_dmtable_info acpi_dm_table_info_asf2a[];
152extern struct acpi_dmtable_info acpi_dm_table_info_asf3[];
153extern struct acpi_dmtable_info acpi_dm_table_info_asf4[];
154extern struct acpi_dmtable_info acpi_dm_table_info_asf_hdr[];
155extern struct acpi_dmtable_info acpi_dm_table_info_boot[];
156extern struct acpi_dmtable_info acpi_dm_table_info_bert[];
157extern struct acpi_dmtable_info acpi_dm_table_info_cpep[];
158extern struct acpi_dmtable_info acpi_dm_table_info_cpep0[];
159extern struct acpi_dmtable_info acpi_dm_table_info_dbgp[];
160extern struct acpi_dmtable_info acpi_dm_table_info_dmar[];
161extern struct acpi_dmtable_info acpi_dm_table_info_dmar_hdr[];
162extern struct acpi_dmtable_info acpi_dm_table_info_dmar_scope[];
163extern struct acpi_dmtable_info acpi_dm_table_info_dmar0[];
164extern struct acpi_dmtable_info acpi_dm_table_info_dmar1[];
165extern struct acpi_dmtable_info acpi_dm_table_info_dmar2[];
166extern struct acpi_dmtable_info acpi_dm_table_info_ecdt[];
167extern struct acpi_dmtable_info acpi_dm_table_info_einj[];
168extern struct acpi_dmtable_info acpi_dm_table_info_einj0[];
169extern struct acpi_dmtable_info acpi_dm_table_info_erst[];
170extern struct acpi_dmtable_info acpi_dm_table_info_facs[];
171extern struct acpi_dmtable_info acpi_dm_table_info_fadt1[];
172extern struct acpi_dmtable_info acpi_dm_table_info_fadt2[];
173extern struct acpi_dmtable_info acpi_dm_table_info_gas[];
174extern struct acpi_dmtable_info acpi_dm_table_info_header[];
175extern struct acpi_dmtable_info acpi_dm_table_info_hest[];
176extern struct acpi_dmtable_info acpi_dm_table_info_hest9[];
177extern struct acpi_dmtable_info acpi_dm_table_info_hest_notify[];
178extern struct acpi_dmtable_info acpi_dm_table_info_hpet[];
179extern struct acpi_dmtable_info acpi_dm_table_info_madt[];
180extern struct acpi_dmtable_info acpi_dm_table_info_madt0[];
181extern struct acpi_dmtable_info acpi_dm_table_info_madt1[];
182extern struct acpi_dmtable_info acpi_dm_table_info_madt2[];
183extern struct acpi_dmtable_info acpi_dm_table_info_madt3[];
184extern struct acpi_dmtable_info acpi_dm_table_info_madt4[];
185extern struct acpi_dmtable_info acpi_dm_table_info_madt5[];
186extern struct acpi_dmtable_info acpi_dm_table_info_madt6[];
187extern struct acpi_dmtable_info acpi_dm_table_info_madt7[];
188extern struct acpi_dmtable_info acpi_dm_table_info_madt8[];
189extern struct acpi_dmtable_info acpi_dm_table_info_madt9[];
190extern struct acpi_dmtable_info acpi_dm_table_info_madt10[];
191extern struct acpi_dmtable_info acpi_dm_table_info_madt_hdr[];
192extern struct acpi_dmtable_info acpi_dm_table_info_mcfg[];
193extern struct acpi_dmtable_info acpi_dm_table_info_mcfg0[];
194extern struct acpi_dmtable_info acpi_dm_table_info_rsdp1[];
195extern struct acpi_dmtable_info acpi_dm_table_info_rsdp2[];
196extern struct acpi_dmtable_info acpi_dm_table_info_sbst[];
197extern struct acpi_dmtable_info acpi_dm_table_info_slic[];
198extern struct acpi_dmtable_info acpi_dm_table_info_slit[];
199extern struct acpi_dmtable_info acpi_dm_table_info_spcr[];
200extern struct acpi_dmtable_info acpi_dm_table_info_spmi[];
201extern struct acpi_dmtable_info acpi_dm_table_info_srat[];
202extern struct acpi_dmtable_info acpi_dm_table_info_srat_hdr[];
203extern struct acpi_dmtable_info acpi_dm_table_info_srat0[];
204extern struct acpi_dmtable_info acpi_dm_table_info_srat1[];
205extern struct acpi_dmtable_info acpi_dm_table_info_srat2[];
206extern struct acpi_dmtable_info acpi_dm_table_info_tcpa[];
207extern struct acpi_dmtable_info acpi_dm_table_info_wdrt[];
208
209/*
210 * dmtable
211 */
212void acpi_dm_dump_data_table(struct acpi_table_header *table);
213
214acpi_status
215acpi_dm_dump_table(u32 table_length,
216 u32 table_offset,
217 void *table,
218 u32 sub_table_length, struct acpi_dmtable_info *info);
219
220void acpi_dm_line_header(u32 offset, u32 byte_length, char *name);
221
222void acpi_dm_line_header2(u32 offset, u32 byte_length, char *name, u32 value);
223
224/*
225 * dmtbdump
226 */
227void acpi_dm_dump_asf(struct acpi_table_header *table);
228
229void acpi_dm_dump_cpep(struct acpi_table_header *table);
230
231void acpi_dm_dump_dmar(struct acpi_table_header *table);
232
233void acpi_dm_dump_einj(struct acpi_table_header *table);
234
235void acpi_dm_dump_erst(struct acpi_table_header *table);
236
237void acpi_dm_dump_fadt(struct acpi_table_header *table);
238
239void acpi_dm_dump_hest(struct acpi_table_header *table);
240
241void acpi_dm_dump_mcfg(struct acpi_table_header *table);
242
243void acpi_dm_dump_madt(struct acpi_table_header *table);
244
245u32 acpi_dm_dump_rsdp(struct acpi_table_header *table);
246
247void acpi_dm_dump_rsdt(struct acpi_table_header *table);
248
249void acpi_dm_dump_slit(struct acpi_table_header *table);
250
251void acpi_dm_dump_srat(struct acpi_table_header *table);
252
253void acpi_dm_dump_xsdt(struct acpi_table_header *table);
254
255/*
256 * dmwalk
257 */
258void
259acpi_dm_disassemble(struct acpi_walk_state *walk_state,
260 union acpi_parse_object *origin, u32 num_opcodes);
261
262void
263acpi_dm_walk_parse_tree(union acpi_parse_object *op,
264 asl_walk_callback descending_callback,
265 asl_walk_callback ascending_callback, void *context);
266
267/*
268 * dmopcode
269 */
270void
271acpi_dm_disassemble_one_op(struct acpi_walk_state *walk_state,
272 struct acpi_op_walk_info *info,
273 union acpi_parse_object *op);
274
275void acpi_dm_decode_internal_object(union acpi_operand_object *obj_desc);
276
277u32 acpi_dm_list_type(union acpi_parse_object *op);
278
279void acpi_dm_method_flags(union acpi_parse_object *op);
280
281void acpi_dm_field_flags(union acpi_parse_object *op);
282
283void acpi_dm_address_space(u8 space_id);
284
285void acpi_dm_region_flags(union acpi_parse_object *op);
286
287void acpi_dm_match_op(union acpi_parse_object *op);
288
289u8 acpi_dm_comma_if_list_member(union acpi_parse_object *op);
290
291void acpi_dm_comma_if_field_member(union acpi_parse_object *op);
292
293/*
294 * dmnames
295 */
296u32 acpi_dm_dump_name(char *name);
297
298acpi_status
299acpi_ps_display_object_pathname(struct acpi_walk_state *walk_state,
300 union acpi_parse_object *op);
301
302void acpi_dm_namestring(char *name);
303
304/*
305 * dmobject
306 */
307void
308acpi_dm_display_internal_object(union acpi_operand_object *obj_desc,
309 struct acpi_walk_state *walk_state);
310
311void acpi_dm_display_arguments(struct acpi_walk_state *walk_state);
312
313void acpi_dm_display_locals(struct acpi_walk_state *walk_state);
314
315void
316acpi_dm_dump_method_info(acpi_status status,
317 struct acpi_walk_state *walk_state,
318 union acpi_parse_object *op);
319
320/*
321 * dmbuffer
322 */
323void acpi_dm_disasm_byte_list(u32 level, u8 * byte_data, u32 byte_count);
324
325void
326acpi_dm_byte_list(struct acpi_op_walk_info *info, union acpi_parse_object *op);
327
328void acpi_dm_is_eisa_id(union acpi_parse_object *op);
329
330void acpi_dm_eisa_id(u32 encoded_id);
331
332u8 acpi_dm_is_unicode_buffer(union acpi_parse_object *op);
333
334u8 acpi_dm_is_string_buffer(union acpi_parse_object *op);
335
336/*
337 * dmresrc
338 */
339void acpi_dm_dump_integer8(u8 value, char *name);
340
341void acpi_dm_dump_integer16(u16 value, char *name);
342
343void acpi_dm_dump_integer32(u32 value, char *name);
344
345void acpi_dm_dump_integer64(u64 value, char *name);
346
347void
348acpi_dm_resource_template(struct acpi_op_walk_info *info,
349 union acpi_parse_object *op,
350 u8 * byte_data, u32 byte_count);
351
352acpi_status acpi_dm_is_resource_template(union acpi_parse_object *op);
353
354void acpi_dm_indent(u32 level);
355
356void acpi_dm_bit_list(u16 mask);
357
358void acpi_dm_decode_attribute(u8 attribute);
359
360void acpi_dm_descriptor_name(void);
361
362/*
363 * dmresrcl
364 */
365void
366acpi_dm_word_descriptor(union aml_resource *resource, u32 length, u32 level);
367
368void
369acpi_dm_dword_descriptor(union aml_resource *resource, u32 length, u32 level);
370
371void
372acpi_dm_extended_descriptor(union aml_resource *resource,
373 u32 length, u32 level);
374
375void
376acpi_dm_qword_descriptor(union aml_resource *resource, u32 length, u32 level);
377
378void
379acpi_dm_memory24_descriptor(union aml_resource *resource,
380 u32 length, u32 level);
381
382void
383acpi_dm_memory32_descriptor(union aml_resource *resource,
384 u32 length, u32 level);
385
386void
387acpi_dm_fixed_memory32_descriptor(union aml_resource *resource,
388 u32 length, u32 level);
389
390void
391acpi_dm_generic_register_descriptor(union aml_resource *resource,
392 u32 length, u32 level);
393
394void
395acpi_dm_interrupt_descriptor(union aml_resource *resource,
396 u32 length, u32 level);
397
398void
399acpi_dm_vendor_large_descriptor(union aml_resource *resource,
400 u32 length, u32 level);
401
402void acpi_dm_vendor_common(char *name, u8 * byte_data, u32 length, u32 level);
403
404/*
405 * dmresrcs
406 */
407void
408acpi_dm_irq_descriptor(union aml_resource *resource, u32 length, u32 level);
409
410void
411acpi_dm_dma_descriptor(union aml_resource *resource, u32 length, u32 level);
412
413void acpi_dm_io_descriptor(union aml_resource *resource, u32 length, u32 level);
414
415void
416acpi_dm_fixed_io_descriptor(union aml_resource *resource,
417 u32 length, u32 level);
418
419void
420acpi_dm_start_dependent_descriptor(union aml_resource *resource,
421 u32 length, u32 level);
422
423void
424acpi_dm_end_dependent_descriptor(union aml_resource *resource,
425 u32 length, u32 level);
426
427void
428acpi_dm_vendor_small_descriptor(union aml_resource *resource,
429 u32 length, u32 level);
430
431/*
432 * dmutils
433 */
434void acpi_dm_add_to_external_list(char *path, u8 type, u32 value);
435
436/*
437 * dmrestag
438 */
439void acpi_dm_find_resources(union acpi_parse_object *root);
440
441void
442acpi_dm_check_resource_reference(union acpi_parse_object *op,
443 struct acpi_walk_state *walk_state);
444
445#endif /* __ACDISASM_H__ */
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 84f5cb242863..eda04546cdf6 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -153,8 +153,9 @@
153#define AE_AML_CIRCULAR_REFERENCE (acpi_status) (0x001E | AE_CODE_AML) 153#define AE_AML_CIRCULAR_REFERENCE (acpi_status) (0x001E | AE_CODE_AML)
154#define AE_AML_BAD_RESOURCE_LENGTH (acpi_status) (0x001F | AE_CODE_AML) 154#define AE_AML_BAD_RESOURCE_LENGTH (acpi_status) (0x001F | AE_CODE_AML)
155#define AE_AML_ILLEGAL_ADDRESS (acpi_status) (0x0020 | AE_CODE_AML) 155#define AE_AML_ILLEGAL_ADDRESS (acpi_status) (0x0020 | AE_CODE_AML)
156#define AE_AML_INFINITE_LOOP (acpi_status) (0x0021 | AE_CODE_AML)
156 157
157#define AE_CODE_AML_MAX 0x0020 158#define AE_CODE_AML_MAX 0x0021
158 159
159/* 160/*
160 * Internal exceptions used for control 161 * Internal exceptions used for control
@@ -175,6 +176,8 @@
175 176
176#define AE_CODE_CTRL_MAX 0x000D 177#define AE_CODE_CTRL_MAX 0x000D
177 178
179/* Exception strings for acpi_format_exception */
180
178#ifdef DEFINE_ACPI_GLOBALS 181#ifdef DEFINE_ACPI_GLOBALS
179 182
180/* 183/*
@@ -267,6 +270,7 @@ char const *acpi_gbl_exception_names_aml[] = {
267 "AE_AML_CIRCULAR_REFERENCE", 270 "AE_AML_CIRCULAR_REFERENCE",
268 "AE_AML_BAD_RESOURCE_LENGTH", 271 "AE_AML_BAD_RESOURCE_LENGTH",
269 "AE_AML_ILLEGAL_ADDRESS", 272 "AE_AML_ILLEGAL_ADDRESS",
273 "AE_AML_INFINITE_LOOP"
270}; 274};
271 275
272char const *acpi_gbl_exception_names_ctrl[] = { 276char const *acpi_gbl_exception_names_ctrl[] = {
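The acexcep.h changes move three things in lockstep: the new AE_AML_INFINITE_LOOP code, the AE_CODE_AML_MAX bound, and the string table consumed by acpi_format_exception() - drop any one of them and lookups for the new code either fall out of range or index past the table. A toy approximation of that lookup (the real ACPICA function differs in detail; names and masks here follow the header but the table is elided):

#include <stdio.h>

#define AE_CODE_AML        0x3000
#define AE_CODE_MASK       0xF000
#define AE_CODE_AML_MAX    0x0021      /* must track the table below */

static const char *aml_names[] = {
        /* entries 0x0001 - 0x0020 elided for brevity */
        [0x21] = "AE_AML_INFINITE_LOOP",
};

static const char *format_exception(unsigned status)
{
        unsigned code = status & ~AE_CODE_MASK;

        if ((status & AE_CODE_MASK) == AE_CODE_AML &&
            code >= 1 && code <= AE_CODE_AML_MAX)
                return aml_names[code] ? aml_names[code] : "(unknown)";
        return "(not an AML exception)";
}

int main(void)
{
        printf("%s\n", format_exception(0x3021));
        return 0;
}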
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index db8852d8bcf7..5c823d5ab783 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -45,9 +45,9 @@
45#define __ACOUTPUT_H__ 45#define __ACOUTPUT_H__
46 46
47/* 47/*
48 * Debug levels and component IDs. These are used to control the 48 * Debug levels and component IDs. These are used to control the
49 * granularity of the output of the DEBUG_PRINT macro -- on a per- 49 * granularity of the output of the ACPI_DEBUG_PRINT macro -- on a
50 * component basis and a per-exception-type basis. 50 * per-component basis and a per-exception-type basis.
51 */ 51 */
52 52
53/* Component IDs are used in the global "DebugLayer" */ 53/* Component IDs are used in the global "DebugLayer" */
@@ -69,8 +69,10 @@
69 69
70#define ACPI_COMPILER 0x00001000 70#define ACPI_COMPILER 0x00001000
71#define ACPI_TOOLS 0x00002000 71#define ACPI_TOOLS 0x00002000
72#define ACPI_EXAMPLE 0x00004000
73#define ACPI_DRIVER 0x00008000
72 74
73#define ACPI_ALL_COMPONENTS 0x00003FFF 75#define ACPI_ALL_COMPONENTS 0x0000FFFF
74#define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS) 76#define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS)
75 77
76/* Component IDs reserved for ACPI drivers */ 78/* Component IDs reserved for ACPI drivers */
@@ -78,7 +80,7 @@
78#define ACPI_ALL_DRIVERS 0xFFFF0000 80#define ACPI_ALL_DRIVERS 0xFFFF0000
79 81
80/* 82/*
81 * Raw debug output levels, do not use these in the DEBUG_PRINT macros 83 * Raw debug output levels, do not use these in the ACPI_DEBUG_PRINT macros
82 */ 84 */
83#define ACPI_LV_INIT 0x00000001 85#define ACPI_LV_INIT 0x00000001
84#define ACPI_LV_DEBUG_OBJECT 0x00000002 86#define ACPI_LV_DEBUG_OBJECT 0x00000002
@@ -176,4 +178,95 @@
176#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT) 178#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT)
177#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) 179#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
178 180
181#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
182/*
183 * Module name is included in both debug and non-debug versions primarily for
184 * error messages. The __FILE__ macro is not very useful for this, because it
185 * often includes the entire pathname to the module
186 */
187#define ACPI_MODULE_NAME(name) static const char ACPI_UNUSED_VAR _acpi_module_name[] = name;
188#else
189#define ACPI_MODULE_NAME(name)
190#endif
191
192/*
193 * Ascii error messages can be configured out
194 */
195#ifndef ACPI_NO_ERROR_MESSAGES
196#define AE_INFO _acpi_module_name, __LINE__
197
198/*
199 * Error reporting. The caller's module and line number are inserted by AE_INFO;
200 * the plist contains a set of parens to allow variable-length lists.
201 * These macros are used for both the debug and non-debug versions of the code.
202 */
203#define ACPI_INFO(plist) acpi_info plist
204#define ACPI_WARNING(plist) acpi_warning plist
205#define ACPI_EXCEPTION(plist) acpi_exception plist
206#define ACPI_ERROR(plist) acpi_error plist
207
208#else
209
210/* No error messages */
211
212#define ACPI_INFO(plist)
213#define ACPI_WARNING(plist)
214#define ACPI_EXCEPTION(plist)
215#define ACPI_ERROR(plist)
216
217#endif /* ACPI_NO_ERROR_MESSAGES */
218
219/*
220 * Debug macros that are conditionally compiled
221 */
222#ifdef ACPI_DEBUG_OUTPUT
223
224/*
225 * If ACPI_GET_FUNCTION_NAME was not defined in the compiler-dependent header,
226 * define it now. This is the case where the compiler does not support
227 * a __FUNCTION__ macro or equivalent.
228 */
229#ifndef ACPI_GET_FUNCTION_NAME
230#define ACPI_GET_FUNCTION_NAME _acpi_function_name
231
232/*
233 * The Name parameter should be the bare procedure name; the macro stringifies it.
234 * The function name is also used by the function exit macros below.
235 * Note: (const char) is used to be compatible with the debug interfaces
236 * and macros such as __FUNCTION__.
237 */
238#define ACPI_FUNCTION_NAME(name) static const char _acpi_function_name[] = #name;
239
240#else
241/* Compiler supports __FUNCTION__ (or equivalent) -- Ignore this macro */
242
243#define ACPI_FUNCTION_NAME(name)
244#endif /* ACPI_GET_FUNCTION_NAME */
245
246/*
247 * Common parameters used for debug output functions:
248 * line number, function name, module(file) name, component ID
249 */
250#define ACPI_DEBUG_PARAMETERS __LINE__, ACPI_GET_FUNCTION_NAME, _acpi_module_name, _COMPONENT
251
252/*
253 * Master debug print macros
254 * Print message if and only if:
255 * 1) Debug print for the current component is enabled
256 * 2) Debug error level or trace level for the print statement is enabled
257 */
258#define ACPI_DEBUG_PRINT(plist) acpi_debug_print plist
259#define ACPI_DEBUG_PRINT_RAW(plist) acpi_debug_print_raw plist
260
261#else
262/*
263 * This is the non-debug case -- make everything go away,
264 * leaving no executable debug code!
265 */
266#define ACPI_FUNCTION_NAME(a)
267#define ACPI_DEBUG_PRINT(pl)
268#define ACPI_DEBUG_PRINT_RAW(pl)
269
270#endif /* ACPI_DEBUG_OUTPUT */
271
179#endif /* __ACOUTPUT_H__ */ 272#endif /* __ACOUTPUT_H__ */
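
These macros are used together in a fixed pattern: each source file declares
its component ID and module name once, and every message site passes a
double-parenthesized argument list so the macros can compile away completely.
A minimal sketch, assuming ACPI_DB_INFO from the debug level definitions:

    #define _COMPONENT          ACPI_EXAMPLE
    ACPI_MODULE_NAME("example")

    static void example_function(u32 value)
    {
            ACPI_FUNCTION_NAME(example_function);

            /* Compiles to nothing unless ACPI_DEBUG_OUTPUT is defined */
            ACPI_DEBUG_PRINT((ACPI_DB_INFO, "value is %u\n", value));

            /* Present unless ACPI_NO_ERROR_MESSAGES is defined */
            if (value == 0)
                    ACPI_ERROR((AE_INFO, "value must be nonzero"));
    }
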
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index c515ef6cc89e..472b7bf0c5d4 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Name: acpi.h - Master include file, Publics and external data. 3 * Name: acpi.h - Master public include file used to interface to ACPICA
4 * 4 *
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
@@ -45,25 +45,22 @@
45#define __ACPI_H__ 45#define __ACPI_H__
46 46
47/* 47/*
48 * Common includes for all ACPI driver files 48 * Public include files for use by code that will interface to ACPICA.
49 * We put them here because we don't want to duplicate them 49 *
50 * in the rest of the source code again and again. 50 * Information includes the ACPICA data types, names, exceptions, and
51 * external interface prototypes. Also included are the definitions for
52 * all ACPI tables (FADT, MADT, etc.)
53 *
54 * Note: The order of these include files is important.
51 */ 55 */
52#include "acnames.h" /* Global ACPI names and strings */ 56#include "platform/acenv.h" /* Environment-specific items */
53#include "acconfig.h" /* Configuration constants */ 57#include "acnames.h" /* Common ACPI names and strings */
54#include "platform/acenv.h" /* Target environment specific items */ 58#include "actypes.h" /* ACPICA data types and structures */
55#include "actypes.h" /* Fundamental common data types */ 59#include "acexcep.h" /* ACPICA exceptions */
56#include "acexcep.h" /* ACPI exception codes */
57#include "acmacros.h" /* C macros */
58#include "actbl.h" /* ACPI table definitions */ 60#include "actbl.h" /* ACPI table definitions */
59#include "aclocal.h" /* Internal data types */
60#include "acoutput.h" /* Error output and Debug macros */ 61#include "acoutput.h" /* Error output and Debug macros */
61#include "acpiosxf.h" /* Interfaces to the ACPI-to-OS layer */ 62#include "acrestyp.h" /* Resource Descriptor structs */
63#include "acpiosxf.h" /* OSL interfaces (ACPICA-to-OS) */
62#include "acpixf.h" /* ACPI core subsystem external interfaces */ 64#include "acpixf.h" /* ACPI core subsystem external interfaces */
63#include "acobject.h" /* ACPI internal object */
64#include "acstruct.h" /* Common structures */
65#include "acglobal.h" /* All global variables */
66#include "achware.h" /* Hardware defines and interfaces */
67#include "acutils.h" /* Utility interfaces */
68 65
69#endif /* __ACPI_H__ */ 66#endif /* __ACPI_H__ */
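
With the internal headers (acmacros.h, aclocal.h, acobject.h, and friends)
dropped from the public set, OS-side code now pulls in everything it may
legally use through the single umbrella include:

    #include <acpi/acpi.h>  /* types, exceptions, tables, OSL and core
                               interfaces -- nothing internal */
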
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index b91440ac0d16..a62720a7edc0 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -121,8 +121,11 @@ acpi_os_wait_semaphore(acpi_semaphore handle, u32 units, u16 timeout);
121acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units); 121acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units);
122 122
123/* 123/*
124 * Mutex primitives 124 * Mutex primitives. May be configured to use semaphores instead via
125 * ACPI_MUTEX_TYPE (see platform/acenv.h)
125 */ 126 */
127#if (ACPI_MUTEX_TYPE != ACPI_BINARY_SEMAPHORE)
128
126acpi_status acpi_os_create_mutex(acpi_mutex * out_handle); 129acpi_status acpi_os_create_mutex(acpi_mutex * out_handle);
127 130
128void acpi_os_delete_mutex(acpi_mutex handle); 131void acpi_os_delete_mutex(acpi_mutex handle);
@@ -130,13 +133,7 @@ void acpi_os_delete_mutex(acpi_mutex handle);
130acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout); 133acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout);
131 134
132void acpi_os_release_mutex(acpi_mutex handle); 135void acpi_os_release_mutex(acpi_mutex handle);
133 136#endif
134/* Temporary macros for Mutex* interfaces, map to existing semaphore xfaces */
135
136#define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle)
137#define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle)
138#define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time)
139#define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1)
140 137
141/* 138/*
142 * Memory allocation and mapping 139 * Memory allocation and mapping
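
The four mutex prototypes are now compiled only when the host opts in; a host
that defines ACPI_MUTEX_TYPE as ACPI_OSL_MUTEX must supply them in its OSL.
A hypothetical sketch (the host_mutex_* names are illustrative only, not part
of any real OSL):

    /* In the host's platform header: */
    #define ACPI_MUTEX_TYPE     ACPI_OSL_MUTEX
    #define acpi_mutex          struct host_mutex *    /* hypothetical */

    /* In the host's OSL: */
    acpi_status acpi_os_create_mutex(acpi_mutex *out_handle)
    {
            *out_handle = host_mutex_alloc();          /* hypothetical */
            return *out_handle ? AE_OK : AE_NO_MEMORY;
    }
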
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 33bc0e3b1954..c8e8cf45830f 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -45,9 +45,32 @@
45#ifndef __ACXFACE_H__ 45#ifndef __ACXFACE_H__
46#define __ACXFACE_H__ 46#define __ACXFACE_H__
47 47
48/* Current ACPICA subsystem version in YYYYMMDD format */
49
50#define ACPI_CA_VERSION 0x20081204
51
48#include "actypes.h" 52#include "actypes.h"
49#include "actbl.h" 53#include "actbl.h"
50 54
55extern u8 acpi_gbl_permanent_mmap;
56
57/*
58 * Globals that are publicly available, allowing for
59 * run-time configuration
60 */
61extern u32 acpi_dbg_level;
62extern u32 acpi_dbg_layer;
63extern u8 acpi_gbl_enable_interpreter_slack;
64extern u8 acpi_gbl_all_methods_serialized;
65extern u8 acpi_gbl_create_osi_method;
66extern u8 acpi_gbl_leave_wake_gpes_disabled;
67extern acpi_name acpi_gbl_trace_method_name;
68extern u32 acpi_gbl_trace_flags;
69
70extern u32 acpi_current_gpe_count;
71extern struct acpi_table_fadt acpi_gbl_FADT;
72
73extern u32 acpi_rsdt_forced;
51/* 74/*
52 * Global interfaces 75 * Global interfaces
53 */ 76 */
@@ -79,11 +102,6 @@ const char *acpi_format_exception(acpi_status exception);
79 102
80acpi_status acpi_purge_cached_objects(void); 103acpi_status acpi_purge_cached_objects(void);
81 104
82#ifdef ACPI_FUTURE_USAGE
83acpi_status
84acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
85#endif
86
87/* 105/*
88 * ACPI Memory management 106 * ACPI Memory management
89 */ 107 */
@@ -193,9 +211,12 @@ acpi_status acpi_get_id(acpi_handle object, acpi_owner_id * out_type);
193acpi_status acpi_get_parent(acpi_handle object, acpi_handle * out_handle); 211acpi_status acpi_get_parent(acpi_handle object, acpi_handle * out_handle);
194 212
195/* 213/*
196 * Event handler interfaces 214 * Handler interfaces
197 */ 215 */
198acpi_status 216acpi_status
217acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
218
219acpi_status
199acpi_install_fixed_event_handler(u32 acpi_event, 220acpi_install_fixed_event_handler(u32 acpi_event,
200 acpi_event_handler handler, void *context); 221 acpi_event_handler handler, void *context);
201 222
@@ -227,6 +248,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
227 u32 gpe_number, 248 u32 gpe_number,
228 u32 type, acpi_event_handler address, void *context); 249 u32 type, acpi_event_handler address, void *context);
229 250
251acpi_status
252acpi_remove_gpe_handler(acpi_handle gpe_device,
253 u32 gpe_number, acpi_event_handler address);
254
230#ifdef ACPI_FUTURE_USAGE 255#ifdef ACPI_FUTURE_USAGE
231acpi_status acpi_install_exception_handler(acpi_exception_handler handler); 256acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
232#endif 257#endif
@@ -238,10 +263,6 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle);
238 263
239acpi_status acpi_release_global_lock(u32 handle); 264acpi_status acpi_release_global_lock(u32 handle);
240 265
241acpi_status
242acpi_remove_gpe_handler(acpi_handle gpe_device,
243 u32 gpe_number, acpi_event_handler address);
244
245acpi_status acpi_enable_event(u32 event, u32 flags); 266acpi_status acpi_enable_event(u32 event, u32 flags);
246 267
247acpi_status acpi_disable_event(u32 event, u32 flags); 268acpi_status acpi_disable_event(u32 event, u32 flags);
@@ -250,6 +271,9 @@ acpi_status acpi_clear_event(u32 event);
250 271
251acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status); 272acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status);
252 273
274/*
275 * GPE Interfaces
276 */
253acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type); 277acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type);
254 278
255acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number); 279acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number);
@@ -263,6 +287,12 @@ acpi_get_gpe_status(acpi_handle gpe_device,
263 u32 gpe_number, 287 u32 gpe_number,
264 u32 flags, acpi_event_status * event_status); 288 u32 flags, acpi_event_status * event_status);
265 289
290acpi_status acpi_disable_all_gpes(void);
291
292acpi_status acpi_enable_all_runtime_gpes(void);
293
294acpi_status acpi_get_gpe_device(u32 gpe_index, acpi_handle *gpe_device);
295
266acpi_status 296acpi_status
267acpi_install_gpe_block(acpi_handle gpe_device, 297acpi_install_gpe_block(acpi_handle gpe_device,
268 struct acpi_generic_address *gpe_block_address, 298 struct acpi_generic_address *gpe_block_address,
@@ -313,6 +343,8 @@ acpi_resource_to_address64(struct acpi_resource *resource,
313/* 343/*
314 * Hardware (ACPI device) interfaces 344 * Hardware (ACPI device) interfaces
315 */ 345 */
346acpi_status acpi_reset(void);
347
316acpi_status acpi_get_register(u32 register_id, u32 * return_value); 348acpi_status acpi_get_register(u32 register_id, u32 * return_value);
317 349
318acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value); 350acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value);
@@ -320,12 +352,14 @@ acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value);
320acpi_status acpi_set_register(u32 register_id, u32 value); 352acpi_status acpi_set_register(u32 register_id, u32 value);
321 353
322acpi_status 354acpi_status
323acpi_set_firmware_waking_vector(acpi_physical_address physical_address); 355acpi_set_firmware_waking_vector(u32 physical_address);
324 356
325#ifdef ACPI_FUTURE_USAGE
326acpi_status 357acpi_status
327acpi_get_firmware_waking_vector(acpi_physical_address * physical_address); 358acpi_set_firmware_waking_vector64(u64 physical_address);
328#endif 359
360acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg);
361
362acpi_status acpi_write(u32 value, struct acpi_generic_address *reg);
329 363
330acpi_status 364acpi_status
331acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b); 365acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
@@ -340,4 +374,42 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
340 374
341acpi_status acpi_leave_sleep_state(u8 sleep_state); 375acpi_status acpi_leave_sleep_state(u8 sleep_state);
342 376
377/*
378 * Debug output
379 */
380void ACPI_INTERNAL_VAR_XFACE
381acpi_error(const char *module_name,
382 u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
383
384void ACPI_INTERNAL_VAR_XFACE
385acpi_exception(const char *module_name,
386 u32 line_number,
387 acpi_status status, const char *format, ...) ACPI_PRINTF_LIKE(4);
388
389void ACPI_INTERNAL_VAR_XFACE
390acpi_warning(const char *module_name,
391 u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
392
393void ACPI_INTERNAL_VAR_XFACE
394acpi_info(const char *module_name,
395 u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
396
397#ifdef ACPI_DEBUG_OUTPUT
398
399void ACPI_INTERNAL_VAR_XFACE
400acpi_debug_print(u32 requested_debug_level,
401 u32 line_number,
402 const char *function_name,
403 const char *module_name,
404 u32 component_id, const char *format, ...) ACPI_PRINTF_LIKE(6);
405
406void ACPI_INTERNAL_VAR_XFACE
407acpi_debug_print_raw(u32 requested_debug_level,
408 u32 line_number,
409 const char *function_name,
410 const char *module_name,
411 u32 component_id,
412 const char *format, ...) ACPI_PRINTF_LIKE(6);
413#endif
414
343#endif /* __ACXFACE_H__ */ 415#endif /* __ACXFACE_H__ */
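
The relocated and newly exported interfaces above combine into the expected
suspend/resume shape. A sketch of the intended calling sequence, assuming
wakeup_vector holds the 32-bit physical address of the resume trampoline:

    acpi_status status;

    status = acpi_set_firmware_waking_vector(wakeup_vector);
    if (ACPI_FAILURE(status))
            return status;

    acpi_disable_all_gpes();        /* quiesce runtime and wake GPEs */

    /* ... platform writes the sleep type and enters S3 here ... */

    acpi_leave_sleep_state(ACPI_STATE_S3);
    acpi_enable_all_runtime_gpes(); /* wake GPEs stay off until re-armed */
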
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
new file mode 100644
index 000000000000..9ffe00feada6
--- /dev/null
+++ b/include/acpi/acrestyp.h
@@ -0,0 +1,405 @@
1/******************************************************************************
2 *
3 * Name: acrestyp.h - Defines, types, and structures for resource descriptors
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#ifndef __ACRESTYP_H__
45#define __ACRESTYP_H__
46
47/*
48 * Definitions for Resource Attributes
49 */
50typedef u16 acpi_rs_length; /* Resource Length field is fixed at 16 bits */
51typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (64_k-1)+3 */
52
53/*
54 * Memory Attributes
55 */
56#define ACPI_READ_ONLY_MEMORY (u8) 0x00
57#define ACPI_READ_WRITE_MEMORY (u8) 0x01
58
59#define ACPI_NON_CACHEABLE_MEMORY (u8) 0x00
60#define ACPI_CACHABLE_MEMORY (u8) 0x01
61#define ACPI_WRITE_COMBINING_MEMORY (u8) 0x02
62#define ACPI_PREFETCHABLE_MEMORY (u8) 0x03
63
64/*
65 * IO Attributes
66 * The ISA IO ranges are: n000-n0_fFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh.
67 * The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cd0-n_fFFh.
68 */
69#define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01
70#define ACPI_ISA_ONLY_RANGES (u8) 0x02
71#define ACPI_ENTIRE_RANGE (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES)
72
73/* Type of translation - 1=Sparse, 0=Dense */
74
75#define ACPI_SPARSE_TRANSLATION (u8) 0x01
76
77/*
78 * IO Port Descriptor Decode
79 */
80#define ACPI_DECODE_10 (u8) 0x00 /* 10-bit IO address decode */
81#define ACPI_DECODE_16 (u8) 0x01 /* 16-bit IO address decode */
82
83/*
84 * IRQ Attributes
85 */
86#define ACPI_LEVEL_SENSITIVE (u8) 0x00
87#define ACPI_EDGE_SENSITIVE (u8) 0x01
88
89#define ACPI_ACTIVE_HIGH (u8) 0x00
90#define ACPI_ACTIVE_LOW (u8) 0x01
91
92#define ACPI_EXCLUSIVE (u8) 0x00
93#define ACPI_SHARED (u8) 0x01
94
95/*
96 * DMA Attributes
97 */
98#define ACPI_COMPATIBILITY (u8) 0x00
99#define ACPI_TYPE_A (u8) 0x01
100#define ACPI_TYPE_B (u8) 0x02
101#define ACPI_TYPE_F (u8) 0x03
102
103#define ACPI_NOT_BUS_MASTER (u8) 0x00
104#define ACPI_BUS_MASTER (u8) 0x01
105
106#define ACPI_TRANSFER_8 (u8) 0x00
107#define ACPI_TRANSFER_8_16 (u8) 0x01
108#define ACPI_TRANSFER_16 (u8) 0x02
109
110/*
111 * Start Dependent Functions Priority definitions
112 */
113#define ACPI_GOOD_CONFIGURATION (u8) 0x00
114#define ACPI_ACCEPTABLE_CONFIGURATION (u8) 0x01
115#define ACPI_SUB_OPTIMAL_CONFIGURATION (u8) 0x02
116
117/*
118 * 16, 32 and 64-bit Address Descriptor resource types
119 */
120#define ACPI_MEMORY_RANGE (u8) 0x00
121#define ACPI_IO_RANGE (u8) 0x01
122#define ACPI_BUS_NUMBER_RANGE (u8) 0x02
123
124#define ACPI_ADDRESS_NOT_FIXED (u8) 0x00
125#define ACPI_ADDRESS_FIXED (u8) 0x01
126
127#define ACPI_POS_DECODE (u8) 0x00
128#define ACPI_SUB_DECODE (u8) 0x01
129
130#define ACPI_PRODUCER (u8) 0x00
131#define ACPI_CONSUMER (u8) 0x01
132
133/*
134 * If possible, pack the following structures to byte alignment
135 */
136#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
137#pragma pack(1)
138#endif
139
140/* UUID data structures for use in vendor-defined resource descriptors */
141
142struct acpi_uuid {
143 u8 data[ACPI_UUID_LENGTH];
144};
145
146struct acpi_vendor_uuid {
147 u8 subtype;
148 u8 data[ACPI_UUID_LENGTH];
149};
150
151/*
152 * Structures used to describe device resources
153 */
154struct acpi_resource_irq {
155 u8 descriptor_length;
156 u8 triggering;
157 u8 polarity;
158 u8 sharable;
159 u8 interrupt_count;
160 u8 interrupts[1];
161};
162
163struct acpi_resource_dma {
164 u8 type;
165 u8 bus_master;
166 u8 transfer;
167 u8 channel_count;
168 u8 channels[1];
169};
170
171struct acpi_resource_start_dependent {
172 u8 descriptor_length;
173 u8 compatibility_priority;
174 u8 performance_robustness;
175};
176
177/*
178 * The END_DEPENDENT_FUNCTIONS_RESOURCE struct is not
179 * needed because it has no fields
180 */
181
182struct acpi_resource_io {
183 u8 io_decode;
184 u8 alignment;
185 u8 address_length;
186 u16 minimum;
187 u16 maximum;
188};
189
190struct acpi_resource_fixed_io {
191 u16 address;
192 u8 address_length;
193};
194
195struct acpi_resource_vendor {
196 u16 byte_length;
197 u8 byte_data[1];
198};
199
200/* Vendor resource with UUID info (introduced in ACPI 3.0) */
201
202struct acpi_resource_vendor_typed {
203 u16 byte_length;
204 u8 uuid_subtype;
205 u8 uuid[ACPI_UUID_LENGTH];
206 u8 byte_data[1];
207};
208
209struct acpi_resource_end_tag {
210 u8 checksum;
211};
212
213struct acpi_resource_memory24 {
214 u8 write_protect;
215 u16 minimum;
216 u16 maximum;
217 u16 alignment;
218 u16 address_length;
219};
220
221struct acpi_resource_memory32 {
222 u8 write_protect;
223 u32 minimum;
224 u32 maximum;
225 u32 alignment;
226 u32 address_length;
227};
228
229struct acpi_resource_fixed_memory32 {
230 u8 write_protect;
231 u32 address;
232 u32 address_length;
233};
234
235struct acpi_memory_attribute {
236 u8 write_protect;
237 u8 caching;
238 u8 range_type;
239 u8 translation;
240};
241
242struct acpi_io_attribute {
243 u8 range_type;
244 u8 translation;
245 u8 translation_type;
246 u8 reserved1;
247};
248
249union acpi_resource_attribute {
250 struct acpi_memory_attribute mem;
251 struct acpi_io_attribute io;
252
253 /* Used for the *word_space macros */
254
255 u8 type_specific;
256};
257
258struct acpi_resource_source {
259 u8 index;
260 u16 string_length;
261 char *string_ptr;
262};
263
264/* Fields common to all address descriptors, 16/32/64 bit */
265
266#define ACPI_RESOURCE_ADDRESS_COMMON \
267 u8 resource_type; \
268 u8 producer_consumer; \
269 u8 decode; \
270 u8 min_address_fixed; \
271 u8 max_address_fixed; \
272 union acpi_resource_attribute info;
273
274struct acpi_resource_address {
275ACPI_RESOURCE_ADDRESS_COMMON};
276
277struct acpi_resource_address16 {
278 ACPI_RESOURCE_ADDRESS_COMMON u16 granularity;
279 u16 minimum;
280 u16 maximum;
281 u16 translation_offset;
282 u16 address_length;
283 struct acpi_resource_source resource_source;
284};
285
286struct acpi_resource_address32 {
287 ACPI_RESOURCE_ADDRESS_COMMON u32 granularity;
288 u32 minimum;
289 u32 maximum;
290 u32 translation_offset;
291 u32 address_length;
292 struct acpi_resource_source resource_source;
293};
294
295struct acpi_resource_address64 {
296 ACPI_RESOURCE_ADDRESS_COMMON u64 granularity;
297 u64 minimum;
298 u64 maximum;
299 u64 translation_offset;
300 u64 address_length;
301 struct acpi_resource_source resource_source;
302};
303
304struct acpi_resource_extended_address64 {
305 ACPI_RESOURCE_ADDRESS_COMMON u8 revision_iD;
306 u64 granularity;
307 u64 minimum;
308 u64 maximum;
309 u64 translation_offset;
310 u64 address_length;
311 u64 type_specific;
312};
313
314struct acpi_resource_extended_irq {
315 u8 producer_consumer;
316 u8 triggering;
317 u8 polarity;
318 u8 sharable;
319 u8 interrupt_count;
320 struct acpi_resource_source resource_source;
321 u32 interrupts[1];
322};
323
324struct acpi_resource_generic_register {
325 u8 space_id;
326 u8 bit_width;
327 u8 bit_offset;
328 u8 access_size;
329 u64 address;
330};
331
332/* ACPI_RESOURCE_TYPEs */
333
334#define ACPI_RESOURCE_TYPE_IRQ 0
335#define ACPI_RESOURCE_TYPE_DMA 1
336#define ACPI_RESOURCE_TYPE_START_DEPENDENT 2
337#define ACPI_RESOURCE_TYPE_END_DEPENDENT 3
338#define ACPI_RESOURCE_TYPE_IO 4
339#define ACPI_RESOURCE_TYPE_FIXED_IO 5
340#define ACPI_RESOURCE_TYPE_VENDOR 6
341#define ACPI_RESOURCE_TYPE_END_TAG 7
342#define ACPI_RESOURCE_TYPE_MEMORY24 8
343#define ACPI_RESOURCE_TYPE_MEMORY32 9
344#define ACPI_RESOURCE_TYPE_FIXED_MEMORY32 10
345#define ACPI_RESOURCE_TYPE_ADDRESS16 11
346#define ACPI_RESOURCE_TYPE_ADDRESS32 12
347#define ACPI_RESOURCE_TYPE_ADDRESS64 13
348#define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 14 /* ACPI 3.0 */
349#define ACPI_RESOURCE_TYPE_EXTENDED_IRQ 15
350#define ACPI_RESOURCE_TYPE_GENERIC_REGISTER 16
351#define ACPI_RESOURCE_TYPE_MAX 16
352
353/* Master union for resource descriptors */
354
355union acpi_resource_data {
356 struct acpi_resource_irq irq;
357 struct acpi_resource_dma dma;
358 struct acpi_resource_start_dependent start_dpf;
359 struct acpi_resource_io io;
360 struct acpi_resource_fixed_io fixed_io;
361 struct acpi_resource_vendor vendor;
362 struct acpi_resource_vendor_typed vendor_typed;
363 struct acpi_resource_end_tag end_tag;
364 struct acpi_resource_memory24 memory24;
365 struct acpi_resource_memory32 memory32;
366 struct acpi_resource_fixed_memory32 fixed_memory32;
367 struct acpi_resource_address16 address16;
368 struct acpi_resource_address32 address32;
369 struct acpi_resource_address64 address64;
370 struct acpi_resource_extended_address64 ext_address64;
371 struct acpi_resource_extended_irq extended_irq;
372 struct acpi_resource_generic_register generic_reg;
373
374 /* Common fields */
375
376 struct acpi_resource_address address; /* Common 16/32/64 address fields */
377};
378
379/* Common resource header */
380
381struct acpi_resource {
382 u32 type;
383 u32 length;
384 union acpi_resource_data data;
385};
386
387/* restore default alignment */
388
389#pragma pack()
390
391#define ACPI_RS_SIZE_NO_DATA 8 /* Id + Length fields */
392#define ACPI_RS_SIZE_MIN (u32) ACPI_ROUND_UP_TO_NATIVE_WORD (12)
393#define ACPI_RS_SIZE(type) (u32) (ACPI_RS_SIZE_NO_DATA + sizeof (type))
394
395#define ACPI_NEXT_RESOURCE(res) (struct acpi_resource *)((u8 *) res + res->length)
396
397struct acpi_pci_routing_table {
398 u32 length;
399 u32 pin;
400 acpi_integer address; /* here for 64-bit alignment */
401 u32 source_index;
402 char source[4]; /* pad to 64 bits so sizeof() works in all cases */
403};
404
405#endif /* __ACRESTYP_H__ */
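
Everything a resource walker needs now lives in this one header. A minimal
sketch of iterating a _CRS buffer with ACPI_NEXT_RESOURCE, assuming
buffer.pointer holds a well-formed descriptor list:

    struct acpi_resource *res = buffer.pointer;

    while (res->type != ACPI_RESOURCE_TYPE_END_TAG) {
            if (res->type == ACPI_RESOURCE_TYPE_IRQ)
                    use_irq(res->data.irq.interrupts[0]); /* hypothetical helper */

            /* length covers the common header plus the typed payload */
            res = ACPI_NEXT_RESOURCE(res);
    }
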
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 13a3d9ad92db..813e4b6c2c0d 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -288,6 +288,31 @@ enum acpi_prefered_pm_profiles {
288 288
289#define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_fadt, f) 289#define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_fadt, f)
290 290
291union acpi_name_union {
292 u32 integer;
293 char ascii[4];
294};
295
296/*
297 * Internal ACPI Table Descriptor. One per ACPI table
298 */
299struct acpi_table_desc {
300 acpi_physical_address address;
301 struct acpi_table_header *pointer;
302 u32 length; /* Length fixed at 32 bits */
303 union acpi_name_union signature;
304 acpi_owner_id owner_id;
305 u8 flags;
306};
307
308/* Flags for above */
309
310#define ACPI_TABLE_ORIGIN_UNKNOWN (0)
311#define ACPI_TABLE_ORIGIN_MAPPED (1)
312#define ACPI_TABLE_ORIGIN_ALLOCATED (2)
313#define ACPI_TABLE_ORIGIN_MASK (3)
314#define ACPI_TABLE_IS_LOADED (4)
315
291/* 316/*
292 * Get the remaining ACPI tables 317 * Get the remaining ACPI tables
293 */ 318 */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 63f5b4cf4de1..18963b968114 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -627,7 +627,7 @@ struct acpi_hest_aer_common {
627 u32 uncorrectable_error_mask; 627 u32 uncorrectable_error_mask;
628 u32 uncorrectable_error_severity; 628 u32 uncorrectable_error_severity;
629 u32 correctable_error_mask; 629 u32 correctable_error_mask;
630 u32 advanced_error_cababilities; 630 u32 advanced_error_capabilities;
631}; 631};
632 632
633/* Hardware Error Notification */ 633/* Hardware Error Notification */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 8222e8de0d1c..a20aab510173 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -204,11 +204,10 @@ typedef u32 acpi_physical_address;
204 204
205/******************************************************************************* 205/*******************************************************************************
206 * 206 *
207 * OS-dependent and compiler-dependent types 207 * OS-dependent types
208 * 208 *
209 * If the defaults below are not appropriate for the host system, they can 209 * If the defaults below are not appropriate for the host system, they can
210 * be defined in the compiler-specific or OS-specific header, and this will 210 * be defined in the OS-specific header, and this will take precedence.
211 * take precedence.
212 * 211 *
213 ******************************************************************************/ 212 ******************************************************************************/
214 213
@@ -218,12 +217,6 @@ typedef u32 acpi_physical_address;
218#define acpi_thread_id acpi_size 217#define acpi_thread_id acpi_size
219#endif 218#endif
220 219
221/* Object returned from acpi_os_create_lock */
222
223#ifndef acpi_spinlock
224#define acpi_spinlock void *
225#endif
226
227/* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ 220/* Flags for acpi_os_acquire_lock/acpi_os_release_lock */
228 221
229#ifndef acpi_cpu_flags 222#ifndef acpi_cpu_flags
@@ -233,9 +226,51 @@ typedef u32 acpi_physical_address;
233/* Object returned from acpi_os_create_cache */ 226/* Object returned from acpi_os_create_cache */
234 227
235#ifndef acpi_cache_t 228#ifndef acpi_cache_t
229#ifdef ACPI_USE_LOCAL_CACHE
236#define acpi_cache_t struct acpi_memory_list 230#define acpi_cache_t struct acpi_memory_list
231#else
232#define acpi_cache_t void *
233#endif
234#endif
235
236/*
237 * Synchronization objects - Mutexes, Semaphores, and spin_locks
238 */
239#if (ACPI_MUTEX_TYPE == ACPI_BINARY_SEMAPHORE)
240/*
241 * These macros are used if the host OS does not support a mutex object.
242 * Map the OSL Mutex interfaces to binary semaphores.
243 */
244#define acpi_mutex acpi_semaphore
245#define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle)
246#define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle)
247#define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time)
248#define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1)
249#endif
250
251/* Configurable types for synchronization objects */
252
253#ifndef acpi_spinlock
254#define acpi_spinlock void *
255#endif
256
257#ifndef acpi_semaphore
258#define acpi_semaphore void *
259#endif
260
261#ifndef acpi_mutex
262#define acpi_mutex void *
237#endif 263#endif
238 264
265/*******************************************************************************
266 *
267 * Compiler-dependent types
268 *
269 * If the defaults below are not appropriate for the host compiler, they can
270 * be defined in the compiler-specific header, and this will take precedence.
271 *
272 ******************************************************************************/
273
239/* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ 274/* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */
240 275
241#ifndef acpi_uintptr_t 276#ifndef acpi_uintptr_t
@@ -268,6 +303,43 @@ typedef u32 acpi_physical_address;
268#define ACPI_EXPORT_SYMBOL(symbol) 303#define ACPI_EXPORT_SYMBOL(symbol)
269#endif 304#endif
270 305
306/******************************************************************************
307 *
308 * ACPI Specification constants (Do not change unless the specification changes)
309 *
310 *****************************************************************************/
311
312/* Number of distinct FADT-based GPE register blocks (GPE0 and GPE1) */
313
314#define ACPI_MAX_GPE_BLOCKS 2
315
316/* Default ACPI register widths */
317
318#define ACPI_GPE_REGISTER_WIDTH 8
319#define ACPI_PM1_REGISTER_WIDTH 16
320#define ACPI_PM2_REGISTER_WIDTH 8
321#define ACPI_PM_TIMER_WIDTH 32
322
323/* Names within the namespace are 4 bytes long */
324
325#define ACPI_NAME_SIZE 4
326#define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */
327#define ACPI_PATH_SEPARATOR '.'
328
329/* Sizes for ACPI table headers */
330
331#define ACPI_OEM_ID_SIZE 6
332#define ACPI_OEM_TABLE_ID_SIZE 8
333
334/* ACPI/PNP hardware IDs */
335
336#define PCI_ROOT_HID_STRING "PNP0A03"
337#define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08"
338
339/* PM Timer ticks per second (HZ) */
340
341#define PM_TIMER_FREQUENCY 3579545
342
271/******************************************************************************* 343/*******************************************************************************
272 * 344 *
273 * Independent types 345 * Independent types
@@ -291,13 +363,18 @@ typedef u32 acpi_physical_address;
291#endif 363#endif
292 364
293/* 365/*
294 * Mescellaneous types 366 * Miscellaneous types
295 */ 367 */
296typedef u32 acpi_status; /* All ACPI Exceptions */ 368typedef u32 acpi_status; /* All ACPI Exceptions */
297typedef u32 acpi_name; /* 4-byte ACPI name */ 369typedef u32 acpi_name; /* 4-byte ACPI name */
298typedef char *acpi_string; /* Null terminated ASCII string */ 370typedef char *acpi_string; /* Null terminated ASCII string */
299typedef void *acpi_handle; /* Actually a ptr to a NS Node */ 371typedef void *acpi_handle; /* Actually a ptr to a NS Node */
300 372
373/* Owner IDs are used to track namespace nodes for selective deletion */
374
375typedef u8 acpi_owner_id;
376#define ACPI_OWNER_ID_MAX 0xFF
377
301struct uint64_struct { 378struct uint64_struct {
302 u32 lo; 379 u32 lo;
303 u32 hi; 380 u32 hi;
@@ -313,13 +390,8 @@ struct uint32_struct {
313 u32 hi; 390 u32 hi;
314}; 391};
315 392
316/* Synchronization objects */
317
318#define acpi_mutex void *
319#define acpi_semaphore void *
320
321/* 393/*
322 * Acpi integer width. In ACPI version 1, integers are 32 bits. In ACPI 394 * Acpi integer width. In ACPI version 1, integers are 32 bits. In ACPI
323 * version 2, integers are 64 bits. Note that this pertains to the ACPI integer 395 * version 2, integers are 64 bits. Note that this pertains to the ACPI integer
324 * type only, not other integers used in the implementation of the ACPI CA 396 * type only, not other integers used in the implementation of the ACPI CA
325 * subsystem. 397 * subsystem.
@@ -338,10 +410,75 @@ typedef unsigned long long acpi_integer;
338#define ACPI_MAX16_DECIMAL_DIGITS 5 410#define ACPI_MAX16_DECIMAL_DIGITS 5
339#define ACPI_MAX8_DECIMAL_DIGITS 3 411#define ACPI_MAX8_DECIMAL_DIGITS 3
340 412
413/* PM Timer ticks per second (HZ) */
414
415#define PM_TIMER_FREQUENCY 3579545
416
341/* 417/*
342 * Constants with special meanings 418 * Constants with special meanings
343 */ 419 */
344#define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR) 420#define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR)
421#define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */
422#define ACPI_DO_NOT_WAIT 0
423
424/*******************************************************************************
425 *
426 * Commonly used macros
427 *
428 ******************************************************************************/
429
430/* Data manipulation */
431
432#define ACPI_LOWORD(l) ((u16)(u32)(l))
433#define ACPI_HIWORD(l) ((u16)((((u32)(l)) >> 16) & 0xFFFF))
434#define ACPI_LOBYTE(l) ((u8)(u16)(l))
435#define ACPI_HIBYTE(l) ((u8)((((u16)(l)) >> 8) & 0xFF))
436
437/* Full 64-bit integer must be available on both 32-bit and 64-bit platforms */
438
439struct acpi_integer_overlay {
440 u32 lo_dword;
441 u32 hi_dword;
442};
443
444#define ACPI_LODWORD(integer) (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->lo_dword)
445#define ACPI_HIDWORD(integer) (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->hi_dword)
446
447#define ACPI_SET_BIT(target,bit) ((target) |= (bit))
448#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit))
449#define ACPI_MIN(a,b) (((a)<(b))?(a):(b))
450#define ACPI_MAX(a,b) (((a)>(b))?(a):(b))
451
452/* Size calculation */
453
454#define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0]))
455
456/* Pointer manipulation */
457
458#define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p))
459#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p))
460#define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b)))
461#define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))
462
463/* Pointer/Integer type conversions */
464
465#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) NULL,(acpi_size) i)
466#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) NULL)
467#define ACPI_OFFSET(d, f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f), (void *) NULL)
468#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
469#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
470
471#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
472#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b)))
473#else
474#define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE))
475#endif
476
477/*******************************************************************************
478 *
479 * Miscellaneous constants
480 *
481 ******************************************************************************/
345 482
346/* 483/*
347 * Initialization sequence 484 * Initialization sequence
@@ -414,7 +551,7 @@ typedef unsigned long long acpi_integer;
414#define ACPI_NOTIFY_MAX 0x0B 551#define ACPI_NOTIFY_MAX 0x0B
415 552
416/* 553/*
417 * Types associated with ACPI names and objects. The first group of 554 * Types associated with ACPI names and objects. The first group of
418 * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition 555 * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition
419 * of the ACPI object_type() operator (See the ACPI Spec). Therefore, 556 * of the ACPI object_type() operator (See the ACPI Spec). Therefore,
420 * only add to the first group if the spec changes. 557 * only add to the first group if the spec changes.
@@ -732,6 +869,15 @@ struct acpi_buffer {
732#define ACPI_NAME_TYPE_MAX 1 869#define ACPI_NAME_TYPE_MAX 1
733 870
734/* 871/*
872 * Predefined Namespace items
873 */
874struct acpi_predefined_names {
875 char *name;
876 u8 type;
877 char *val;
878};
879
880/*
735 * Structure and flags for acpi_get_system_info 881 * Structure and flags for acpi_get_system_info
736 */ 882 */
737#define ACPI_SYS_MODE_UNKNOWN 0x0000 883#define ACPI_SYS_MODE_UNKNOWN 0x0000
@@ -787,7 +933,7 @@ acpi_status(*acpi_exception_handler) (acpi_status aml_status,
787 u16 opcode, 933 u16 opcode,
788 u32 aml_offset, void *context); 934 u32 aml_offset, void *context);
789 935
790/* Table Event handler (Load, load_table etc) and types */ 936/* Table Event handler (Load, load_table, etc.) and types */
791 937
792typedef 938typedef
793acpi_status(*acpi_tbl_handler) (u32 event, void *table, void *context); 939acpi_status(*acpi_tbl_handler) (u32 event, void *table, void *context);
@@ -823,6 +969,12 @@ acpi_status(*acpi_walk_callback) (acpi_handle obj_handle,
823#define ACPI_INTERRUPT_NOT_HANDLED 0x00 969#define ACPI_INTERRUPT_NOT_HANDLED 0x00
824#define ACPI_INTERRUPT_HANDLED 0x01 970#define ACPI_INTERRUPT_HANDLED 0x01
825 971
972/* Length of _HID, _UID, _CID, and UUID values */
973
974#define ACPI_DEVICE_ID_LENGTH 0x09
975#define ACPI_MAX_CID_LENGTH 48
976#define ACPI_UUID_LENGTH 16
977
826/* Common string version of device HIDs and UIDs */ 978/* Common string version of device HIDs and UIDs */
827 979
828struct acpica_device_id { 980struct acpica_device_id {
@@ -900,357 +1052,28 @@ struct acpi_mem_space_context {
900}; 1052};
901 1053
902/* 1054/*
903 * Definitions for Resource Attributes 1055 * struct acpi_memory_list is used only if the ACPICA local cache is enabled
904 */
905typedef u16 acpi_rs_length; /* Resource Length field is fixed at 16 bits */
906typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (64_k-1)+3 */
907
908/*
909 * Memory Attributes
910 */
911#define ACPI_READ_ONLY_MEMORY (u8) 0x00
912#define ACPI_READ_WRITE_MEMORY (u8) 0x01
913
914#define ACPI_NON_CACHEABLE_MEMORY (u8) 0x00
915#define ACPI_CACHABLE_MEMORY (u8) 0x01
916#define ACPI_WRITE_COMBINING_MEMORY (u8) 0x02
917#define ACPI_PREFETCHABLE_MEMORY (u8) 0x03
918
919/*
920 * IO Attributes
921 * The ISA IO ranges are: n000-n0_fFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh.
922 * The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cd0-n_fFFh.
923 */ 1056 */
924#define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01 1057struct acpi_memory_list {
925#define ACPI_ISA_ONLY_RANGES (u8) 0x02 1058 char *list_name;
926#define ACPI_ENTIRE_RANGE (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES) 1059 void *list_head;
927 1060 u16 object_size;
928/* Type of translation - 1=Sparse, 0=Dense */ 1061 u16 max_depth;
929 1062 u16 current_depth;
930#define ACPI_SPARSE_TRANSLATION (u8) 0x01 1063 u16 link_offset;
931 1064
932/* 1065#ifdef ACPI_DBG_TRACK_ALLOCATIONS
933 * IO Port Descriptor Decode 1066
934 */ 1067 /* Statistics for debug memory tracking only */
935#define ACPI_DECODE_10 (u8) 0x00 /* 10-bit IO address decode */ 1068
936#define ACPI_DECODE_16 (u8) 0x01 /* 16-bit IO address decode */ 1069 u32 total_allocated;
937 1070 u32 total_freed;
938/* 1071 u32 max_occupied;
939 * IRQ Attributes 1072 u32 total_size;
940 */ 1073 u32 current_total_size;
941#define ACPI_LEVEL_SENSITIVE (u8) 0x00 1074 u32 requests;
942#define ACPI_EDGE_SENSITIVE (u8) 0x01 1075 u32 hits;
943
944#define ACPI_ACTIVE_HIGH (u8) 0x00
945#define ACPI_ACTIVE_LOW (u8) 0x01
946
947#define ACPI_EXCLUSIVE (u8) 0x00
948#define ACPI_SHARED (u8) 0x01
949
950/*
951 * DMA Attributes
952 */
953#define ACPI_COMPATIBILITY (u8) 0x00
954#define ACPI_TYPE_A (u8) 0x01
955#define ACPI_TYPE_B (u8) 0x02
956#define ACPI_TYPE_F (u8) 0x03
957
958#define ACPI_NOT_BUS_MASTER (u8) 0x00
959#define ACPI_BUS_MASTER (u8) 0x01
960
961#define ACPI_TRANSFER_8 (u8) 0x00
962#define ACPI_TRANSFER_8_16 (u8) 0x01
963#define ACPI_TRANSFER_16 (u8) 0x02
964
965/*
966 * Start Dependent Functions Priority definitions
967 */
968#define ACPI_GOOD_CONFIGURATION (u8) 0x00
969#define ACPI_ACCEPTABLE_CONFIGURATION (u8) 0x01
970#define ACPI_SUB_OPTIMAL_CONFIGURATION (u8) 0x02
971
972/*
973 * 16, 32 and 64-bit Address Descriptor resource types
974 */
975#define ACPI_MEMORY_RANGE (u8) 0x00
976#define ACPI_IO_RANGE (u8) 0x01
977#define ACPI_BUS_NUMBER_RANGE (u8) 0x02
978
979#define ACPI_ADDRESS_NOT_FIXED (u8) 0x00
980#define ACPI_ADDRESS_FIXED (u8) 0x01
981
982#define ACPI_POS_DECODE (u8) 0x00
983#define ACPI_SUB_DECODE (u8) 0x01
984
985#define ACPI_PRODUCER (u8) 0x00
986#define ACPI_CONSUMER (u8) 0x01
987
988/*
989 * If possible, pack the following structures to byte alignment
990 */
991#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
992#pragma pack(1)
993#endif 1076#endif
994
995/* UUID data structures for use in vendor-defined resource descriptors */
996
997struct acpi_uuid {
998 u8 data[ACPI_UUID_LENGTH];
999};
1000
1001struct acpi_vendor_uuid {
1002 u8 subtype;
1003 u8 data[ACPI_UUID_LENGTH];
1004};
1005
1006/*
1007 * Structures used to describe device resources
1008 */
1009struct acpi_resource_irq {
1010 u8 descriptor_length;
1011 u8 triggering;
1012 u8 polarity;
1013 u8 sharable;
1014 u8 interrupt_count;
1015 u8 interrupts[1];
1016};
1017
1018struct acpi_resource_dma {
1019 u8 type;
1020 u8 bus_master;
1021 u8 transfer;
1022 u8 channel_count;
1023 u8 channels[1];
1024};
1025
1026struct acpi_resource_start_dependent {
1027 u8 descriptor_length;
1028 u8 compatibility_priority;
1029 u8 performance_robustness;
1030};
1031
1032/*
1033 * END_DEPENDENT_FUNCTIONS_RESOURCE struct is not
1034 * needed because it has no fields
1035 */
1036
1037struct acpi_resource_io {
1038 u8 io_decode;
1039 u8 alignment;
1040 u8 address_length;
1041 u16 minimum;
1042 u16 maximum;
1043};
1044
1045struct acpi_resource_fixed_io {
1046 u16 address;
1047 u8 address_length;
1048};
1049
1050struct acpi_resource_vendor {
1051 u16 byte_length;
1052 u8 byte_data[1];
1053};
1054
1055/* Vendor resource with UUID info (introduced in ACPI 3.0) */
1056
1057struct acpi_resource_vendor_typed {
1058 u16 byte_length;
1059 u8 uuid_subtype;
1060 u8 uuid[ACPI_UUID_LENGTH];
1061 u8 byte_data[1];
1062};
1063
1064struct acpi_resource_end_tag {
1065 u8 checksum;
1066};
1067
1068struct acpi_resource_memory24 {
1069 u8 write_protect;
1070 u16 minimum;
1071 u16 maximum;
1072 u16 alignment;
1073 u16 address_length;
1074};
1075
1076struct acpi_resource_memory32 {
1077 u8 write_protect;
1078 u32 minimum;
1079 u32 maximum;
1080 u32 alignment;
1081 u32 address_length;
1082};
1083
1084struct acpi_resource_fixed_memory32 {
1085 u8 write_protect;
1086 u32 address;
1087 u32 address_length;
1088};
1089
1090struct acpi_memory_attribute {
1091 u8 write_protect;
1092 u8 caching;
1093 u8 range_type;
1094 u8 translation;
1095};
1096
1097struct acpi_io_attribute {
1098 u8 range_type;
1099 u8 translation;
1100 u8 translation_type;
1101 u8 reserved1;
1102};
1103
1104union acpi_resource_attribute {
1105 struct acpi_memory_attribute mem;
1106 struct acpi_io_attribute io;
1107
1108 /* Used for the *word_space macros */
1109
1110 u8 type_specific;
1111};
1112
1113struct acpi_resource_source {
1114 u8 index;
1115 u16 string_length;
1116 char *string_ptr;
1117};
1118
1119/* Fields common to all address descriptors, 16/32/64 bit */
1120
1121#define ACPI_RESOURCE_ADDRESS_COMMON \
1122 u8 resource_type; \
1123 u8 producer_consumer; \
1124 u8 decode; \
1125 u8 min_address_fixed; \
1126 u8 max_address_fixed; \
1127 union acpi_resource_attribute info;
1128
1129struct acpi_resource_address {
1130ACPI_RESOURCE_ADDRESS_COMMON};
1131
1132struct acpi_resource_address16 {
1133 ACPI_RESOURCE_ADDRESS_COMMON u16 granularity;
1134 u16 minimum;
1135 u16 maximum;
1136 u16 translation_offset;
1137 u16 address_length;
1138 struct acpi_resource_source resource_source;
1139};
1140
1141struct acpi_resource_address32 {
1142 ACPI_RESOURCE_ADDRESS_COMMON u32 granularity;
1143 u32 minimum;
1144 u32 maximum;
1145 u32 translation_offset;
1146 u32 address_length;
1147 struct acpi_resource_source resource_source;
1148};
1149
1150struct acpi_resource_address64 {
1151 ACPI_RESOURCE_ADDRESS_COMMON u64 granularity;
1152 u64 minimum;
1153 u64 maximum;
1154 u64 translation_offset;
1155 u64 address_length;
1156 struct acpi_resource_source resource_source;
1157};
1158
1159struct acpi_resource_extended_address64 {
1160 ACPI_RESOURCE_ADDRESS_COMMON u8 revision_iD;
1161 u64 granularity;
1162 u64 minimum;
1163 u64 maximum;
1164 u64 translation_offset;
1165 u64 address_length;
1166 u64 type_specific;
1167};
1168
1169struct acpi_resource_extended_irq {
1170 u8 producer_consumer;
1171 u8 triggering;
1172 u8 polarity;
1173 u8 sharable;
1174 u8 interrupt_count;
1175 struct acpi_resource_source resource_source;
1176 u32 interrupts[1];
1177};
1178
1179struct acpi_resource_generic_register {
1180 u8 space_id;
1181 u8 bit_width;
1182 u8 bit_offset;
1183 u8 access_size;
1184 u64 address;
1185};
1186
1187/* ACPI_RESOURCE_TYPEs */
1188
1189#define ACPI_RESOURCE_TYPE_IRQ 0
1190#define ACPI_RESOURCE_TYPE_DMA 1
1191#define ACPI_RESOURCE_TYPE_START_DEPENDENT 2
1192#define ACPI_RESOURCE_TYPE_END_DEPENDENT 3
1193#define ACPI_RESOURCE_TYPE_IO 4
1194#define ACPI_RESOURCE_TYPE_FIXED_IO 5
1195#define ACPI_RESOURCE_TYPE_VENDOR 6
1196#define ACPI_RESOURCE_TYPE_END_TAG 7
1197#define ACPI_RESOURCE_TYPE_MEMORY24 8
1198#define ACPI_RESOURCE_TYPE_MEMORY32 9
1199#define ACPI_RESOURCE_TYPE_FIXED_MEMORY32 10
1200#define ACPI_RESOURCE_TYPE_ADDRESS16 11
1201#define ACPI_RESOURCE_TYPE_ADDRESS32 12
1202#define ACPI_RESOURCE_TYPE_ADDRESS64 13
1203#define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 14 /* ACPI 3.0 */
1204#define ACPI_RESOURCE_TYPE_EXTENDED_IRQ 15
1205#define ACPI_RESOURCE_TYPE_GENERIC_REGISTER 16
1206#define ACPI_RESOURCE_TYPE_MAX 16
1207
1208union acpi_resource_data {
1209 struct acpi_resource_irq irq;
1210 struct acpi_resource_dma dma;
1211 struct acpi_resource_start_dependent start_dpf;
1212 struct acpi_resource_io io;
1213 struct acpi_resource_fixed_io fixed_io;
1214 struct acpi_resource_vendor vendor;
1215 struct acpi_resource_vendor_typed vendor_typed;
1216 struct acpi_resource_end_tag end_tag;
1217 struct acpi_resource_memory24 memory24;
1218 struct acpi_resource_memory32 memory32;
1219 struct acpi_resource_fixed_memory32 fixed_memory32;
1220 struct acpi_resource_address16 address16;
1221 struct acpi_resource_address32 address32;
1222 struct acpi_resource_address64 address64;
1223 struct acpi_resource_extended_address64 ext_address64;
1224 struct acpi_resource_extended_irq extended_irq;
1225 struct acpi_resource_generic_register generic_reg;
1226
1227 /* Common fields */
1228
1229 struct acpi_resource_address address; /* Common 16/32/64 address fields */
1230};
1231
1232struct acpi_resource {
1233 u32 type;
1234 u32 length;
1235 union acpi_resource_data data;
1236};
1237
1238/* restore default alignment */
1239
1240#pragma pack()
1241
1242#define ACPI_RS_SIZE_NO_DATA 8 /* Id + Length fields */
1243#define ACPI_RS_SIZE_MIN (u32) ACPI_ROUND_UP_TO_NATIVE_WORD (12)
1244#define ACPI_RS_SIZE(type) (u32) (ACPI_RS_SIZE_NO_DATA + sizeof (type))
1245
1246#define ACPI_NEXT_RESOURCE(res) (struct acpi_resource *)((u8 *) res + res->length)
1247
1248struct acpi_pci_routing_table {
1249 u32 length;
1250 u32 pin;
1251 acpi_integer address; /* here for 64-bit alignment */
1252 u32 source_index;
1253 char source[4]; /* pad to 64 bits so sizeof() works in all cases */
1254}; 1077};
1255 1078
1256#endif /* __ACTYPES_H__ */ 1079#endif /* __ACTYPES_H__ */
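
The relocated data-manipulation macros behave identically on 32- and 64-bit
hosts; the ACPI_LODWORD/ACPI_HIDWORD overlay assumes a little-endian layout.
A few worked examples (the values in the comments are what the macros yield):

    u32 reg = 0x12345678;
    acpi_integer wide = 0x1122334455667788;

    u16 hi  = ACPI_HIWORD(reg);         /* 0x1234 */
    u8  lob = ACPI_LOBYTE(reg);         /* 0x78 */
    u32 low = ACPI_LODWORD(wide);       /* 0x55667788 on little-endian */
    acpi_size off = ACPI_OFFSET(struct acpi_table_desc, length);
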
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index fcd2572e428c..e62f10d9a7d8 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -44,14 +44,26 @@
44#ifndef __ACENV_H__ 44#ifndef __ACENV_H__
45#define __ACENV_H__ 45#define __ACENV_H__
46 46
47/* 47/* Types for ACPI_MUTEX_TYPE */
48
49#define ACPI_BINARY_SEMAPHORE 0
50#define ACPI_OSL_MUTEX 1
51
52/* Types for DEBUGGER_THREADING */
53
54#define DEBUGGER_SINGLE_THREADED 0
55#define DEBUGGER_MULTI_THREADED 1
56
57/******************************************************************************
58 *
48 * Configuration for ACPI tools and utilities 59 * Configuration for ACPI tools and utilities
49 */ 60 *
61 *****************************************************************************/
50 62
51#ifdef ACPI_LIBRARY 63#ifdef ACPI_LIBRARY
52/* 64/*
53 * Note: The non-debug version of the acpi_library does not contain any 65 * Note: The non-debug version of the acpi_library does not contain any
54 * debug support, for minimimal size. The debug version uses ACPI_FULL_DEBUG 66 * debug support, for minimal size. The debug version uses ACPI_FULL_DEBUG
55 */ 67 */
56#define ACPI_USE_LOCAL_CACHE 68#define ACPI_USE_LOCAL_CACHE
57#endif 69#endif
@@ -75,17 +87,6 @@
75#define ACPI_DBG_TRACK_ALLOCATIONS 87#define ACPI_DBG_TRACK_ALLOCATIONS
76#endif 88#endif
77 89
78#ifdef ACPI_DASM_APP
79#ifndef MSDOS
80#define ACPI_DEBUG_OUTPUT
81#endif
82#define ACPI_APPLICATION
83#define ACPI_DISASSEMBLER
84#define ACPI_NO_METHOD_EXECUTION
85#define ACPI_LARGE_NAMESPACE_NODE
86#define ACPI_DATA_TABLE_DISASSEMBLY
87#endif
88
89#ifdef ACPI_APPLICATION 90#ifdef ACPI_APPLICATION
90#define ACPI_USE_SYSTEM_CLIBRARY 91#define ACPI_USE_SYSTEM_CLIBRARY
91#define ACPI_USE_LOCAL_CACHE 92#define ACPI_USE_LOCAL_CACHE
@@ -179,6 +180,19 @@
179 180
180/*! [End] no source code translation !*/ 181/*! [End] no source code translation !*/
181 182
183/******************************************************************************
184 *
185 * Miscellaneous configuration
186 *
187 *****************************************************************************/
188
189/*
190 * Are mutexes supported by the host? The default is no; binary semaphores are used.
191 */
192#ifndef ACPI_MUTEX_TYPE
193#define ACPI_MUTEX_TYPE ACPI_BINARY_SEMAPHORE
194#endif
195
182/* 196/*
183 * Debugger threading model 197 * Debugger threading model
184 * Use single threaded if the entire subsystem is contained in an application 198 * Use single threaded if the entire subsystem is contained in an application
@@ -187,9 +201,6 @@
187 * By default the model is single threaded if ACPI_APPLICATION is set, 201 * By default the model is single threaded if ACPI_APPLICATION is set,
188 * multi-threaded if ACPI_APPLICATION is not set. 202 * multi-threaded if ACPI_APPLICATION is not set.
189 */ 203 */
190#define DEBUGGER_SINGLE_THREADED 0
191#define DEBUGGER_MULTI_THREADED 1
192
193#ifndef DEBUGGER_THREADING 204#ifndef DEBUGGER_THREADING
194#ifdef ACPI_APPLICATION 205#ifdef ACPI_APPLICATION
195#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED 206#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 0515e754449d..6d49b2a498c4 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -46,6 +46,7 @@
46 46
47#define ACPI_USE_SYSTEM_CLIBRARY 47#define ACPI_USE_SYSTEM_CLIBRARY
48#define ACPI_USE_DO_WHILE_0 48#define ACPI_USE_DO_WHILE_0
49#define ACPI_MUTEX_TYPE ACPI_BINARY_SEMAPHORE
49 50
50#ifdef __KERNEL__ 51#ifdef __KERNEL__
51 52
@@ -70,9 +71,6 @@
70#define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol); 71#define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol);
71#define strtoul simple_strtoul 72#define strtoul simple_strtoul
72 73
73/* Full namespace pathname length limit - arbitrary */
74#define ACPI_PATHNAME_MAX 256
75
76#else /* !__KERNEL__ */ 74#else /* !__KERNEL__ */
77 75
78#include <stdarg.h> 76#include <stdarg.h>
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fba8051fb297..6fce2fc2d124 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -131,22 +131,6 @@ extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity);
131 */ 131 */
132void acpi_unregister_gsi (u32 gsi); 132void acpi_unregister_gsi (u32 gsi);
133 133
134struct acpi_prt_entry {
135 struct list_head node;
136 struct acpi_pci_id id;
137 u8 pin;
138 struct {
139 acpi_handle handle;
140 u32 index;
141 } link;
142 u32 irq;
143};
144
145struct acpi_prt_list {
146 int count;
147 struct list_head entries;
148};
149
150struct pci_dev; 134struct pci_dev;
151 135
152int acpi_pci_irq_enable (struct pci_dev *dev); 136int acpi_pci_irq_enable (struct pci_dev *dev);
@@ -270,6 +254,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
270#ifdef CONFIG_PM_SLEEP 254#ifdef CONFIG_PM_SLEEP
271void __init acpi_no_s4_hw_signature(void); 255void __init acpi_no_s4_hw_signature(void);
272void __init acpi_old_suspend_ordering(void); 256void __init acpi_old_suspend_ordering(void);
257void __init acpi_s4_no_nvs(void);
273#endif /* CONFIG_PM_SLEEP */ 258#endif /* CONFIG_PM_SLEEP */
274#else /* CONFIG_ACPI */ 259#else /* CONFIG_ACPI */
275 260
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 0f50d4cc4360..45f6297821bd 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -59,9 +59,7 @@ enum async_tx_flags {
59}; 59};
60 60
61#ifdef CONFIG_DMA_ENGINE 61#ifdef CONFIG_DMA_ENGINE
62void async_tx_issue_pending_all(void); 62#define async_tx_issue_pending_all dma_issue_pending_all
63enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
64void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
65#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL 63#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
66#include <asm/async_tx.h> 64#include <asm/async_tx.h>
67#else 65#else
@@ -77,19 +75,6 @@ static inline void async_tx_issue_pending_all(void)
77 do { } while (0); 75 do { } while (0);
78} 76}
79 77
80static inline enum dma_status
81dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
82{
83 return DMA_SUCCESS;
84}
85
86static inline void
87async_tx_run_dependencies(struct dma_async_tx_descriptor *tx,
88 struct dma_chan *host_chan)
89{
90 do { } while (0);
91}
92
93static inline struct dma_chan * 78static inline struct dma_chan *
94async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, 79async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
95 enum dma_transaction_type tx_type, struct page **dst, int dst_count, 80 enum dma_transaction_type tx_type, struct page **dst, int dst_count,
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 2a2213eefd85..2f1f95737acb 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -3,7 +3,7 @@
3 3
4#define ATMEL_MCI_MAX_NR_SLOTS 2 4#define ATMEL_MCI_MAX_NR_SLOTS 2
5 5
6struct dma_slave; 6#include <linux/dw_dmac.h>
7 7
8/** 8/**
9 * struct mci_slot_pdata - board-specific per-slot configuration 9 * struct mci_slot_pdata - board-specific per-slot configuration
@@ -28,11 +28,11 @@ struct mci_slot_pdata {
28 28
29/** 29/**
30 * struct mci_platform_data - board-specific MMC/SDcard configuration 30 * struct mci_platform_data - board-specific MMC/SDcard configuration
31 * @dma_slave: DMA slave interface to use in data transfers, or NULL. 31 * @dma_slave: DMA slave interface to use in data transfers.
32 * @slot: Per-slot configuration data. 32 * @slot: Per-slot configuration data.
33 */ 33 */
34struct mci_platform_data { 34struct mci_platform_data {
35 struct dma_slave *dma_slave; 35 struct dw_dma_slave dma_slave;
36 struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; 36 struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS];
37}; 37};
38 38
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index d7afa9dd6635..f3b5d4e3a2ac 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -23,16 +23,16 @@
23#define AT_PLATFORM 15 /* string identifying CPU for optimizations */ 23#define AT_PLATFORM 15 /* string identifying CPU for optimizations */
24#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */ 24#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */
25#define AT_CLKTCK 17 /* frequency at which times() increments */ 25#define AT_CLKTCK 17 /* frequency at which times() increments */
26 26/* AT_* values 18 through 22 are reserved */
27#define AT_SECURE 23 /* secure mode boolean */ 27#define AT_SECURE 23 /* secure mode boolean */
28
29#define AT_BASE_PLATFORM 24 /* string identifying real platform, may 28#define AT_BASE_PLATFORM 24 /* string identifying real platform, may
30 * differ from AT_PLATFORM. */ 29 * differ from AT_PLATFORM. */
30#define AT_RANDOM 25 /* address of 16 random bytes */
31 31
32#define AT_EXECFN 31 /* filename of program */ 32#define AT_EXECFN 31 /* filename of program */
33 33
34#ifdef __KERNEL__ 34#ifdef __KERNEL__
35#define AT_VECTOR_SIZE_BASE 18 /* NEW_AUX_ENT entries in auxiliary table */ 35#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
36 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ 36 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
37#endif 37#endif
38 38
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 1ee9488ca2e4..79ca2da81c87 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -31,6 +31,10 @@ struct backlight_device;
31struct fb_info; 31struct fb_info;
32 32
33struct backlight_ops { 33struct backlight_ops {
34 unsigned int options;
35
36#define BL_CORE_SUSPENDRESUME (1 << 0)
37
34 /* Notify the backlight driver some property has changed */ 38 /* Notify the backlight driver some property has changed */
35 int (*update_status)(struct backlight_device *); 39 int (*update_status)(struct backlight_device *);
36 /* Return the current backlight brightness (accounting for power, 40 /* Return the current backlight brightness (accounting for power,
@@ -51,7 +55,19 @@ struct backlight_properties {
51 modes; 4: full off), see FB_BLANK_XXX */ 55 modes; 4: full off), see FB_BLANK_XXX */
52 int power; 56 int power;
53 /* FB Blanking active? (values as for power) */ 57 /* FB Blanking active? (values as for power) */
58 /* Due to be removed, please use (state & BL_CORE_FBBLANK) */
54 int fb_blank; 59 int fb_blank;
60 /* Flags used to signal drivers of state changes */
61 /* Upper 4 bits are reserved for driver internal use */
62 unsigned int state;
63
64#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */
65#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */
66#define BL_CORE_DRIVER4 (1 << 28) /* reserved for driver specific use */
67#define BL_CORE_DRIVER3 (1 << 29) /* reserved for driver specific use */
68#define BL_CORE_DRIVER2 (1 << 30) /* reserved for driver specific use */
69#define BL_CORE_DRIVER1 (1 << 31) /* reserved for driver specific use */
70
55}; 71};
56 72
57struct backlight_device { 73struct backlight_device {
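The new options/state pair lets the backlight core handle suspend/resume on
a driver's behalf: with BL_CORE_SUSPENDRESUME set in ops->options, the core
toggles BL_CORE_SUSPENDED in props.state and calls update_status() around
suspend. A minimal driver sketch, assuming a hypothetical demo_hw_set_level()
hardware helper:

    #include <linux/backlight.h>

    static int demo_hw_set_level(int level);    /* hypothetical hw hook */

    /* Sketch: go dark while suspended or fb-blanked, otherwise apply
     * the requested brightness.
     */
    static int demo_bl_update_status(struct backlight_device *bd)
    {
            int level = bd->props.brightness;

            if (bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
                    level = 0;

            return demo_hw_set_level(level);
    }

    static struct backlight_ops demo_bl_ops = {
            .options        = BL_CORE_SUSPENDRESUME,
            .update_status  = demo_bl_update_status,
    };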
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7035cec583b6..044467ef7b11 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -690,6 +690,8 @@ struct rq_map_data {
690 struct page **pages; 690 struct page **pages;
691 int page_order; 691 int page_order;
692 int nr_entries; 692 int nr_entries;
693 unsigned long offset;
694 int null_mapped;
693}; 695};
694 696
695struct req_iterator { 697struct req_iterator {
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index f50785ad4781..25085cbadcfc 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -19,7 +19,7 @@
19#include <linux/skbuff.h> 19#include <linux/skbuff.h>
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21 21
22#define CAN_VERSION "20081130" 22#define CAN_VERSION "20090105"
23 23
24/* increment this number each time you change some user-space interface */ 24/* increment this number each time you change some user-space interface */
25#define CAN_ABI_VERSION "8" 25#define CAN_ABI_VERSION "8"
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 08b78c09b09a..e267e62827bb 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -52,9 +52,9 @@ struct cgroup_subsys_state {
52 * hierarchy structure */ 52 * hierarchy structure */
53 struct cgroup *cgroup; 53 struct cgroup *cgroup;
54 54
55 /* State maintained by the cgroup system to allow 55 /* State maintained by the cgroup system to allow subsystems
56 * subsystems to be "busy". Should be accessed via css_get() 56 * to be "busy". Should be accessed via css_get(),
 57 * and css_put() */ 57 * css_tryget() and css_put(). */
58 58
59 atomic_t refcnt; 59 atomic_t refcnt;
60 60
@@ -64,11 +64,14 @@ struct cgroup_subsys_state {
64/* bits in struct cgroup_subsys_state flags field */ 64/* bits in struct cgroup_subsys_state flags field */
65enum { 65enum {
66 CSS_ROOT, /* This CSS is the root of the subsystem */ 66 CSS_ROOT, /* This CSS is the root of the subsystem */
67 CSS_REMOVED, /* This CSS is dead */
67}; 68};
68 69
69/* 70/*
70 * Call css_get() to hold a reference on the cgroup; 71 * Call css_get() to hold a reference on the css; it can be used
71 * 72 * for a reference obtained via:
73 * - an existing ref-counted reference to the css
74 * - task->cgroups for a locked task
72 */ 75 */
73 76
74static inline void css_get(struct cgroup_subsys_state *css) 77static inline void css_get(struct cgroup_subsys_state *css)
@@ -77,9 +80,32 @@ static inline void css_get(struct cgroup_subsys_state *css)
77 if (!test_bit(CSS_ROOT, &css->flags)) 80 if (!test_bit(CSS_ROOT, &css->flags))
78 atomic_inc(&css->refcnt); 81 atomic_inc(&css->refcnt);
79} 82}
83
84static inline bool css_is_removed(struct cgroup_subsys_state *css)
85{
86 return test_bit(CSS_REMOVED, &css->flags);
87}
88
89/*
90 * Call css_tryget() to take a reference on a css if your existing
91 * (known-valid) reference isn't already ref-counted. Returns false if
92 * the css has been destroyed.
93 */
94
95static inline bool css_tryget(struct cgroup_subsys_state *css)
96{
97 if (test_bit(CSS_ROOT, &css->flags))
98 return true;
99 while (!atomic_inc_not_zero(&css->refcnt)) {
100 if (test_bit(CSS_REMOVED, &css->flags))
101 return false;
102 }
103 return true;
104}
105
80/* 106/*
81 * css_put() should be called to release a reference taken by 107 * css_put() should be called to release a reference taken by
82 * css_get() 108 * css_get() or css_tryget()
83 */ 109 */
84 110
85extern void __css_put(struct cgroup_subsys_state *css); 111extern void __css_put(struct cgroup_subsys_state *css);
@@ -116,7 +142,7 @@ struct cgroup {
116 struct list_head children; /* my children */ 142 struct list_head children; /* my children */
117 143
118 struct cgroup *parent; /* my parent */ 144 struct cgroup *parent; /* my parent */
119 struct dentry *dentry; /* cgroup fs entry */ 145 struct dentry *dentry; /* cgroup fs entry, RCU protected */
120 146
121 /* Private pointers for each registered subsystem */ 147 /* Private pointers for each registered subsystem */
122 struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; 148 struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
@@ -145,6 +171,9 @@ struct cgroup {
145 int pids_use_count; 171 int pids_use_count;
146 /* Length of the current tasks_pids array */ 172 /* Length of the current tasks_pids array */
147 int pids_length; 173 int pids_length;
174
175 /* For RCU-protected deletion */
176 struct rcu_head rcu_head;
148}; 177};
149 178
150/* A css_set is a structure holding pointers to a set of 179/* A css_set is a structure holding pointers to a set of
@@ -337,9 +366,23 @@ struct cgroup_subsys {
337#define MAX_CGROUP_TYPE_NAMELEN 32 366#define MAX_CGROUP_TYPE_NAMELEN 32
338 const char *name; 367 const char *name;
339 368
340 /* Protected by RCU */ 369 /*
341 struct cgroupfs_root *root; 370 * Protects sibling/children links of cgroups in this
371 * hierarchy, plus protects which hierarchy (or none) the
372 * subsystem is a part of (i.e. root/sibling). To avoid
373 * potential deadlocks, the following operations should not be
374 * undertaken while holding any hierarchy_mutex:
375 *
376 * - allocating memory
377 * - initiating hotplug events
378 */
379 struct mutex hierarchy_mutex;
342 380
381 /*
382 * Link to parent, and list entry in parent's children.
383 * Protected by this->hierarchy_mutex and cgroup_lock()
384 */
385 struct cgroupfs_root *root;
343 struct list_head sibling; 386 struct list_head sibling;
344}; 387};
345 388
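css_tryget() fills the gap where a css pointer is known valid (for example
under rcu_read_lock()) but not yet ref-counted, and fails once CSS_REMOVED
is set. A sketch of the intended pattern; demo_subsys_id stands in for a
real subsystem id:

    /* Sketch: pin a task's subsystem state for use after rcu_read_unlock(). */
    static struct cgroup_subsys_state *demo_pin_css(struct task_struct *task)
    {
            struct cgroup_subsys_state *css;

            rcu_read_lock();
            css = task_subsys_state(task, demo_subsys_id);
            if (!css_tryget(css))
                    css = NULL;     /* cgroup is being removed */
            rcu_read_unlock();

            return css;             /* caller drops it with css_put() */
    }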
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 51ea2bdea0f9..90c6074a36ca 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -20,8 +20,9 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
20extern int cpuset_init_early(void); 20extern int cpuset_init_early(void);
21extern int cpuset_init(void); 21extern int cpuset_init(void);
22extern void cpuset_init_smp(void); 22extern void cpuset_init_smp(void);
23extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask); 23extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
24extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask); 24extern void cpuset_cpus_allowed_locked(struct task_struct *p,
25 struct cpumask *mask);
25extern nodemask_t cpuset_mems_allowed(struct task_struct *p); 26extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
26#define cpuset_current_mems_allowed (current->mems_allowed) 27#define cpuset_current_mems_allowed (current->mems_allowed)
27void cpuset_init_current_mems_allowed(void); 28void cpuset_init_current_mems_allowed(void);
@@ -86,12 +87,13 @@ static inline int cpuset_init_early(void) { return 0; }
86static inline int cpuset_init(void) { return 0; } 87static inline int cpuset_init(void) { return 0; }
87static inline void cpuset_init_smp(void) {} 88static inline void cpuset_init_smp(void) {}
88 89
89static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask) 90static inline void cpuset_cpus_allowed(struct task_struct *p,
91 struct cpumask *mask)
90{ 92{
91 *mask = cpu_possible_map; 93 *mask = cpu_possible_map;
92} 94}
93static inline void cpuset_cpus_allowed_locked(struct task_struct *p, 95static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
94 cpumask_t *mask) 96 struct cpumask *mask)
95{ 97{
96 *mask = cpu_possible_map; 98 *mask = cpu_possible_map;
97} 99}
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index adb0b084eb5a..64dea2ab326c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -29,32 +29,6 @@
29#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
30 30
31/** 31/**
32 * enum dma_state - resource PNP/power management state
33 * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
34 * @DMA_RESOURCE_RESUME: DMA device returning to full power
35 * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
36 * @DMA_RESOURCE_REMOVED: DMA device removed from the system
37 */
38enum dma_state {
39 DMA_RESOURCE_SUSPEND,
40 DMA_RESOURCE_RESUME,
41 DMA_RESOURCE_AVAILABLE,
42 DMA_RESOURCE_REMOVED,
43};
44
45/**
46 * enum dma_state_client - state of the channel in the client
47 * @DMA_ACK: client would like to use, or was using this channel
48 * @DMA_DUP: client has already seen this channel, or is not using this channel
49 * @DMA_NAK: client does not want to see any more channels
50 */
51enum dma_state_client {
52 DMA_ACK,
53 DMA_DUP,
54 DMA_NAK,
55};
56
57/**
58 * typedef dma_cookie_t - an opaque DMA cookie 32 * typedef dma_cookie_t - an opaque DMA cookie
59 * 33 *
60 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code 34 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
@@ -89,23 +63,13 @@ enum dma_transaction_type {
89 DMA_MEMSET, 63 DMA_MEMSET,
90 DMA_MEMCPY_CRC32C, 64 DMA_MEMCPY_CRC32C,
91 DMA_INTERRUPT, 65 DMA_INTERRUPT,
66 DMA_PRIVATE,
92 DMA_SLAVE, 67 DMA_SLAVE,
93}; 68};
94 69
95/* last transaction type for creation of the capabilities mask */ 70/* last transaction type for creation of the capabilities mask */
96#define DMA_TX_TYPE_END (DMA_SLAVE + 1) 71#define DMA_TX_TYPE_END (DMA_SLAVE + 1)
97 72
98/**
99 * enum dma_slave_width - DMA slave register access width.
100 * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
101 * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
102 * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
103 */
104enum dma_slave_width {
105 DMA_SLAVE_WIDTH_8BIT,
106 DMA_SLAVE_WIDTH_16BIT,
107 DMA_SLAVE_WIDTH_32BIT,
108};
109 73
110/** 74/**
111 * enum dma_ctrl_flags - DMA flags to augment operation preparation, 75 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
@@ -132,32 +96,6 @@ enum dma_ctrl_flags {
132typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; 96typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
133 97
134/** 98/**
135 * struct dma_slave - Information about a DMA slave
136 * @dev: device acting as DMA slave
137 * @dma_dev: required DMA master device. If non-NULL, the client can not be
138 * bound to other masters than this.
139 * @tx_reg: physical address of data register used for
140 * memory-to-peripheral transfers
141 * @rx_reg: physical address of data register used for
142 * peripheral-to-memory transfers
143 * @reg_width: peripheral register width
144 *
145 * If dma_dev is non-NULL, the client can not be bound to other DMA
146 * masters than the one corresponding to this device. The DMA master
147 * driver may use this to determine if there is controller-specific
148 * data wrapped around this struct. Drivers of platform code that sets
149 * the dma_dev field must therefore make sure to use an appropriate
150 * controller-specific dma slave structure wrapping this struct.
151 */
152struct dma_slave {
153 struct device *dev;
154 struct device *dma_dev;
155 dma_addr_t tx_reg;
156 dma_addr_t rx_reg;
157 enum dma_slave_width reg_width;
158};
159
160/**
161 * struct dma_chan_percpu - the per-CPU part of struct dma_chan 99 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
162 * @refcount: local_t used for open-coded "bigref" counting 100 * @refcount: local_t used for open-coded "bigref" counting
163 * @memcpy_count: transaction counter 101 * @memcpy_count: transaction counter
@@ -165,7 +103,6 @@ struct dma_slave {
165 */ 103 */
166 104
167struct dma_chan_percpu { 105struct dma_chan_percpu {
168 local_t refcount;
169 /* stats */ 106 /* stats */
170 unsigned long memcpy_count; 107 unsigned long memcpy_count;
171 unsigned long bytes_transferred; 108 unsigned long bytes_transferred;
@@ -176,13 +113,14 @@ struct dma_chan_percpu {
176 * @device: ptr to the dma device who supplies this channel, always !%NULL 113 * @device: ptr to the dma device who supplies this channel, always !%NULL
177 * @cookie: last cookie value returned to client 114 * @cookie: last cookie value returned to client
178 * @chan_id: channel ID for sysfs 115 * @chan_id: channel ID for sysfs
179 * @class_dev: class device for sysfs 116 * @dev: class device for sysfs
180 * @refcount: kref, used in "bigref" slow-mode 117 * @refcount: kref, used in "bigref" slow-mode
181 * @slow_ref: indicates that the DMA channel is free 118 * @slow_ref: indicates that the DMA channel is free
182 * @rcu: the DMA channel's RCU head 119 * @rcu: the DMA channel's RCU head
183 * @device_node: used to add this to the device chan list 120 * @device_node: used to add this to the device chan list
184 * @local: per-cpu pointer to a struct dma_chan_percpu 121 * @local: per-cpu pointer to a struct dma_chan_percpu
 185 * @client_count: how many clients are using this channel 122 * @client_count: how many clients are using this channel
123 * @table_count: number of appearances in the mem-to-mem allocation table
186 */ 124 */
187struct dma_chan { 125struct dma_chan {
188 struct dma_device *device; 126 struct dma_device *device;
@@ -190,73 +128,47 @@ struct dma_chan {
190 128
191 /* sysfs */ 129 /* sysfs */
192 int chan_id; 130 int chan_id;
193 struct device dev; 131 struct dma_chan_dev *dev;
194
195 struct kref refcount;
196 int slow_ref;
197 struct rcu_head rcu;
198 132
199 struct list_head device_node; 133 struct list_head device_node;
200 struct dma_chan_percpu *local; 134 struct dma_chan_percpu *local;
201 int client_count; 135 int client_count;
136 int table_count;
202}; 137};
203 138
204#define to_dma_chan(p) container_of(p, struct dma_chan, dev) 139/**
205 140 * struct dma_chan_dev - relate sysfs device node to backing channel device
206void dma_chan_cleanup(struct kref *kref); 141 * @chan - driver channel device
207 142 * @device - sysfs device
208static inline void dma_chan_get(struct dma_chan *chan) 143 * @dev_id - parent dma_device dev_id
209{ 144 * @idr_ref - reference count to gate release of dma_device dev_id
210 if (unlikely(chan->slow_ref)) 145 */
211 kref_get(&chan->refcount); 146struct dma_chan_dev {
212 else { 147 struct dma_chan *chan;
213 local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount)); 148 struct device device;
214 put_cpu(); 149 int dev_id;
215 } 150 atomic_t *idr_ref;
216} 151};
217 152
218static inline void dma_chan_put(struct dma_chan *chan) 153static inline const char *dma_chan_name(struct dma_chan *chan)
219{ 154{
220 if (unlikely(chan->slow_ref)) 155 return dev_name(&chan->dev->device);
221 kref_put(&chan->refcount, dma_chan_cleanup);
222 else {
223 local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
224 put_cpu();
225 }
226} 156}
227 157
228/* 158void dma_chan_cleanup(struct kref *kref);
229 * typedef dma_event_callback - function pointer to a DMA event callback
230 * For each channel added to the system this routine is called for each client.
231 * If the client would like to use the channel it returns '1' to signal (ack)
232 * the dmaengine core to take out a reference on the channel and its
233 * corresponding device. A client must not 'ack' an available channel more
234 * than once. When a channel is removed all clients are notified. If a client
235 * is using the channel it must 'ack' the removal. A client must not 'ack' a
236 * removed channel more than once.
237 * @client - 'this' pointer for the client context
238 * @chan - channel to be acted upon
239 * @state - available or removed
240 */
241struct dma_client;
242typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
243 struct dma_chan *chan, enum dma_state state);
244 159
245/** 160/**
246 * struct dma_client - info on the entity making use of DMA services 161 * typedef dma_filter_fn - callback filter for dma_request_channel
247 * @event_callback: func ptr to call when something happens 162 * @chan: channel to be reviewed
248 * @cap_mask: only return channels that satisfy the requested capabilities 163 * @filter_param: opaque parameter passed through dma_request_channel
249 * a value of zero corresponds to any capability 164 *
 250 * @slave: data for preparing slave transfer. Must be non-NULL iff the 165 * When this optional parameter is specified in a call to dma_request_channel,
 251 * DMA_SLAVE capability is requested. 166 * a suitable channel is passed to this routine for further screening before
 252 * @global_node: list_head for global dma_client_list 167 * being returned; 'suitable' means a non-busy channel that satisfies the
 168 * given capability mask. The filter returns 'true' to accept the
 169 * channel.
253 */ 170 */
254struct dma_client { 171typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
255 dma_event_callback event_callback;
256 dma_cap_mask_t cap_mask;
257 struct dma_slave *slave;
258 struct list_head global_node;
259};
260 172
261typedef void (*dma_async_tx_callback)(void *dma_async_param); 173typedef void (*dma_async_tx_callback)(void *dma_async_param);
262/** 174/**
@@ -323,14 +235,10 @@ struct dma_device {
323 dma_cap_mask_t cap_mask; 235 dma_cap_mask_t cap_mask;
324 int max_xor; 236 int max_xor;
325 237
326 struct kref refcount;
327 struct completion done;
328
329 int dev_id; 238 int dev_id;
330 struct device *dev; 239 struct device *dev;
331 240
332 int (*device_alloc_chan_resources)(struct dma_chan *chan, 241 int (*device_alloc_chan_resources)(struct dma_chan *chan);
333 struct dma_client *client);
334 void (*device_free_chan_resources)(struct dma_chan *chan); 242 void (*device_free_chan_resources)(struct dma_chan *chan);
335 243
336 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( 244 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -362,9 +270,8 @@ struct dma_device {
362 270
363/* --- public DMA engine API --- */ 271/* --- public DMA engine API --- */
364 272
365void dma_async_client_register(struct dma_client *client); 273void dmaengine_get(void);
366void dma_async_client_unregister(struct dma_client *client); 274void dmaengine_put(void);
367void dma_async_client_chan_request(struct dma_client *client);
368dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, 275dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
369 void *dest, void *src, size_t len); 276 void *dest, void *src, size_t len);
370dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, 277dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -406,6 +313,12 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
406 set_bit(tx_type, dstp->bits); 313 set_bit(tx_type, dstp->bits);
407} 314}
408 315
316#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
317static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
318{
319 bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
320}
321
409#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) 322#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
410static inline int 323static inline int
411__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) 324__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
@@ -475,11 +388,25 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
475} 388}
476 389
477enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); 390enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
391#ifdef CONFIG_DMA_ENGINE
392enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
393#else
394static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
395{
396 return DMA_SUCCESS;
397}
398#endif
478 399
479/* --- DMA device --- */ 400/* --- DMA device --- */
480 401
481int dma_async_device_register(struct dma_device *device); 402int dma_async_device_register(struct dma_device *device);
482void dma_async_device_unregister(struct dma_device *device); 403void dma_async_device_unregister(struct dma_device *device);
404void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
405struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
406void dma_issue_pending_all(void);
407#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
408struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
409void dma_release_channel(struct dma_chan *chan);
483 410
484/* --- Helper iov-locking functions --- */ 411/* --- Helper iov-locking functions --- */
485 412
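The dma_client registration and callback machinery is replaced by two
simpler paths: dmaengine_get()/dmaengine_put() pin the mem-to-mem channel
table for opportunistic users of dma_find_channel(), while
dma_request_channel() plus an optional dma_filter_fn claims a channel for
exclusive (DMA_PRIVATE) use. A hedged sketch of the exclusive path:

    #include <linux/dmaengine.h>

    /* Sketch: claim any free memcpy-capable channel. A filter function
     * and parameter could replace the two NULLs to pick out a specific
     * controller's channel.
     */
    static struct dma_chan *demo_grab_memcpy_chan(void)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_MEMCPY, mask);

            return dma_request_channel(mask, NULL, NULL);
    }

The channel is handed back with dma_release_channel() when no longer needed.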
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index 04d217b442bf..d797dde247f7 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -22,14 +22,34 @@ struct dw_dma_platform_data {
22}; 22};
23 23
24/** 24/**
25 * enum dw_dma_slave_width - DMA slave register access width.
26 * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
27 * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
28 * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
29 */
30enum dw_dma_slave_width {
31 DW_DMA_SLAVE_WIDTH_8BIT,
32 DW_DMA_SLAVE_WIDTH_16BIT,
33 DW_DMA_SLAVE_WIDTH_32BIT,
34};
35
36/**
25 * struct dw_dma_slave - Controller-specific information about a slave 37 * struct dw_dma_slave - Controller-specific information about a slave
26 * @slave: Generic information about the slave 38 *
27 * @ctl_lo: Platform-specific initializer for the CTL_LO register 39 * @dma_dev: required DMA master device
40 * @tx_reg: physical address of data register used for
41 * memory-to-peripheral transfers
42 * @rx_reg: physical address of data register used for
43 * peripheral-to-memory transfers
44 * @reg_width: peripheral register width
28 * @cfg_hi: Platform-specific initializer for the CFG_HI register 45 * @cfg_hi: Platform-specific initializer for the CFG_HI register
29 * @cfg_lo: Platform-specific initializer for the CFG_LO register 46 * @cfg_lo: Platform-specific initializer for the CFG_LO register
30 */ 47 */
31struct dw_dma_slave { 48struct dw_dma_slave {
32 struct dma_slave slave; 49 struct device *dma_dev;
50 dma_addr_t tx_reg;
51 dma_addr_t rx_reg;
52 enum dw_dma_slave_width reg_width;
33 u32 cfg_hi; 53 u32 cfg_hi;
34 u32 cfg_lo; 54 u32 cfg_lo;
35}; 55};
@@ -54,9 +74,4 @@ struct dw_dma_slave {
54#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ 74#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
55#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ 75#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
56 76
57static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave)
58{
59 return container_of(slave, struct dw_dma_slave, slave);
60}
61
62#endif /* DW_DMAC_H */ 77#endif /* DW_DMAC_H */
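With the generic struct dma_slave gone, dw_dma_slave now carries the DMA
master device and data-register description itself, and boards embed it
directly in their MMC platform data (see the atmel-mci.h change above). A
board-file sketch in which the device and register addresses are
placeholders:

    #include <linux/platform_device.h>
    #include <linux/atmel-mci.h>
    #include <linux/dw_dmac.h>

    extern struct platform_device demo_dwdma_device;  /* hypothetical DMAC */

    static struct mci_platform_data demo_mci_data = {
            .dma_slave = {
                    .dma_dev   = &demo_dwdma_device.dev,
                    .tx_reg    = 0xfff02c00 + 0x34,  /* placeholder TDR */
                    .rx_reg    = 0xfff02c00 + 0x30,  /* placeholder RDR */
                    .reg_width = DW_DMA_SLAVE_WIDTH_32BIT,
            },
            /* .slot[] configuration omitted */
    };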
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index 78c775a83f7c..121720d74e15 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -194,6 +194,30 @@ struct ext2_group_desc
194#define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */ 194#define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
195#define EXT2_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */ 195#define EXT2_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
196 196
197/* Flags that should be inherited by new inodes from their parent. */
198#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
199 EXT2_SYNC_FL | EXT2_IMMUTABLE_FL | EXT2_APPEND_FL |\
200 EXT2_NODUMP_FL | EXT2_NOATIME_FL | EXT2_COMPRBLK_FL|\
201 EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
202 EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
203
204/* Flags that are appropriate for regular files (all but dir-specific ones). */
205#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
206
207/* Flags that are appropriate for non-directories/regular files. */
208#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
209
210/* Mask out flags that are inappropriate for the given type of inode. */
211static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
212{
213 if (S_ISDIR(mode))
214 return flags;
215 else if (S_ISREG(mode))
216 return flags & EXT2_REG_FLMASK;
217 else
218 return flags & EXT2_OTHER_FLMASK;
219}
220
197/* 221/*
198 * ioctl commands 222 * ioctl commands
199 */ 223 */
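ext2_mask_flags() is meant for the inode-allocation path, paired with
EXT2_FL_INHERITED so a new inode inherits only the parent flags valid for
its file type (ext3 gains identical helpers below). A sketch of the call
site, using ext2's in-kernel inode-info accessor:

    /* Sketch: inherit the parent directory's flags, minus anything
     * inappropriate for the new inode's mode.
     */
    ei->i_flags = ext2_mask_flags(mode, EXT2_I(dir)->i_flags & EXT2_FL_INHERITED);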
diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h
index dc541f3653d1..1cdb66367c98 100644
--- a/include/linux/ext2_fs_sb.h
+++ b/include/linux/ext2_fs_sb.h
@@ -101,7 +101,7 @@ struct ext2_sb_info {
101 struct percpu_counter s_freeblocks_counter; 101 struct percpu_counter s_freeblocks_counter;
102 struct percpu_counter s_freeinodes_counter; 102 struct percpu_counter s_freeinodes_counter;
103 struct percpu_counter s_dirs_counter; 103 struct percpu_counter s_dirs_counter;
104 struct blockgroup_lock s_blockgroup_lock; 104 struct blockgroup_lock *s_blockgroup_lock;
105 /* root of the per fs reservation window tree */ 105 /* root of the per fs reservation window tree */
106 spinlock_t s_rsv_window_lock; 106 spinlock_t s_rsv_window_lock;
107 struct rb_root s_rsv_window_root; 107 struct rb_root s_rsv_window_root;
@@ -111,7 +111,7 @@ struct ext2_sb_info {
111static inline spinlock_t * 111static inline spinlock_t *
112sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group) 112sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
113{ 113{
114 return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group); 114 return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
115} 115}
116 116
117#endif /* _LINUX_EXT2_FS_SB */ 117#endif /* _LINUX_EXT2_FS_SB */
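Making s_blockgroup_lock a pointer implies the (large) per-group lock array
is now allocated separately at mount time instead of being embedded in
ext2_sb_info; the ext3 superblock info below gets the same treatment. A
fill_super sketch under that assumption:

    /* Sketch: allocate and initialise the block-group lock array. */
    sbi->s_blockgroup_lock =
            kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
    if (!sbi->s_blockgroup_lock) {
            kfree(sbi);
            return -ENOMEM;
    }
    bgl_lock_init(sbi->s_blockgroup_lock);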
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index d14f02918483..dd495b8c3091 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -178,6 +178,30 @@ struct ext3_group_desc
178#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ 178#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
179#define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ 179#define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
180 180
181/* Flags that should be inherited by new inodes from their parent. */
182#define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
183 EXT3_SYNC_FL | EXT3_IMMUTABLE_FL | EXT3_APPEND_FL |\
184 EXT3_NODUMP_FL | EXT3_NOATIME_FL | EXT3_COMPRBLK_FL|\
185 EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\
186 EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL)
187
188/* Flags that are appropriate for regular files (all but dir-specific ones). */
189#define EXT3_REG_FLMASK (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL))
190
191/* Flags that are appropriate for non-directories/regular files. */
192#define EXT3_OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL)
193
194/* Mask out flags that are inappropriate for the given type of inode. */
195static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
196{
197 if (S_ISDIR(mode))
198 return flags;
199 else if (S_ISREG(mode))
200 return flags & EXT3_REG_FLMASK;
201 else
202 return flags & EXT3_OTHER_FLMASK;
203}
204
181/* 205/*
182 * Inode dynamic state flags 206 * Inode dynamic state flags
183 */ 207 */
@@ -354,6 +378,13 @@ struct ext3_inode {
354#define EXT3_ORPHAN_FS 0x0004 /* Orphans being recovered */ 378#define EXT3_ORPHAN_FS 0x0004 /* Orphans being recovered */
355 379
356/* 380/*
381 * Misc. filesystem flags
382 */
383#define EXT2_FLAGS_SIGNED_HASH 0x0001 /* Signed dirhash in use */
384#define EXT2_FLAGS_UNSIGNED_HASH 0x0002 /* Unsigned dirhash in use */
385#define EXT2_FLAGS_TEST_FILESYS 0x0004 /* to test development code */
386
387/*
357 * Mount flags 388 * Mount flags
358 */ 389 */
359#define EXT3_MOUNT_CHECK 0x00001 /* Do mount-time checks */ 390#define EXT3_MOUNT_CHECK 0x00001 /* Do mount-time checks */
@@ -489,7 +520,23 @@ struct ext3_super_block {
489 __u16 s_reserved_word_pad; 520 __u16 s_reserved_word_pad;
490 __le32 s_default_mount_opts; 521 __le32 s_default_mount_opts;
491 __le32 s_first_meta_bg; /* First metablock block group */ 522 __le32 s_first_meta_bg; /* First metablock block group */
492 __u32 s_reserved[190]; /* Padding to the end of the block */ 523 __le32 s_mkfs_time; /* When the filesystem was created */
524 __le32 s_jnl_blocks[17]; /* Backup of the journal inode */
525 /* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */
526/*150*/ __le32 s_blocks_count_hi; /* Blocks count */
527 __le32 s_r_blocks_count_hi; /* Reserved blocks count */
528 __le32 s_free_blocks_count_hi; /* Free blocks count */
529 __le16 s_min_extra_isize; /* All inodes have at least # bytes */
530 __le16 s_want_extra_isize; /* New inodes should reserve # bytes */
531 __le32 s_flags; /* Miscellaneous flags */
532 __le16 s_raid_stride; /* RAID stride */
533 __le16 s_mmp_interval; /* # seconds to wait in MMP checking */
534 __le64 s_mmp_block; /* Block for multi-mount protection */
535 __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
536 __u8 s_log_groups_per_flex; /* FLEX_BG group size */
537 __u8 s_reserved_char_pad2;
538 __le16 s_reserved_pad;
539 __u32 s_reserved[162]; /* Padding to the end of the block */
493}; 540};
494 541
495#ifdef __KERNEL__ 542#ifdef __KERNEL__
@@ -694,6 +741,9 @@ static inline __le16 ext3_rec_len_to_disk(unsigned len)
694#define DX_HASH_LEGACY 0 741#define DX_HASH_LEGACY 0
695#define DX_HASH_HALF_MD4 1 742#define DX_HASH_HALF_MD4 1
696#define DX_HASH_TEA 2 743#define DX_HASH_TEA 2
744#define DX_HASH_LEGACY_UNSIGNED 3
745#define DX_HASH_HALF_MD4_UNSIGNED 4
746#define DX_HASH_TEA_UNSIGNED 5
697 747
698#ifdef __KERNEL__ 748#ifdef __KERNEL__
699 749
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h
index e024e38248ff..f07f34de2f0e 100644
--- a/include/linux/ext3_fs_sb.h
+++ b/include/linux/ext3_fs_sb.h
@@ -57,10 +57,11 @@ struct ext3_sb_info {
57 u32 s_next_generation; 57 u32 s_next_generation;
58 u32 s_hash_seed[4]; 58 u32 s_hash_seed[4];
59 int s_def_hash_version; 59 int s_def_hash_version;
 60 int s_hash_unsigned; /* 3 if hash should be unsigned, 0 if not */
60 struct percpu_counter s_freeblocks_counter; 61 struct percpu_counter s_freeblocks_counter;
61 struct percpu_counter s_freeinodes_counter; 62 struct percpu_counter s_freeinodes_counter;
62 struct percpu_counter s_dirs_counter; 63 struct percpu_counter s_dirs_counter;
63 struct blockgroup_lock s_blockgroup_lock; 64 struct blockgroup_lock *s_blockgroup_lock;
64 65
65 /* root of the per fs reservation window tree */ 66 /* root of the per fs reservation window tree */
66 spinlock_t s_rsv_window_lock; 67 spinlock_t s_rsv_window_lock;
@@ -86,7 +87,7 @@ struct ext3_sb_info {
86static inline spinlock_t * 87static inline spinlock_t *
87sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group) 88sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
88{ 89{
89 return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group); 90 return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
90} 91}
91 92
92#endif /* _LINUX_EXT3_FS_SB */ 93#endif /* _LINUX_EXT3_FS_SB */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e38a64d71eff..0b87b29f4797 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -565,6 +565,7 @@ struct address_space {
565struct block_device { 565struct block_device {
566 dev_t bd_dev; /* not a kdev_t - it's a search key */ 566 dev_t bd_dev; /* not a kdev_t - it's a search key */
567 struct inode * bd_inode; /* will die */ 567 struct inode * bd_inode; /* will die */
568 struct super_block * bd_super;
568 int bd_openers; 569 int bd_openers;
569 struct mutex bd_mutex; /* open/close mutex */ 570 struct mutex bd_mutex; /* open/close mutex */
570 struct semaphore bd_mount_sem; 571 struct semaphore bd_mount_sem;
@@ -1389,6 +1390,7 @@ struct super_operations {
1389 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); 1390 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
1390 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); 1391 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
1391#endif 1392#endif
1393 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
1392}; 1394};
1393 1395
1394/* 1396/*
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a5cb0c3f6dcf..f8ff918c208f 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -115,6 +115,11 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
115extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, 115extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
116 u16 vlan_tci, int polling); 116 u16 vlan_tci, int polling);
117extern int vlan_hwaccel_do_receive(struct sk_buff *skb); 117extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
118extern int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
119 unsigned int vlan_tci, struct sk_buff *skb);
120extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
121 unsigned int vlan_tci,
122 struct napi_gro_fraginfo *info);
118 123
119#else 124#else
120static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) 125static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -140,6 +145,20 @@ static inline int vlan_hwaccel_do_receive(struct sk_buff *skb)
140{ 145{
141 return 0; 146 return 0;
142} 147}
148
149static inline int vlan_gro_receive(struct napi_struct *napi,
150 struct vlan_group *grp,
151 unsigned int vlan_tci, struct sk_buff *skb)
152{
153 return NET_RX_DROP;
154}
155
156static inline int vlan_gro_frags(struct napi_struct *napi,
157 struct vlan_group *grp, unsigned int vlan_tci,
158 struct napi_gro_fraginfo *info)
159{
160 return NET_RX_DROP;
161}
143#endif 162#endif
144 163
145/** 164/**
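vlan_gro_receive() and vlan_gro_frags() give VLAN-accelerating drivers a
GRO entry point that preserves the tag, with NET_RX_DROP stubs when VLAN
support is compiled out. A sketch of a NAPI poll handler using it, where
demo->vlgrp and the extracted tag come from the driver's own RX descriptor
parsing:

    /* Sketch: feed a received frame to GRO, via the VLAN path when the
     * hardware stripped a tag.
     */
    if (demo->vlgrp && vlan_tag_present)
            vlan_gro_receive(&demo->napi, demo->vlgrp, vlan_tci, skb);
    else
            napi_gro_receive(&demo->napi, skb);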
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index f98a656b17e5..76dad4808847 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -86,4 +86,6 @@ static inline int task_nice_ioclass(struct task_struct *task)
86 */ 86 */
87extern int ioprio_best(unsigned short aprio, unsigned short bprio); 87extern int ioprio_best(unsigned short aprio, unsigned short bprio);
88 88
89extern int set_task_ioprio(struct task_struct *task, int ioprio);
90
89#endif 91#endif
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 346e2b80be7d..6384b19efe64 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -543,6 +543,11 @@ struct transaction_s
543 unsigned long t_expires; 543 unsigned long t_expires;
544 544
545 /* 545 /*
546 * When this transaction started, in nanoseconds [no locking]
547 */
548 ktime_t t_start_time;
549
550 /*
546 * How many handles used this transaction? [t_handle_lock] 551 * How many handles used this transaction? [t_handle_lock]
547 */ 552 */
548 int t_handle_count; 553 int t_handle_count;
@@ -798,9 +803,19 @@ struct journal_s
798 struct buffer_head **j_wbuf; 803 struct buffer_head **j_wbuf;
799 int j_wbufsize; 804 int j_wbufsize;
800 805
806 /*
807 * this is the pid of the last person to run a synchronous operation
808 * through the journal.
809 */
801 pid_t j_last_sync_writer; 810 pid_t j_last_sync_writer;
802 811
803 /* 812 /*
813 * the average amount of time in nanoseconds it takes to commit a
814 * transaction to the disk. [j_state_lock]
815 */
816 u64 j_average_commit_time;
817
818 /*
804 * An opaque pointer to fs-private information. ext3 puts its 819 * An opaque pointer to fs-private information. ext3 puts its
805 * superblock pointer here 820 * superblock pointer here
806 */ 821 */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 34456476e761..b45109c61fba 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -638,6 +638,11 @@ struct transaction_s
638 unsigned long t_expires; 638 unsigned long t_expires;
639 639
640 /* 640 /*
641 * When this transaction started, in nanoseconds [no locking]
642 */
643 ktime_t t_start_time;
644
645 /*
641 * How many handles used this transaction? [t_handle_lock] 646 * How many handles used this transaction? [t_handle_lock]
642 */ 647 */
643 int t_handle_count; 648 int t_handle_count;
@@ -682,6 +687,8 @@ jbd2_time_diff(unsigned long start, unsigned long end)
682 return end + (MAX_JIFFY_OFFSET - start); 687 return end + (MAX_JIFFY_OFFSET - start);
683} 688}
684 689
690#define JBD2_NR_BATCH 64
691
685/** 692/**
686 * struct journal_s - The journal_s type is the concrete type associated with 693 * struct journal_s - The journal_s type is the concrete type associated with
687 * journal_t. 694 * journal_t.
@@ -826,6 +833,14 @@ struct journal_s
826 struct mutex j_checkpoint_mutex; 833 struct mutex j_checkpoint_mutex;
827 834
828 /* 835 /*
836 * List of buffer heads used by the checkpoint routine. This
837 * was moved from jbd2_log_do_checkpoint() to reduce stack
838 * usage. Access to this array is controlled by the
839 * j_checkpoint_mutex. [j_checkpoint_mutex]
840 */
841 struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
842
843 /*
829 * Journal head: identifies the first unused block in the journal. 844 * Journal head: identifies the first unused block in the journal.
830 * [j_state_lock] 845 * [j_state_lock]
831 */ 846 */
@@ -939,8 +954,26 @@ struct journal_s
939 struct buffer_head **j_wbuf; 954 struct buffer_head **j_wbuf;
940 int j_wbufsize; 955 int j_wbufsize;
941 956
957 /*
 958 * this is the pid of the last person to run a synchronous operation
959 * through the journal
960 */
942 pid_t j_last_sync_writer; 961 pid_t j_last_sync_writer;
943 962
963 /*
964 * the average amount of time in nanoseconds it takes to commit a
965 * transaction to disk. [j_state_lock]
966 */
967 u64 j_average_commit_time;
968
969 /*
 970 * minimum and maximum times, in microseconds, that we
 971 * should wait for additional filesystem operations to get
 972 * batched into a synchronous handle
973 */
974 u32 j_min_batch_time;
975 u32 j_max_batch_time;
976
944 /* This function is called when a transaction is closed */ 977 /* This function is called when a transaction is closed */
945 void (*j_commit_callback)(journal_t *, 978 void (*j_commit_callback)(journal_t *,
946 transaction_t *); 979 transaction_t *);
@@ -1102,7 +1135,6 @@ extern int jbd2_journal_set_features
1102 (journal_t *, unsigned long, unsigned long, unsigned long); 1135 (journal_t *, unsigned long, unsigned long, unsigned long);
1103extern void jbd2_journal_clear_features 1136extern void jbd2_journal_clear_features
1104 (journal_t *, unsigned long, unsigned long, unsigned long); 1137 (journal_t *, unsigned long, unsigned long, unsigned long);
1105extern int jbd2_journal_create (journal_t *);
1106extern int jbd2_journal_load (journal_t *journal); 1138extern int jbd2_journal_load (journal_t *journal);
1107extern int jbd2_journal_destroy (journal_t *); 1139extern int jbd2_journal_destroy (journal_t *);
1108extern int jbd2_journal_recover (journal_t *journal); 1140extern int jbd2_journal_recover (journal_t *journal);
@@ -1177,8 +1209,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
1177int jbd2_log_do_checkpoint(journal_t *journal); 1209int jbd2_log_do_checkpoint(journal_t *journal);
1178 1210
1179void __jbd2_log_wait_for_space(journal_t *journal); 1211void __jbd2_log_wait_for_space(journal_t *journal);
1180extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); 1212extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
1181extern int jbd2_cleanup_journal_tail(journal_t *); 1213extern int jbd2_cleanup_journal_tail(journal_t *);
1182 1214
1183/* Debugging code only: */ 1215/* Debugging code only: */
1184 1216
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 6b8e2027165e..343df9ef2412 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -476,6 +476,12 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
476 __val = __val < __min ? __min: __val; \ 476 __val = __val < __min ? __min: __val; \
477 __val > __max ? __max: __val; }) 477 __val > __max ? __max: __val; })
478 478
479
480/*
 481 * swap - swap the values of @a and @b
482 */
483#define swap(a, b) ({ typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; })
484
479/** 485/**
480 * container_of - cast a member of a structure out to the containing structure 486 * container_of - cast a member of a structure out to the containing structure
481 * @ptr: the pointer to the member. 487 * @ptr: the pointer to the member.
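The new swap() macro replaces open-coded three-statement exchanges. It is a
statement expression with a typeof(a) temporary, so the two arguments must
have compatible types. A trivial sketch:

    int lo = 9, hi = 4;

    if (lo > hi)
            swap(lo, hi);   /* now lo == 4, hi == 9 */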
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index 81b4207deb95..96eea90f01a8 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -15,6 +15,7 @@
15#define __LINUX_PCA9532_H 15#define __LINUX_PCA9532_H
16 16
17#include <linux/leds.h> 17#include <linux/leds.h>
18#include <linux/workqueue.h>
18 19
19enum pca9532_state { 20enum pca9532_state {
20 PCA9532_OFF = 0x0, 21 PCA9532_OFF = 0x0,
@@ -31,6 +32,7 @@ struct pca9532_led {
31 struct i2c_client *client; 32 struct i2c_client *client;
32 char *name; 33 char *name;
33 struct led_classdev ldev; 34 struct led_classdev ldev;
35 struct work_struct work;
34 enum pca9532_type type; 36 enum pca9532_type type;
35 enum pca9532_state state; 37 enum pca9532_state state;
36}; 38};
diff --git a/include/linux/leds.h b/include/linux/leds.h
index d3a73f5a48c3..24489da701e3 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -32,7 +32,10 @@ struct led_classdev {
32 int brightness; 32 int brightness;
33 int flags; 33 int flags;
34 34
35 /* Lower 16 bits reflect status */
35#define LED_SUSPENDED (1 << 0) 36#define LED_SUSPENDED (1 << 0)
37 /* Upper 16 bits reflect control information */
38#define LED_CORE_SUSPENDRESUME (1 << 16)
36 39
37 /* Set LED brightness level */ 40 /* Set LED brightness level */
38 /* Must not sleep, use a workqueue if needed */ 41 /* Must not sleep, use a workqueue if needed */
@@ -62,7 +65,7 @@ struct led_classdev {
62 65
63extern int led_classdev_register(struct device *parent, 66extern int led_classdev_register(struct device *parent,
64 struct led_classdev *led_cdev); 67 struct led_classdev *led_cdev);
65extern void led_classdev_unregister(struct led_classdev *lcd); 68extern void led_classdev_unregister(struct led_classdev *led_cdev);
66extern void led_classdev_suspend(struct led_classdev *led_cdev); 69extern void led_classdev_suspend(struct led_classdev *led_cdev);
67extern void led_classdev_resume(struct led_classdev *led_cdev); 70extern void led_classdev_resume(struct led_classdev *led_cdev);
68 71
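LED_CORE_SUSPENDRESUME mirrors the backlight change above: a driver with no
special power-management needs sets the control bit and lets the LED core
suspend and resume the device. A sketch with hypothetical demo_* names:

    static void demo_led_set(struct led_classdev *cdev,
                             enum led_brightness value);  /* hypothetical */

    static struct led_classdev demo_led = {
            .name           = "demo:green:status",
            .brightness_set = demo_led_set,
            .flags          = LED_CORE_SUSPENDRESUME,
    };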
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 3449de597eff..4f7c8fb4d3fe 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1518,6 +1518,7 @@ extern void sata_pmp_error_handler(struct ata_port *ap);
1518 1518
1519extern const struct ata_port_operations ata_sff_port_ops; 1519extern const struct ata_port_operations ata_sff_port_ops;
1520extern const struct ata_port_operations ata_bmdma_port_ops; 1520extern const struct ata_port_operations ata_bmdma_port_ops;
1521extern const struct ata_port_operations ata_bmdma32_port_ops;
1521 1522
1522/* PIO only, sg_tablesize and dma_boundary limits can be removed */ 1523/* PIO only, sg_tablesize and dma_boundary limits can be removed */
1523#define ATA_PIO_SHT(drv_name) \ 1524#define ATA_PIO_SHT(drv_name) \
@@ -1545,6 +1546,8 @@ extern void ata_sff_exec_command(struct ata_port *ap,
1545 const struct ata_taskfile *tf); 1546 const struct ata_taskfile *tf);
1546extern unsigned int ata_sff_data_xfer(struct ata_device *dev, 1547extern unsigned int ata_sff_data_xfer(struct ata_device *dev,
1547 unsigned char *buf, unsigned int buflen, int rw); 1548 unsigned char *buf, unsigned int buflen, int rw);
1549extern unsigned int ata_sff_data_xfer32(struct ata_device *dev,
1550 unsigned char *buf, unsigned int buflen, int rw);
1548extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, 1551extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev,
1549 unsigned char *buf, unsigned int buflen, int rw); 1552 unsigned char *buf, unsigned int buflen, int rw);
1550extern u8 ata_sff_irq_on(struct ata_port *ap); 1553extern u8 ata_sff_irq_on(struct ata_port *ap);
diff --git a/include/linux/magic.h b/include/linux/magic.h
index f7f3fdddbef0..439f6f3cb0c4 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -13,6 +13,7 @@
13#define EFS_SUPER_MAGIC 0x414A53 13#define EFS_SUPER_MAGIC 0x414A53
14#define EXT2_SUPER_MAGIC 0xEF53 14#define EXT2_SUPER_MAGIC 0xEF53
15#define EXT3_SUPER_MAGIC 0xEF53 15#define EXT3_SUPER_MAGIC 0xEF53
16#define XENFS_SUPER_MAGIC 0xabba1974
16#define EXT4_SUPER_MAGIC 0xEF53 17#define EXT4_SUPER_MAGIC 0xEF53
17#define HPFS_SUPER_MAGIC 0xf995e849 18#define HPFS_SUPER_MAGIC 0xf995e849
18#define ISOFS_SUPER_MAGIC 0x9660 19#define ISOFS_SUPER_MAGIC 0x9660
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1fbe14d39521..326f45c86530 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -19,22 +19,45 @@
19 19
20#ifndef _LINUX_MEMCONTROL_H 20#ifndef _LINUX_MEMCONTROL_H
21#define _LINUX_MEMCONTROL_H 21#define _LINUX_MEMCONTROL_H
22 22#include <linux/cgroup.h>
23struct mem_cgroup; 23struct mem_cgroup;
24struct page_cgroup; 24struct page_cgroup;
25struct page; 25struct page;
26struct mm_struct; 26struct mm_struct;
27 27
28#ifdef CONFIG_CGROUP_MEM_RES_CTLR 28#ifdef CONFIG_CGROUP_MEM_RES_CTLR
29/*
30 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 31 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 32 * allocate memory but reclaims memory from all available zones, so the
 33 * "where I want memory from" bits of gfp_mask have no meaning. Any bits of
 34 * that field could be used, but having a rule is better: charge functions'
 35 * gfp_mask should be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK to
 36 * avoid ambiguity.
 37 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
38 */
29 39
30extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm, 40extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
31 gfp_t gfp_mask); 41 gfp_t gfp_mask);
42/* for swap handling */
43extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
44 struct page *page, gfp_t mask, struct mem_cgroup **ptr);
45extern void mem_cgroup_commit_charge_swapin(struct page *page,
46 struct mem_cgroup *ptr);
47extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
48
32extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 49extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
33 gfp_t gfp_mask); 50 gfp_t gfp_mask);
34extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru); 51extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
52extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
53extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
54extern void mem_cgroup_del_lru(struct page *page);
55extern void mem_cgroup_move_lists(struct page *page,
56 enum lru_list from, enum lru_list to);
35extern void mem_cgroup_uncharge_page(struct page *page); 57extern void mem_cgroup_uncharge_page(struct page *page);
36extern void mem_cgroup_uncharge_cache_page(struct page *page); 58extern void mem_cgroup_uncharge_cache_page(struct page *page);
37extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask); 59extern int mem_cgroup_shrink_usage(struct page *page,
60 struct mm_struct *mm, gfp_t gfp_mask);
38 61
39extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, 62extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
40 struct list_head *dst, 63 struct list_head *dst,
@@ -47,12 +70,20 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
47 70
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
-#define mm_match_cgroup(mm, cgroup)	\
-	((cgroup) == mem_cgroup_from_task((mm)->owner))
+static inline
+int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
+{
+	struct mem_cgroup *mem;
+	rcu_read_lock();
+	mem = mem_cgroup_from_task((mm)->owner);
+	rcu_read_unlock();
+	return cgroup == mem;
+}
 
 extern int
-mem_cgroup_prepare_migration(struct page *page, struct page *newpage);
-extern void mem_cgroup_end_migration(struct page *page);
+mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
+extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
+	struct page *oldpage, struct page *newpage);
 
 /*
  * For memory reclaim.
@@ -65,13 +96,32 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
 						int priority);
 extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 						int priority);
+int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
+				       struct zone *zone,
+				       enum lru_list lru);
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+						      struct zone *zone);
+struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 
-extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
-					int priority, enum lru_list lru);
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+extern int do_swap_account;
+#endif
 
+static inline bool mem_cgroup_disabled(void)
+{
+	if (mem_cgroup_subsys.disabled)
+		return true;
+	return false;
+}
+
+extern bool mem_cgroup_oom_called(struct task_struct *task);
 
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
-static inline int mem_cgroup_charge(struct page *page,
+struct mem_cgroup;
+
+static inline int mem_cgroup_newpage_charge(struct page *page,
 					struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
@@ -83,6 +133,21 @@ static inline int mem_cgroup_cache_charge(struct page *page,
 	return 0;
 }
 
+static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
+		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
+{
+	return 0;
+}
+
+static inline void mem_cgroup_commit_charge_swapin(struct page *page,
+					struct mem_cgroup *ptr)
+{
+}
+
+static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
+{
+}
+
 static inline void mem_cgroup_uncharge_page(struct page *page)
 {
 }
@@ -91,12 +156,33 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
 {
 }
 
-static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+static inline int mem_cgroup_shrink_usage(struct page *page,
+			struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
 }
 
-static inline void mem_cgroup_move_lists(struct page *page, bool active)
+static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
+{
+}
+
+static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
+{
+	return ;
+}
+
+static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
+{
+	return ;
+}
+
+static inline void mem_cgroup_del_lru(struct page *page)
+{
+	return ;
+}
+
+static inline void
+mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
 {
 }
 
@@ -112,12 +198,14 @@ static inline int task_in_mem_cgroup(struct task_struct *task,
 }
 
 static inline int
-mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
+mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 {
 	return 0;
 }
 
-static inline void mem_cgroup_end_migration(struct page *page)
+static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
+					struct page *oldpage,
+					struct page *newpage)
 {
 }
 
@@ -146,12 +234,42 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 {
 }
 
-static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
-					struct zone *zone, int priority,
-					enum lru_list lru)
+static inline bool mem_cgroup_disabled(void)
+{
+	return true;
+}
+
+static inline bool mem_cgroup_oom_called(struct task_struct *task)
+{
+	return false;
+}
+
+static inline int
+mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
+{
+	return 1;
+}
+
+static inline unsigned long
+mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
+			 enum lru_list lru)
 {
 	return 0;
 }
+
+
+static inline struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
+{
+	return NULL;
+}
+
+static inline struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
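
A minimal sketch of how the mm_match_cgroup() rework above behaves for a
caller (hypothetical function, not part of this patch; note the RCU
locking now lives inside the helper rather than at each call site):

static int mm_belongs_to_memcg(struct mm_struct *mm, struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())	/* controller compiled in but unused */
		return 1;
	return mm_match_cgroup(mm, memcg);
}
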
diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h
index 96acbfc8aa12..be3264e286e0 100644
--- a/include/linux/mfd/wm8350/pmic.h
+++ b/include/linux/mfd/wm8350/pmic.h
@@ -13,6 +13,10 @@
13#ifndef __LINUX_MFD_WM8350_PMIC_H 13#ifndef __LINUX_MFD_WM8350_PMIC_H
14#define __LINUX_MFD_WM8350_PMIC_H 14#define __LINUX_MFD_WM8350_PMIC_H
15 15
16#include <linux/platform_device.h>
17#include <linux/leds.h>
18#include <linux/regulator/machine.h>
19
16/* 20/*
17 * Register values. 21 * Register values.
18 */ 22 */
@@ -700,6 +704,33 @@ struct wm8350;
700struct platform_device; 704struct platform_device;
701struct regulator_init_data; 705struct regulator_init_data;
702 706
707/*
708 * WM8350 LED platform data
709 */
710struct wm8350_led_platform_data {
711 const char *name;
712 const char *default_trigger;
713 int max_uA;
714};
715
716struct wm8350_led {
717 struct platform_device *pdev;
718 struct mutex mutex;
719 struct work_struct work;
720 spinlock_t value_lock;
721 enum led_brightness value;
722 struct led_classdev cdev;
723 int max_uA_index;
724 int enabled;
725
726 struct regulator *isink;
727 struct regulator_consumer_supply isink_consumer;
728 struct regulator_init_data isink_init;
729 struct regulator *dcdc;
730 struct regulator_consumer_supply dcdc_consumer;
731 struct regulator_init_data dcdc_init;
732};
733
703struct wm8350_pmic { 734struct wm8350_pmic {
704 /* Number of regulators of each type on this device */ 735 /* Number of regulators of each type on this device */
705 int max_dcdc; 736 int max_dcdc;
@@ -717,10 +748,15 @@ struct wm8350_pmic {
717 748
718 /* regulator devices */ 749 /* regulator devices */
719 struct platform_device *pdev[NUM_WM8350_REGULATORS]; 750 struct platform_device *pdev[NUM_WM8350_REGULATORS];
751
752 /* LED devices */
753 struct wm8350_led led[2];
720}; 754};
721 755
722int wm8350_register_regulator(struct wm8350 *wm8350, int reg, 756int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
723 struct regulator_init_data *initdata); 757 struct regulator_init_data *initdata);
758int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
759 struct wm8350_led_platform_data *pdata);
724 760
725/* 761/*
726 * Additional DCDC control not supported via regulator API 762 * Additional DCDC control not supported via regulator API
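
A sketch of how board code might use the new LED registration hook; the
device name, current limit and DCDC/ISINK choices below are illustrative,
not taken from any real board file:

static struct wm8350_led_platform_data board_led = {
	.name		 = "board:white:backlight",	/* hypothetical */
	.default_trigger = "backlight",
	.max_uA		 = 27899,			/* assumed limit */
};

static int __init board_register_led(struct wm8350 *wm8350)
{
	/* LED 0, powered from DCDC1, current-regulated by ISINK A */
	return wm8350_register_led(wm8350, 0, WM8350_DCDC_1,
				   WM8350_ISINK_A, &board_led);
}
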
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index c948350c378e..7fbb97267556 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -28,6 +28,7 @@ add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
28{ 28{
29 list_add(&page->lru, &zone->lru[l].list); 29 list_add(&page->lru, &zone->lru[l].list);
30 __inc_zone_state(zone, NR_LRU_BASE + l); 30 __inc_zone_state(zone, NR_LRU_BASE + l);
31 mem_cgroup_add_lru_list(page, l);
31} 32}
32 33
33static inline void 34static inline void
@@ -35,6 +36,7 @@ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
35{ 36{
36 list_del(&page->lru); 37 list_del(&page->lru);
37 __dec_zone_state(zone, NR_LRU_BASE + l); 38 __dec_zone_state(zone, NR_LRU_BASE + l);
39 mem_cgroup_del_lru_list(page, l);
38} 40}
39 41
40static inline void 42static inline void
@@ -54,6 +56,7 @@ del_page_from_lru(struct zone *zone, struct page *page)
54 l += page_is_file_cache(page); 56 l += page_is_file_cache(page);
55 } 57 }
56 __dec_zone_state(zone, NR_LRU_BASE + l); 58 __dec_zone_state(zone, NR_LRU_BASE + l);
59 mem_cgroup_del_lru_list(page, l);
57} 60}
58 61
59/** 62/**
@@ -78,23 +81,4 @@ static inline enum lru_list page_lru(struct page *page)
78 return lru; 81 return lru;
79} 82}
80 83
81/**
82 * inactive_anon_is_low - check if anonymous pages need to be deactivated
83 * @zone: zone to check
84 *
85 * Returns true if the zone does not have enough inactive anon pages,
86 * meaning some active anon pages need to be deactivated.
87 */
88static inline int inactive_anon_is_low(struct zone *zone)
89{
90 unsigned long active, inactive;
91
92 active = zone_page_state(zone, NR_ACTIVE_ANON);
93 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
94
95 if (inactive * zone->inactive_ratio < active)
96 return 1;
97
98 return 0;
99}
100#endif 84#endif
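
The open-coded check removed above does not disappear from the kernel: in
this series it moves to mm/vmscan.c so a memcg-aware variant can sit next
to it. Roughly (a sketch of the global-LRU half, not the verbatim code):

static int inactive_anon_is_low_global(struct zone *zone)
{
	unsigned long active   = zone_page_state(zone, NR_ACTIVE_ANON);
	unsigned long inactive = zone_page_state(zone, NR_INACTIVE_ANON);

	/* Same test as before: too few inactive anon pages? */
	return inactive * zone->inactive_ratio < active;
}
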
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 35a7b5e19465..09c14e213b63 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -263,6 +263,19 @@ enum zone_type {
263#error ZONES_SHIFT -- too many zones configured adjust calculation 263#error ZONES_SHIFT -- too many zones configured adjust calculation
264#endif 264#endif
265 265
266struct zone_reclaim_stat {
267 /*
268 * The pageout code in vmscan.c keeps track of how many of the
269 * mem/swap backed and file backed pages are referenced.
270 * The higher the rotated/scanned ratio, the more valuable
271 * that cache is.
272 *
273 * The anon LRU stats live in [0], file LRU stats in [1]
274 */
275 unsigned long recent_rotated[2];
276 unsigned long recent_scanned[2];
277};
278
266struct zone { 279struct zone {
267 /* Fields commonly accessed by the page allocator */ 280 /* Fields commonly accessed by the page allocator */
268 unsigned long pages_min, pages_low, pages_high; 281 unsigned long pages_min, pages_low, pages_high;
@@ -315,16 +328,7 @@ struct zone {
 		unsigned long nr_scan;
 	} lru[NR_LRU_LISTS];
 
-	/*
-	 * The pageout code in vmscan.c keeps track of how many of the
-	 * mem/swap backed and file backed pages are refeferenced.
-	 * The higher the rotated/scanned ratio, the more valuable
-	 * that cache is.
-	 *
-	 * The anon LRU stats live in [0], file LRU stats in [1]
-	 */
-	unsigned long		recent_rotated[2];
-	unsigned long		recent_scanned[2];
+	struct zone_reclaim_stat reclaim_stat;
 
 	unsigned long		pages_scanned;	   /* since last reclaim */
 	unsigned long		flags;		   /* zone flags, see below */
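
What the consolidation buys: reclaim code can compute the rotated/scanned
ratio from one structure whether the stats come from the zone or from a
memory cgroup. A sketch (helper name is hypothetical; index 0 is anon,
1 is file, per the comment above):

static unsigned long recent_reuse_percent(struct zone_reclaim_stat *rs,
					  int file)
{
	if (!rs->recent_scanned[file])
		return 0;
	/* High rotated/scanned means the pages keep getting referenced. */
	return rs->recent_rotated[file] * 100 / rs->recent_scanned[file];
}
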
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 00e2b575021f..88d3d8fbf9f2 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -520,6 +520,7 @@ struct cfi_fixup {
520 520
521#define CFI_MFR_AMD 0x0001 521#define CFI_MFR_AMD 0x0001
522#define CFI_MFR_ATMEL 0x001F 522#define CFI_MFR_ATMEL 0x001F
523#define CFI_MFR_SAMSUNG 0x00EC
523#define CFI_MFR_ST 0x0020 /* STMicroelectronics */ 524#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
524 525
525void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); 526void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
diff --git a/include/linux/mtd/ftl.h b/include/linux/mtd/ftl.h
index 0be442f881dd..0555f7a0b9ed 100644
--- a/include/linux/mtd/ftl.h
+++ b/include/linux/mtd/ftl.h
@@ -32,25 +32,25 @@
 #define _LINUX_FTL_H
 
 typedef struct erase_unit_header_t {
-    u_int8_t	LinkTargetTuple[5];
-    u_int8_t	DataOrgTuple[10];
-    u_int8_t	NumTransferUnits;
-    u_int32_t	EraseCount;
-    u_int16_t	LogicalEUN;
-    u_int8_t	BlockSize;
-    u_int8_t	EraseUnitSize;
-    u_int16_t	FirstPhysicalEUN;
-    u_int16_t	NumEraseUnits;
-    u_int32_t	FormattedSize;
-    u_int32_t	FirstVMAddress;
-    u_int16_t	NumVMPages;
-    u_int8_t	Flags;
-    u_int8_t	Code;
-    u_int32_t	SerialNumber;
-    u_int32_t	AltEUHOffset;
-    u_int32_t	BAMOffset;
-    u_int8_t	Reserved[12];
-    u_int8_t	EndTuple[2];
+    uint8_t	LinkTargetTuple[5];
+    uint8_t	DataOrgTuple[10];
+    uint8_t	NumTransferUnits;
+    uint32_t	EraseCount;
+    uint16_t	LogicalEUN;
+    uint8_t	BlockSize;
+    uint8_t	EraseUnitSize;
+    uint16_t	FirstPhysicalEUN;
+    uint16_t	NumEraseUnits;
+    uint32_t	FormattedSize;
+    uint32_t	FirstVMAddress;
+    uint16_t	NumVMPages;
+    uint8_t	Flags;
+    uint8_t	Code;
+    uint32_t	SerialNumber;
+    uint32_t	AltEUHOffset;
+    uint32_t	BAMOffset;
+    uint8_t	Reserved[12];
+    uint8_t	EndTuple[2];
 } erase_unit_header_t;
 
 /* Flags in erase_unit_header_t */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index aa30244492c6..b981b8772217 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -223,6 +223,7 @@ struct map_info {
223 must leave it enabled. */ 223 must leave it enabled. */
224 void (*set_vpp)(struct map_info *, int); 224 void (*set_vpp)(struct map_info *, int);
225 225
226 unsigned long pfow_base;
226 unsigned long map_priv_1; 227 unsigned long map_priv_1;
227 unsigned long map_priv_2; 228 unsigned long map_priv_2;
228 void *fldrv_priv; 229 void *fldrv_priv;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 64433eb411d7..3aa5d77c2cdb 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -15,6 +15,8 @@
15#include <linux/mtd/compatmac.h> 15#include <linux/mtd/compatmac.h>
16#include <mtd/mtd-abi.h> 16#include <mtd/mtd-abi.h>
17 17
18#include <asm/div64.h>
19
18#define MTD_CHAR_MAJOR 90 20#define MTD_CHAR_MAJOR 90
19#define MTD_BLOCK_MAJOR 31 21#define MTD_BLOCK_MAJOR 31
20#define MAX_MTD_DEVICES 32 22#define MAX_MTD_DEVICES 32
@@ -25,20 +27,20 @@
 #define MTD_ERASE_DONE          0x08
 #define MTD_ERASE_FAILED        0x10
 
-#define MTD_FAIL_ADDR_UNKNOWN 0xffffffff
+#define MTD_FAIL_ADDR_UNKNOWN -1LL
 
 /* If the erase fails, fail_addr might indicate exactly which block failed.  If
    fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not
    specific to any particular block. */
 struct erase_info {
 	struct mtd_info *mtd;
-	u_int32_t addr;
-	u_int32_t len;
-	u_int32_t fail_addr;
+	uint64_t addr;
+	uint64_t len;
+	uint64_t fail_addr;
 	u_long time;
 	u_long retries;
-	u_int dev;
-	u_int cell;
+	unsigned dev;
+	unsigned cell;
 	void (*callback) (struct erase_info *self);
 	u_long priv;
 	u_char state;
@@ -46,9 +48,9 @@ struct erase_info {
 };
 
 struct mtd_erase_region_info {
-	u_int32_t offset;	/* At which this region starts, from the beginning of the MTD */
-	u_int32_t erasesize;	/* For this region */
-	u_int32_t numblocks;	/* Number of blocks of erasesize in this region */
+	uint64_t offset;	/* At which this region starts, from the beginning of the MTD */
+	uint32_t erasesize;	/* For this region */
+	uint32_t numblocks;	/* Number of blocks of erasesize in this region */
 	unsigned long *lockmap;	/* If keeping bitmap of locks */
 };
 
@@ -100,14 +102,14 @@ struct mtd_oob_ops {
 
 struct mtd_info {
 	u_char type;
-	u_int32_t flags;
-	u_int32_t size;	 // Total size of the MTD
+	uint32_t flags;
+	uint64_t size;	 // Total size of the MTD
 
 	/* "Major" erase size for the device. Naïve users may take this
 	 * to be the only erase size available, or may use the more detailed
 	 * information below if they desire
 	 */
-	u_int32_t erasesize;
+	uint32_t erasesize;
 	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
 	 * though individual bits can be cleared), in case of NAND flash it is
 	 * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR
@@ -115,10 +117,20 @@ struct mtd_info {
 	 * Any driver registering a struct mtd_info must ensure a writesize of
 	 * 1 or larger.
 	 */
-	u_int32_t writesize;
+	uint32_t writesize;
+
+	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
+	uint32_t oobavail;  // Available OOB bytes per block
 
-	u_int32_t oobsize;   // Amount of OOB data per block (e.g. 16)
-	u_int32_t oobavail;  // Available OOB bytes per block
+	/*
+	 * If erasesize is a power of 2 then the shift is stored in
+	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
+	 */
+	unsigned int erasesize_shift;
+	unsigned int writesize_shift;
+	/* Masks based on erasesize_shift and writesize_shift */
+	unsigned int erasesize_mask;
+	unsigned int writesize_mask;
 
 	// Kernel-only stuff starts here.
 	const char *name;
@@ -190,8 +202,8 @@ struct mtd_info {
 	void (*sync) (struct mtd_info *mtd);
 
 	/* Chip-supported device locking */
-	int (*lock) (struct mtd_info *mtd, loff_t ofs, size_t len);
-	int (*unlock) (struct mtd_info *mtd, loff_t ofs, size_t len);
+	int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+	int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
 
 	/* Power Management functions */
 	int (*suspend) (struct mtd_info *mtd);
@@ -221,6 +233,35 @@ struct mtd_info {
221 void (*put_device) (struct mtd_info *mtd); 233 void (*put_device) (struct mtd_info *mtd);
222}; 234};
223 235
236static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
237{
238 if (mtd->erasesize_shift)
239 return sz >> mtd->erasesize_shift;
240 do_div(sz, mtd->erasesize);
241 return sz;
242}
243
244static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
245{
246 if (mtd->erasesize_shift)
247 return sz & mtd->erasesize_mask;
248 return do_div(sz, mtd->erasesize);
249}
250
251static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
252{
253 if (mtd->writesize_shift)
254 return sz >> mtd->writesize_shift;
255 do_div(sz, mtd->writesize);
256 return sz;
257}
258
259static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
260{
261 if (mtd->writesize_shift)
262 return sz & mtd->writesize_mask;
263 return do_div(sz, mtd->writesize);
264}
224 265
225 /* Kernel-side ioctl definitions */ 266 /* Kernel-side ioctl definitions */
226 267
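
With mtd->size widened to 64 bits, plain C division on device sizes no
longer works on 32-bit kernels, which is what the do_div()-based helpers
above are for. Two illustrative wrappers (names hypothetical):

static uint32_t mtd_block_count(struct mtd_info *mtd)
{
	return mtd_div_by_eb(mtd->size, mtd);	/* whole eraseblocks */
}

static int mtd_ofs_block_aligned(struct mtd_info *mtd, uint64_t ofs)
{
	return mtd_mod_by_eb(ofs, mtd) == 0;	/* remainder within a block */
}
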
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 733d3f3b4eb8..db5b63da2a7e 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -335,17 +335,12 @@ struct nand_buffers {
335 * @erase_cmd: [INTERN] erase command write function, selectable due to AND support 335 * @erase_cmd: [INTERN] erase command write function, selectable due to AND support
336 * @scan_bbt: [REPLACEABLE] function to scan bad block table 336 * @scan_bbt: [REPLACEABLE] function to scan bad block table
337 * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR) 337 * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR)
338 * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress
339 * @state: [INTERN] the current state of the NAND device 338 * @state: [INTERN] the current state of the NAND device
340 * @oob_poi: poison value buffer 339 * @oob_poi: poison value buffer
341 * @page_shift: [INTERN] number of address bits in a page (column address bits) 340 * @page_shift: [INTERN] number of address bits in a page (column address bits)
342 * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock 341 * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
343 * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry 342 * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
344 * @chip_shift: [INTERN] number of address bits in one chip 343 * @chip_shift: [INTERN] number of address bits in one chip
345 * @datbuf: [INTERN] internal buffer for one page + oob
346 * @oobbuf: [INTERN] oob buffer for one eraseblock
347 * @oobdirty: [INTERN] indicates that oob_buf must be reinitialized
348 * @data_poi: [INTERN] pointer to a data buffer
349 * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about 344 * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about
350 * special functionality. See the defines for further explanation 345 * special functionality. See the defines for further explanation
351 * @badblockpos: [INTERN] position of the bad block marker in the oob area 346 * @badblockpos: [INTERN] position of the bad block marker in the oob area
@@ -399,7 +394,7 @@ struct nand_chip {
 	int bbt_erase_shift;
 	int chip_shift;
 	int numchips;
-	unsigned long chipsize;
+	uint64_t chipsize;
 	int pagemask;
 	int pagebuf;
 	int subpagesize;
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index c92b4d439609..a45dd831b3f8 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -36,9 +36,9 @@
 
 struct mtd_partition {
 	char *name;		/* identifier string */
-	u_int32_t size;		/* partition size */
-	u_int32_t offset;	/* offset within the master MTD space */
-	u_int32_t mask_flags;	/* master MTD flags to mask out for this partition */
+	uint64_t size;		/* partition size */
+	uint64_t offset;	/* offset within the master MTD space */
+	uint32_t mask_flags;	/* master MTD flags to mask out for this partition */
 	struct nand_ecclayout *ecclayout;	/* out of band layout for this partition (NAND only)*/
 	struct mtd_info **mtdp;	/* pointer to store the MTD object */
 };
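
The wider size/offset fields let a partition table address devices larger
than 4GiB. An illustrative (hypothetical) layout; MTDPART_OFS_APPEND and
MTDPART_SIZ_FULL keep their usual meaning:

static struct mtd_partition board_nand_parts[] = {
	{
		.name	= "kernel",
		.offset	= 0,
		.size	= 4 * 1024 * 1024,
	}, {
		.name	= "rootfs",		/* rest of the chip */
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};
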
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h
new file mode 100644
index 000000000000..b730d4f84655
--- /dev/null
+++ b/include/linux/mtd/pfow.h
@@ -0,0 +1,159 @@
1/* Primary function overlay window definitions
2 * and service functions used by LPDDR chips
3 */
4#ifndef __LINUX_MTD_PFOW_H
5#define __LINUX_MTD_PFOW_H
6
7#include <linux/mtd/qinfo.h>
8
9/* PFOW registers addressing */
10/* Address of symbol "P" */
11#define PFOW_QUERY_STRING_P 0x0000
12/* Address of symbol "F" */
13#define PFOW_QUERY_STRING_F 0x0002
14/* Address of symbol "O" */
15#define PFOW_QUERY_STRING_O 0x0004
16/* Address of symbol "W" */
17#define PFOW_QUERY_STRING_W 0x0006
18/* Identification info for LPDDR chip */
19#define PFOW_MANUFACTURER_ID 0x0020
20#define PFOW_DEVICE_ID 0x0022
21/* Address in PFOW where prog buffer can be found */
22#define PFOW_PROGRAM_BUFFER_OFFSET 0x0040
23/* Size of program buffer in words */
24#define PFOW_PROGRAM_BUFFER_SIZE 0x0042
25/* Address command code register */
26#define PFOW_COMMAND_CODE 0x0080
27/* command data register */
28#define PFOW_COMMAND_DATA 0x0084
29/* command address register lower address bits */
30#define PFOW_COMMAND_ADDRESS_L 0x0088
31/* command address register upper address bits */
32#define PFOW_COMMAND_ADDRESS_H 0x008a
33/* number of bytes to be programmed, lower address bits */
34#define PFOW_DATA_COUNT_L 0x0090
35/* number of bytes to be proggrammed higher address bits */
36#define PFOW_DATA_COUNT_H 0x0092
37/* command execution register, the only possible value is 0x01 */
38#define PFOW_COMMAND_EXECUTE 0x00c0
39/* 0x01 should be written at this address to clear buffer */
40#define PFOW_CLEAR_PROGRAM_BUFFER 0x00c4
41/* device program/erase suspend register */
42#define PFOW_PROGRAM_ERASE_SUSPEND 0x00c8
43/* device status register */
44#define PFOW_DSR 0x00cc
45
46/* LPDDR memory device command codes */
47/* They are possible values of PFOW command code register */
48#define LPDDR_WORD_PROGRAM 0x0041
49#define LPDDR_BUFF_PROGRAM 0x00E9
50#define LPDDR_BLOCK_ERASE 0x0020
51#define LPDDR_LOCK_BLOCK 0x0061
52#define LPDDR_UNLOCK_BLOCK 0x0062
53#define LPDDR_READ_BLOCK_LOCK_STATUS 0x0065
54#define LPDDR_INFO_QUERY 0x0098
55#define LPDDR_READ_OTP 0x0097
56#define LPDDR_PROG_OTP 0x00C0
57#define LPDDR_RESUME 0x00D0
58
59/* Defines possible value of PFOW command execution register */
60#define LPDDR_START_EXECUTION 0x0001
61
62/* Defines possible value of PFOW program/erase suspend register */
63#define LPDDR_SUSPEND 0x0001
64
65/* Possible values of PFOW device status register */
66/* access R - read; RC read & clearable */
67#define DSR_DPS (1<<1) /* RC; device protect status
68 * 0 - not protected 1 - locked */
69#define DSR_PSS (1<<2) /* R; program suspend status;
70 * 0-prog in progress/completed,
71 * 1- prog suspended */
72#define DSR_VPPS (1<<3) /* RC; 0-Vpp OK, * 1-Vpp low */
73#define DSR_PROGRAM_STATUS (1<<4) /* RC; 0-successful, 1-error */
74#define DSR_ERASE_STATUS (1<<5) /* RC; erase or blank check status;
75 * 0-success erase/blank check,
76 * 1 blank check error */
77#define DSR_ESS (1<<6) /* R; erase suspend status;
78 * 0-erase in progress/complete,
79 * 1 erase suspended */
80#define DSR_READY_STATUS (1<<7) /* R; Device status
81 * 0-busy,
82 * 1-ready */
83#define DSR_RPS (0x3<<8) /* RC; region program status
84 * 00 - Success,
85 * 01-re-program attempt in region with
86 * object mode data,
87 * 10-object mode program attempt in
88 * region with control mode data
89 * 11-attempt to program invalid half
90 * with 0x41 command */
91#define DSR_AOS (1<<12) /* RC; 1- AO related failure */
92#define DSR_AVAILABLE (1<<15) /* R; Device availability
93 * 1 - Device available
94 * 0 - not available */
95
96/* The superset of all possible error bits in DSR */
97#define DSR_ERR 0x133A
98
99static inline void send_pfow_command(struct map_info *map,
100 unsigned long cmd_code, unsigned long adr,
101 unsigned long len, map_word *datum)
102{
103 int bits_per_chip = map_bankwidth(map) * 8;
104 int chipnum;
105 struct lpddr_private *lpddr = map->fldrv_priv;
106 chipnum = adr >> lpddr->chipshift;
107
108 map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE);
109 map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)),
110 map->pfow_base + PFOW_COMMAND_ADDRESS_L);
111 map_write(map, CMD(adr>>bits_per_chip),
112 map->pfow_base + PFOW_COMMAND_ADDRESS_H);
113 if (len) {
114 map_write(map, CMD(len & ((1<<bits_per_chip) - 1)),
115 map->pfow_base + PFOW_DATA_COUNT_L);
116 map_write(map, CMD(len>>bits_per_chip),
117 map->pfow_base + PFOW_DATA_COUNT_H);
118 }
119 if (datum)
120 map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA);
121
122 /* Command execution start */
123 map_write(map, CMD(LPDDR_START_EXECUTION),
124 map->pfow_base + PFOW_COMMAND_EXECUTE);
125}
126
127static inline void print_drs_error(unsigned dsr)
128{
129 int prog_status = (dsr & DSR_RPS) >> 8;
130
131 if (!(dsr & DSR_AVAILABLE))
132 printk(KERN_NOTICE"DSR.15: (0) Device not Available\n");
133 if (prog_status & 0x03)
134 printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid "
135 "half with 41h command\n");
136 else if (prog_status & 0x02)
137 printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt "
138 "in region with Control Mode data\n");
139 else if (prog_status & 0x01)
140 printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region "
141 "with Object Mode data\n");
142 if (!(dsr & DSR_READY_STATUS))
143 printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n");
144 if (dsr & DSR_ESS)
145 printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n");
146 if (dsr & DSR_ERASE_STATUS)
147 printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n");
148 if (dsr & DSR_PROGRAM_STATUS)
149 printk(KERN_NOTICE"DSR.4: (1) Program Error\n");
150 if (dsr & DSR_VPPS)
151 printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation "
152 "aborted\n");
153 if (dsr & DSR_PSS)
154 printk(KERN_NOTICE"DSR.2: (1) Program suspended\n");
155 if (dsr & DSR_DPS)
156 printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt "
157 "on locked block\n");
158}
159#endif /* __LINUX_MTD_PFOW_H */
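
A sketch of the intended command flow (simplified: no timeout and no
suspend/resume handling; the error constant assumes <linux/errno.h>):

static int lpddr_erase_block_sketch(struct map_info *map, unsigned long adr)
{
	map_word dsr;

	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);

	/* Busy-wait for DSR.7 (ready); real code would sleep/timeout. */
	do {
		dsr = map_read(map, map->pfow_base + PFOW_DSR);
	} while (!(CMDVAL(dsr) & DSR_READY_STATUS));

	if (CMDVAL(dsr) & DSR_ERR) {
		print_drs_error(CMDVAL(dsr));
		return -EIO;
	}
	return 0;
}
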
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index c8e63a5ee72e..76f7cabf07d3 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -24,6 +24,7 @@ struct physmap_flash_data {
24 unsigned int width; 24 unsigned int width;
25 void (*set_vpp)(struct map_info *, int); 25 void (*set_vpp)(struct map_info *, int);
26 unsigned int nr_parts; 26 unsigned int nr_parts;
27 unsigned int pfow_base;
27 struct mtd_partition *parts; 28 struct mtd_partition *parts;
28}; 29};
29 30
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
new file mode 100644
index 000000000000..7b3d487d8b3f
--- /dev/null
+++ b/include/linux/mtd/qinfo.h
@@ -0,0 +1,91 @@
1#ifndef __LINUX_MTD_QINFO_H
2#define __LINUX_MTD_QINFO_H
3
4#include <linux/mtd/map.h>
5#include <linux/wait.h>
6#include <linux/spinlock.h>
7#include <linux/delay.h>
8#include <linux/mtd/mtd.h>
9#include <linux/mtd/flashchip.h>
10#include <linux/mtd/partitions.h>
11
12/* lpddr_private describes lpddr flash chip in memory map
13 * @ManufactId - Chip Manufacture ID
14 * @DevId - Chip Device ID
15 * @qinfo - pointer to qinfo records describing the chip
16 * @numchips - number of chips including virtual RWW partitions
17 * @chipshift - Chip/partition size 2^chipshift
18 * @chips - per-chip data structure
19 */
20struct lpddr_private {
21 uint16_t ManufactId;
22 uint16_t DevId;
23 struct qinfo_chip *qinfo;
24 int numchips;
25 unsigned long chipshift;
26 struct flchip chips[0];
27};
28
29/* qinfo_query_info structure contains request information for
30 * each qinfo record
31 * @major - major number of qinfo record
32 * @minor - minor number of qinfo record
33 * @id_str - descriptive string to access the record
34 * @desc - detailed description for the qinfo record
35 */
36struct qinfo_query_info {
37 uint8_t major;
38 uint8_t minor;
39 char *id_str;
40 char *desc;
41};
42
43/*
44 * qinfo_chip structure contains necessary qinfo records data
45 * @DevSizeShift - Device size 2^n bytes
46 * @BufSizeShift - Program buffer size 2^n bytes
47 * @TotalBlocksNum - Total number of blocks
48 * @UniformBlockSizeShift - Uniform block size 2^UniformBlockSizeShift bytes
49 * @HWPartsNum - Number of hardware partitions
50 * @SuspEraseSupp - Suspend erase supported
51 * @SingleWordProgTime - Single word program 2^SingleWordProgTime u-sec
52 * @ProgBufferTime - Program buffer write 2^ProgBufferTime u-sec
53 * @BlockEraseTime - Block erase 2^BlockEraseTime m-sec
54 */
55struct qinfo_chip {
56 /* General device info */
57 uint16_t DevSizeShift;
58 uint16_t BufSizeShift;
59 /* Erase block information */
60 uint16_t TotalBlocksNum;
61 uint16_t UniformBlockSizeShift;
62 /* Partition information */
63 uint16_t HWPartsNum;
64 /* Optional features */
65 uint16_t SuspEraseSupp;
66 /* Operation typical time */
67 uint16_t SingleWordProgTime;
68 uint16_t ProgBufferTime;
69 uint16_t BlockEraseTime;
70};
71
72/* defines for fixup usage */
73#define LPDDR_MFR_ANY 0xffff
74#define LPDDR_ID_ANY 0xffff
75#define NUMONYX_MFGR_ID 0x0089
76#define R18_DEVICE_ID_1G 0x893c
77
78static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map)
79{
80 map_word val = { {0} };
81 val.x[0] = cmd;
82 return val;
83}
84
85#define CMD(x) lpddr_build_cmd(x, map)
86#define CMDVAL(cmd) cmd.x[0]
87
88struct mtd_info *lpddr_cmdset(struct map_info *);
89
90#endif
91
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
new file mode 100644
index 000000000000..25f4d2a845c1
--- /dev/null
+++ b/include/linux/mtd/sharpsl.h
@@ -0,0 +1,20 @@
1/*
2 * SharpSL NAND support
3 *
4 * Copyright (C) 2008 Dmitry Baryshkov
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/mtd/nand.h>
12#include <linux/mtd/nand_ecc.h>
13#include <linux/mtd/partitions.h>
14
15struct sharpsl_nand_platform_data {
16 struct nand_bbt_descr *badblock_pattern;
17 struct nand_ecclayout *ecc_layout;
18 struct mtd_partition *partitions;
19 unsigned int nr_partitions;
20};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c28bbba3c23d..f24556813375 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1125,9 +1125,6 @@ struct softnet_data
1125 struct sk_buff *completion_queue; 1125 struct sk_buff *completion_queue;
1126 1126
1127 struct napi_struct backlog; 1127 struct napi_struct backlog;
1128#ifdef CONFIG_NET_DMA
1129 struct dma_chan *net_dma;
1130#endif
1131}; 1128};
1132 1129
1133DECLARE_PER_CPU(struct softnet_data,softnet_data); 1130DECLARE_PER_CPU(struct softnet_data,softnet_data);
@@ -1373,8 +1370,14 @@ extern int netif_rx_ni(struct sk_buff *skb);
1373#define HAVE_NETIF_RECEIVE_SKB 1 1370#define HAVE_NETIF_RECEIVE_SKB 1
1374extern int netif_receive_skb(struct sk_buff *skb); 1371extern int netif_receive_skb(struct sk_buff *skb);
1375extern void napi_gro_flush(struct napi_struct *napi); 1372extern void napi_gro_flush(struct napi_struct *napi);
1373extern int dev_gro_receive(struct napi_struct *napi,
1374 struct sk_buff *skb);
1376extern int napi_gro_receive(struct napi_struct *napi, 1375extern int napi_gro_receive(struct napi_struct *napi,
1377 struct sk_buff *skb); 1376 struct sk_buff *skb);
1377extern void napi_reuse_skb(struct napi_struct *napi,
1378 struct sk_buff *skb);
1379extern struct sk_buff * napi_fraginfo_skb(struct napi_struct *napi,
1380 struct napi_gro_fraginfo *info);
1378extern int napi_gro_frags(struct napi_struct *napi, 1381extern int napi_gro_frags(struct napi_struct *napi,
1379 struct napi_gro_fraginfo *info); 1382 struct napi_gro_fraginfo *info);
1380extern void netif_nit_deliver(struct sk_buff *skb); 1383extern void netif_nit_deliver(struct sk_buff *skb);
diff --git a/include/linux/nwpserial.h b/include/linux/nwpserial.h
new file mode 100644
index 000000000000..9acb21572eaf
--- /dev/null
+++ b/include/linux/nwpserial.h
@@ -0,0 +1,18 @@
1/*
2 * Serial Port driver for a NWP uart device
3 *
4 * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12#ifndef _NWPSERIAL_H
13#define _NWPSERIAL_H
14
15int nwpserial_register_port(struct uart_port *port);
16void nwpserial_unregister_port(int line);
17
18#endif /* _NWPSERIAL_H */
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 1ce9fe572e51..1d9518bc4c58 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -164,4 +164,22 @@ void oprofile_put_buff(unsigned long *buf, unsigned int start,
164unsigned long oprofile_get_cpu_buffer_size(void); 164unsigned long oprofile_get_cpu_buffer_size(void);
165void oprofile_cpu_buffer_inc_smpl_lost(void); 165void oprofile_cpu_buffer_inc_smpl_lost(void);
166 166
167/* cpu buffer functions */
168
169struct op_sample;
170
171struct op_entry {
172 struct ring_buffer_event *event;
173 struct op_sample *sample;
174 unsigned long irq_flags;
175 unsigned long size;
176 unsigned long *data;
177};
178
179void oprofile_write_reserve(struct op_entry *entry,
180 struct pt_regs * const regs,
181 unsigned long pc, int code, int size);
182int oprofile_add_data(struct op_entry *entry, unsigned long val);
183int oprofile_write_commit(struct op_entry *entry);
184
167#endif /* OPROFILE_H */ 185#endif /* OPROFILE_H */
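
The new entry API splits a sample write into reserve / fill / commit, so
the ring-buffer slot is sized up front. A minimal sketch (the event code
and payload are placeholders; return values left unchecked for brevity):

static void oprofile_log_pair(struct pt_regs * const regs, unsigned long pc,
			      unsigned long a, unsigned long b)
{
	struct op_entry entry;

	oprofile_write_reserve(&entry, regs, pc, /* code */ 0, /* size */ 2);
	oprofile_add_data(&entry, a);
	oprofile_add_data(&entry, b);
	oprofile_write_commit(&entry);
}
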
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 1e6d34bfa094..602cc1fdee90 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -26,10 +26,6 @@ enum {
26 PCG_LOCK, /* page cgroup is locked */ 26 PCG_LOCK, /* page cgroup is locked */
27 PCG_CACHE, /* charged as cache */ 27 PCG_CACHE, /* charged as cache */
28 PCG_USED, /* this object is in use. */ 28 PCG_USED, /* this object is in use. */
29 /* flags for LRU placement */
30 PCG_ACTIVE, /* page is active in this cgroup */
31 PCG_FILE, /* page is file system backed */
32 PCG_UNEVICTABLE, /* page is unevictableable */
33}; 29};
34 30
35#define TESTPCGFLAG(uname, lname) \ 31#define TESTPCGFLAG(uname, lname) \
@@ -50,19 +46,6 @@ TESTPCGFLAG(Cache, CACHE)
50TESTPCGFLAG(Used, USED) 46TESTPCGFLAG(Used, USED)
51CLEARPCGFLAG(Used, USED) 47CLEARPCGFLAG(Used, USED)
52 48
53/* LRU management flags (from global-lru definition) */
54TESTPCGFLAG(File, FILE)
55SETPCGFLAG(File, FILE)
56CLEARPCGFLAG(File, FILE)
57
58TESTPCGFLAG(Active, ACTIVE)
59SETPCGFLAG(Active, ACTIVE)
60CLEARPCGFLAG(Active, ACTIVE)
61
62TESTPCGFLAG(Unevictable, UNEVICTABLE)
63SETPCGFLAG(Unevictable, UNEVICTABLE)
64CLEARPCGFLAG(Unevictable, UNEVICTABLE)
65
66static inline int page_cgroup_nid(struct page_cgroup *pc) 49static inline int page_cgroup_nid(struct page_cgroup *pc)
67{ 50{
68 return page_to_nid(pc->page); 51 return page_to_nid(pc->page);
@@ -105,4 +88,39 @@ static inline void page_cgroup_init(void)
105} 88}
106 89
107#endif 90#endif
91
92#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
93#include <linux/swap.h>
94extern struct mem_cgroup *
95swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem);
96extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent);
97extern int swap_cgroup_swapon(int type, unsigned long max_pages);
98extern void swap_cgroup_swapoff(int type);
99#else
100#include <linux/swap.h>
101
102static inline
103struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
104{
105 return NULL;
106}
107
108static inline
109struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
110{
111 return NULL;
112}
113
114static inline int
115swap_cgroup_swapon(int type, unsigned long max_pages)
116{
117 return 0;
118}
119
120static inline void swap_cgroup_swapoff(int type)
121{
122 return;
123}
124
125#endif
108#endif 126#endif
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index f7cc204fab07..20998746518e 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -223,7 +223,6 @@ struct hotplug_params {
223#ifdef CONFIG_ACPI 223#ifdef CONFIG_ACPI
224#include <acpi/acpi.h> 224#include <acpi/acpi.h>
225#include <acpi/acpi_bus.h> 225#include <acpi/acpi_bus.h>
226#include <acpi/actypes.h>
227extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, 226extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
228 struct hotplug_params *hpp); 227 struct hotplug_params *hpp);
229int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); 228int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
diff --git a/include/linux/pid.h b/include/linux/pid.h
index bb206c56d1f0..49f1c2f66e95 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -123,6 +123,24 @@ extern struct pid *alloc_pid(struct pid_namespace *ns);
123extern void free_pid(struct pid *pid); 123extern void free_pid(struct pid *pid);
124 124
125/* 125/*
126 * ns_of_pid() returns the pid namespace in which the specified pid was
127 * allocated.
128 *
129 * NOTE:
130 * ns_of_pid() is expected to be called for a process (task) that has
131 * an attached 'struct pid' (see attach_pid(), detach_pid()) i.e @pid
132 * is expected to be non-NULL. If @pid is NULL, caller should handle
133 * the resulting NULL pid-ns.
134 */
135static inline struct pid_namespace *ns_of_pid(struct pid *pid)
136{
137 struct pid_namespace *ns = NULL;
138 if (pid)
139 ns = pid->numbers[pid->level].ns;
140 return ns;
141}
142
143/*
126 * the helpers to get the pid's id seen from different namespaces 144 * the helpers to get the pid's id seen from different namespaces
127 * 145 *
128 * pid_nr() : global id, i.e. the id seen from the init namespace; 146 * pid_nr() : global id, i.e. the id seen from the init namespace;
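
A sketch of ns_of_pid() in use (hypothetical helper; the NULL fallback
mirrors the NOTE above for tasks with no attached struct pid, and
init_pid_ns comes from <linux/pid_namespace.h>):

static struct pid_namespace *tsk_pid_ns(struct task_struct *tsk)
{
	struct pid_namespace *ns = ns_of_pid(task_pid(tsk));

	return ns ? ns : &init_pid_ns;
}
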
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index d82fe825d62f..38d10326246a 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -79,11 +79,7 @@ static inline void zap_pid_ns_processes(struct pid_namespace *ns)
 }
 #endif /* CONFIG_PID_NS */
 
-static inline struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
-{
-	return tsk->nsproxy->pid_ns;
-}
-
+extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
 void pidhash_init(void);
 void pidmap_init(void);
 
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 8fc909ef6787..9743e4dbc918 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -137,6 +137,9 @@ struct mddev_s
137 struct gendisk *gendisk; 137 struct gendisk *gendisk;
138 138
139 struct kobject kobj; 139 struct kobject kobj;
140 int hold_active;
141#define UNTIL_IOCTL 1
142#define UNTIL_STOP 2
140 143
141 /* Superblock information */ 144 /* Superblock information */
142 int major_version, 145 int major_version,
@@ -215,6 +218,9 @@ struct mddev_s
215#define MD_RECOVERY_FROZEN 9 218#define MD_RECOVERY_FROZEN 9
216 219
217 unsigned long recovery; 220 unsigned long recovery;
221 int recovery_disabled; /* if we detect that recovery
222 * will always fail, set this
223 * so we don't loop trying */
218 224
219 int in_sync; /* know to not need resync */ 225 int in_sync; /* know to not need resync */
220 struct mutex reconfig_mutex; 226 struct mutex reconfig_mutex;
@@ -244,6 +250,9 @@ struct mddev_s
244 struct sysfs_dirent *sysfs_state; /* handle for 'array_state' 250 struct sysfs_dirent *sysfs_state; /* handle for 'array_state'
245 * file in sysfs. 251 * file in sysfs.
246 */ 252 */
253 struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */
254
255 struct work_struct del_work; /* used for delayed sysfs removal */
247 256
248 spinlock_t write_lock; 257 spinlock_t write_lock;
249 wait_queue_head_t sb_wait; /* for waiting on superblock updates */ 258 wait_queue_head_t sb_wait; /* for waiting on superblock updates */
@@ -334,17 +343,14 @@ static inline char * mdname (mddev_t * mddev)
  * iterates through some rdev ringlist. It's safe to remove the
  * current 'rdev'. Dont touch 'tmp' though.
  */
-#define rdev_for_each_list(rdev, tmp, list)				\
-									\
-	for ((tmp) = (list).next;					\
-		(rdev) = (list_entry((tmp), mdk_rdev_t, same_set)),	\
-		(tmp) = (tmp)->next, (tmp)->prev != &(list)		\
-		; )
+#define rdev_for_each_list(rdev, tmp, head)				\
+	list_for_each_entry_safe(rdev, tmp, head, same_set)
+
 /*
  * iterates through the 'same array disks' ringlist
  */
 #define rdev_for_each(rdev, tmp, mddev)					\
-	rdev_for_each_list(rdev, tmp, (mddev)->disks)
+	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
 
 #define rdev_for_each_rcu(rdev, mddev)					\
 	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
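
Call sites keep the same shape after the macro rewrite; only the
implementation switches to the stock list helper. Illustrative:

static int count_in_sync(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;
	int cnt = 0;

	rdev_for_each(rdev, tmp, mddev)
		if (test_bit(In_sync, &rdev->flags))
			cnt++;
	return cnt;
}
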
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h
index 8b4de4a41ff1..9491026afe66 100644
--- a/include/linux/raid/md_p.h
+++ b/include/linux/raid/md_p.h
@@ -194,6 +194,8 @@ static inline __u64 md_event(mdp_super_t *sb) {
194 return (ev<<32)| sb->events_lo; 194 return (ev<<32)| sb->events_lo;
195} 195}
196 196
197#define MD_SUPERBLOCK_1_TIME_SEC_MASK ((1ULL<<40) - 1)
198
197/* 199/*
198 * The version-1 superblock : 200 * The version-1 superblock :
199 * All numeric fields are little-endian. 201 * All numeric fields are little-endian.
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h
index 1b2dda035f8e..fd42aa87c391 100644
--- a/include/linux/raid/raid0.h
+++ b/include/linux/raid/raid0.h
@@ -5,9 +5,9 @@
 
 struct strip_zone
 {
-	sector_t zone_offset;	/* Zone offset in md_dev */
-	sector_t dev_offset;	/* Zone offset in real dev */
-	sector_t size;		/* Zone size */
+	sector_t zone_start;	/* Zone offset in md_dev (in sectors) */
+	sector_t dev_start;	/* Zone offset in real dev (in sectors) */
+	sector_t sectors;	/* Zone size in sectors */
 	int nb_dev;		/* # of devices attached to the zone */
 	mdk_rdev_t **dev;	/* Devices attached to the zone */
 };
@@ -19,8 +19,8 @@ struct raid0_private_data
 	mdk_rdev_t **devlist;	/* lists of rdevs, pointed to by strip_zone->dev */
 	int nr_strip_zones;
 
-	sector_t hash_spacing;
-	int preshift;		/* shift this before divide by hash_spacing */
+	sector_t spacing;
+	int sector_shift;	/* shift this before divide by spacing */
 };
 
 typedef struct raid0_private_data raid0_conf_t;
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index afdc4558bb94..801bf77ff4e2 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -104,10 +104,10 @@ struct regulator;
 /**
  * struct regulator_bulk_data - Data used for bulk regulator operations.
  *
- * @supply   The name of the supply.  Initialised by the user before
+ * @supply:  The name of the supply.  Initialised by the user before
  *           using the bulk regulator APIs.
- * @consumer The regulator consumer for the supply.  This will be managed
+ * @consumer: The regulator consumer for the supply.  This will be managed
  *           by the bulk API.
  *
  * The regulator APIs provide a series of regulator_bulk_() API calls as
  * a convenience to consumers which require multiple supplies.  This
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index e37d80561985..2dae05705f13 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -24,7 +24,33 @@ struct regulator_init_data;
24/** 24/**
25 * struct regulator_ops - regulator operations. 25 * struct regulator_ops - regulator operations.
26 * 26 *
27 * This struct describes regulator operations. 27 * This struct describes regulator operations which can be implemented by
28 * regulator chip drivers.
29 *
30 * @enable: Enable the regulator.
31 * @disable: Disable the regulator.
32 * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise.
33 *
34 * @set_voltage: Set the voltage for the regulator within the range specified.
35 * The driver should select the voltage closest to min_uV.
36 * @get_voltage: Return the currently configured voltage for the regulator.
37 *
38 * @set_current_limit: Configure a limit for a current-limited regulator.
39 * @get_current_limit: Get the limit for a current-limited regulator.
40 *
41 * @set_mode: Set the operating mode for the regulator.
42 * @get_mode: Get the current operating mode for the regulator.
43 * @get_optimum_mode: Get the most efficient operating mode for the regulator
44 * when running with the specified parameters.
45 *
46 * @set_suspend_voltage: Set the voltage for the regulator when the system
47 * is suspended.
48 * @set_suspend_enable: Mark the regulator as enabled when the system is
49 * suspended.
50 * @set_suspend_disable: Mark the regulator as disabled when the system is
51 * suspended.
52 * @set_suspend_mode: Set the operating mode for the regulator when the
53 * system is suspended.
28 */ 54 */
29struct regulator_ops { 55struct regulator_ops {
30 56
@@ -75,6 +101,15 @@ enum regulator_type {
75/** 101/**
76 * struct regulator_desc - Regulator descriptor 102 * struct regulator_desc - Regulator descriptor
77 * 103 *
104 * Each regulator registered with the core is described with a structure of
105 * this type.
106 *
107 * @name: Identifying name for the regulator.
108 * @id: Numerical identifier for the regulator.
109 * @ops: Regulator operations table.
110 * @irq: Interrupt number for the regulator.
111 * @type: Indicates if the regulator is a voltage or current regulator.
112 * @owner: Module providing the regulator, used for refcounting.
78 */ 113 */
79struct regulator_desc { 114struct regulator_desc {
80 const char *name; 115 const char *name;
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index c6d69331a81e..3794773b23d2 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -44,6 +44,10 @@ struct regulator;
44 * struct regulator_state - regulator state during low power syatem states 44 * struct regulator_state - regulator state during low power syatem states
45 * 45 *
46 * This describes a regulators state during a system wide low power state. 46 * This describes a regulators state during a system wide low power state.
47 *
48 * @uV: Operating voltage during suspend.
49 * @mode: Operating mode during suspend.
50 * @enabled: Enabled during suspend.
47 */ 51 */
48struct regulator_state { 52struct regulator_state {
49 int uV; /* suspend voltage */ 53 int uV; /* suspend voltage */
@@ -55,6 +59,30 @@ struct regulator_state {
55 * struct regulation_constraints - regulator operating constraints. 59 * struct regulation_constraints - regulator operating constraints.
56 * 60 *
57 * This struct describes regulator and board/machine specific constraints. 61 * This struct describes regulator and board/machine specific constraints.
62 *
63 * @name: Descriptive name for the constraints, used for display purposes.
64 *
65 * @min_uV: Smallest voltage consumers may set.
66 * @max_uV: Largest voltage consumers may set.
67 *
68 * @min_uA: Smallest current consumers may set.
69 * @max_uA: Largest current consumers may set.
70 *
71 * @valid_modes_mask: Mask of modes which may be configured by consumers.
72 * @valid_ops_mask: Operations which may be performed by consumers.
73 *
74 * @always_on: Set if the regulator should never be disabled.
75 * @boot_on: Set if the regulator is enabled when the system is initially
76 * started.
77 * @apply_uV: Apply the voltage constraint when initialising.
78 *
79 * @input_uV: Input voltage for regulator when supplied by another regulator.
80 *
81 * @state_disk: State for regulator when system is suspended in disk mode.
82 * @state_mem: State for regulator when system is suspended in mem mode.
83 * @state_standby: State for regulator when system is suspended in standby
84 * mode.
85 * @initial_state: Suspend state to set by default.
58 */ 86 */
59struct regulation_constraints { 87struct regulation_constraints {
60 88
@@ -93,6 +121,9 @@ struct regulation_constraints {
93 * struct regulator_consumer_supply - supply -> device mapping 121 * struct regulator_consumer_supply - supply -> device mapping
94 * 122 *
95 * This maps a supply name to a device. 123 * This maps a supply name to a device.
124 *
125 * @dev: Device structure for the consumer.
126 * @supply: Name for the supply.
96 */ 127 */
97struct regulator_consumer_supply { 128struct regulator_consumer_supply {
98 struct device *dev; /* consumer */ 129 struct device *dev; /* consumer */
@@ -103,6 +134,16 @@ struct regulator_consumer_supply {
103 * struct regulator_init_data - regulator platform initialisation data. 134 * struct regulator_init_data - regulator platform initialisation data.
104 * 135 *
105 * Initialisation constraints, our supply and consumers supplies. 136 * Initialisation constraints, our supply and consumers supplies.
137 *
138 * @supply_regulator_dev: Parent regulator (if any).
139 *
140 * @constraints: Constraints. These must be specified for the regulator to
141 * be usable.
142 * @num_consumer_supplies: Number of consumer device supplies.
143 * @consumer_supplies: Consumer device supply configuration.
144 *
145 * @regulator_init: Callback invoked when the regulator has been registered.
146 * @driver_data: Data passed to regulator_init.
106 */ 147 */
107struct regulator_init_data { 148struct regulator_init_data {
108 struct device *supply_regulator_dev; /* or NULL for LINE */ 149 struct device *supply_regulator_dev; /* or NULL for LINE */
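
With the fields documented, a machine file wires a regulator up roughly
like this (hypothetical board data; the consumer device pointer is a
placeholder for a real platform device):

static struct regulator_consumer_supply board_ldo1_consumers[] = {
	{ .dev = &board_mmc_dev.dev, .supply = "vmmc" },	/* assumed */
};

static struct regulator_init_data board_ldo1_data = {
	.constraints = {
		.name		= "VMMC",
		.min_uV		= 3300000,
		.max_uV		= 3300000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= ARRAY_SIZE(board_ldo1_consumers),
	.consumer_supplies	= board_ldo1_consumers,
};
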
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 271c1c2c9f6f..dede0a2cfc45 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -43,6 +43,10 @@ struct res_counter {
43 * the routines below consider this to be IRQ-safe 43 * the routines below consider this to be IRQ-safe
44 */ 44 */
45 spinlock_t lock; 45 spinlock_t lock;
46 /*
47 * Parent counter, used for hierarchical resource accounting
48 */
49 struct res_counter *parent;
46}; 50};
47 51
48/** 52/**
@@ -87,7 +91,7 @@ enum {
87 * helpers for accounting 91 * helpers for accounting
88 */ 92 */
89 93
90void res_counter_init(struct res_counter *counter); 94void res_counter_init(struct res_counter *counter, struct res_counter *parent);
91 95
92/* 96/*
93 * charge - try to consume more resource. 97 * charge - try to consume more resource.
@@ -103,7 +107,7 @@ void res_counter_init(struct res_counter *counter);
103int __must_check res_counter_charge_locked(struct res_counter *counter, 107int __must_check res_counter_charge_locked(struct res_counter *counter,
104 unsigned long val); 108 unsigned long val);
105int __must_check res_counter_charge(struct res_counter *counter, 109int __must_check res_counter_charge(struct res_counter *counter,
106 unsigned long val); 110 unsigned long val, struct res_counter **limit_fail_at);
107 111
108/* 112/*
109 * uncharge - tell that some portion of the resource is released 113 * uncharge - tell that some portion of the resource is released
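
What the hierarchy change enables, in sketch form: a charge now walks the
parent counters, and a failed charge reports which level ran out
(hypothetical caller):

static int charge_with_blame(struct res_counter *cnt, unsigned long val)
{
	struct res_counter *fail_at;
	int ret = res_counter_charge(cnt, val, &fail_at);

	if (ret)
		pr_debug("charge of %lu refused at counter %p\n",
			 val, fail_at);
	return ret;
}
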
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index b4199841f1fc..90bbbf0b1161 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -161,6 +161,9 @@
161 161
162#define PORT_S3C6400 84 162#define PORT_S3C6400 84
163 163
164/* NWPSERIAL */
165#define PORT_NWPSERIAL 85
166
164#ifdef __KERNEL__ 167#ifdef __KERNEL__
165 168
166#include <linux/compiler.h> 169#include <linux/compiler.h>
diff --git a/include/linux/spi/tdo24m.h b/include/linux/spi/tdo24m.h
new file mode 100644
index 000000000000..7572d4e1fe76
--- /dev/null
+++ b/include/linux/spi/tdo24m.h
@@ -0,0 +1,13 @@
1#ifndef __TDO24M_H__
2#define __TDO24M_H__
3
4enum tdo24m_model {
5 TDO24M,
6 TDO35S,
7};
8
9struct tdo24m_platform_data {
10 enum tdo24m_model model;
11};
12
13#endif /* __TDO24M_H__ */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 2ce8207686e2..2b409c44db83 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -232,6 +232,11 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
232 232
233extern void hibernation_set_ops(struct platform_hibernation_ops *ops); 233extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
234extern int hibernate(void); 234extern int hibernate(void);
235extern int hibernate_nvs_register(unsigned long start, unsigned long size);
236extern int hibernate_nvs_alloc(void);
237extern void hibernate_nvs_free(void);
238extern void hibernate_nvs_save(void);
239extern void hibernate_nvs_restore(void);
235#else /* CONFIG_HIBERNATION */ 240#else /* CONFIG_HIBERNATION */
236static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } 241static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
237static inline void swsusp_set_page_free(struct page *p) {} 242static inline void swsusp_set_page_free(struct page *p) {}
@@ -239,6 +244,14 @@ static inline void swsusp_unset_page_free(struct page *p) {}
239 244
240static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} 245static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
241static inline int hibernate(void) { return -ENOSYS; } 246static inline int hibernate(void) { return -ENOSYS; }
247static inline int hibernate_nvs_register(unsigned long a, unsigned long b)
248{
249 return 0;
250}
251static inline int hibernate_nvs_alloc(void) { return 0; }
252static inline void hibernate_nvs_free(void) {}
253static inline void hibernate_nvs_save(void) {}
254static inline void hibernate_nvs_restore(void) {}
242#endif /* CONFIG_HIBERNATION */ 255#endif /* CONFIG_HIBERNATION */
243 256
244#ifdef CONFIG_PM_SLEEP 257#ifdef CONFIG_PM_SLEEP
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 91dee50fe260..d30215578877 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -214,7 +214,8 @@ static inline void lru_cache_add_active_file(struct page *page)
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
-						  gfp_t gfp_mask);
+						  gfp_t gfp_mask, bool noswap,
+						  unsigned int swappiness);
 extern int __isolate_lru_page(struct page *page, int mode, int file);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
@@ -333,6 +334,22 @@ static inline void disable_swap_token(void)
333 put_swap_token(swap_token_mm); 334 put_swap_token(swap_token_mm);
334} 335}
335 336
337#ifdef CONFIG_CGROUP_MEM_RES_CTLR
338extern void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent);
339#else
340static inline void
341mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
342{
343}
344#endif
345#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
346extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
347#else
348static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
349{
350}
351#endif
352
336#else /* CONFIG_SWAP */ 353#else /* CONFIG_SWAP */
337 354
338#define nr_swap_pages 0L 355#define nr_swap_pages 0L
@@ -409,6 +426,12 @@ static inline swp_entry_t get_swap_page(void)
409#define has_swap_token(x) 0 426#define has_swap_token(x) 0
410#define disable_swap_token() do { } while(0) 427#define disable_swap_token() do { } while(0)
411 428
429static inline int mem_cgroup_cache_charge_swapin(struct page *page,
430 struct mm_struct *mm, gfp_t mask, bool locked)
431{
432 return 0;
433}
434
412#endif /* CONFIG_SWAP */ 435#endif /* CONFIG_SWAP */
413#endif /* __KERNEL__*/ 436#endif /* __KERNEL__*/
414#endif /* _LINUX_SWAP_H */ 437#endif /* _LINUX_SWAP_H */
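These hooks give the swap teardown paths a way to notify the memory controller. A sketch of the intended pairing (the surrounding function is hypothetical; the real call sites live in the mm/ swap code):

/* Illustrative sketch, not part of the patch: where the new memcg
 * uncharge hooks are meant to be invoked. */
#include <linux/swap.h>

static void example_release_swap_entry(struct page *page, swp_entry_t ent)
{
	/* page is leaving the swap cache: undo its swapcache charge */
	mem_cgroup_uncharge_swapcache(page, ent);

	/* the swap entry itself is being freed: undo the mem+swap
	 * accounting (a no-op unless CONFIG_CGROUP_MEM_RES_CTLR_SWAP) */
	mem_cgroup_uncharge_swap(ent);
}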
diff --git a/include/net/netdma.h b/include/net/netdma.h
index f28c6e064e8f..8ba8ce284eeb 100644
--- a/include/net/netdma.h
+++ b/include/net/netdma.h
@@ -24,17 +24,6 @@
24#include <linux/dmaengine.h> 24#include <linux/dmaengine.h>
25#include <linux/skbuff.h> 25#include <linux/skbuff.h>
26 26
27static inline struct dma_chan *get_softnet_dma(void)
28{
29 struct dma_chan *chan;
30 rcu_read_lock();
31 chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma);
32 if (chan)
33 dma_chan_get(chan);
34 rcu_read_unlock();
35 return chan;
36}
37
38int dma_skb_copy_datagram_iovec(struct dma_chan* chan, 27int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
39 struct sk_buff *skb, int offset, struct iovec *to, 28 struct sk_buff *skb, int offset, struct iovec *to,
40 size_t len, struct dma_pinned_list *pinned_list); 29 size_t len, struct dma_pinned_list *pinned_list);
diff --git a/include/net/protocol.h b/include/net/protocol.h
index cb2965aa1b62..ffa5b8b1f1df 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -59,6 +59,9 @@ struct inet6_protocol
59 int (*gso_send_check)(struct sk_buff *skb); 59 int (*gso_send_check)(struct sk_buff *skb);
60 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 60 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
61 int features); 61 int features);
62 struct sk_buff **(*gro_receive)(struct sk_buff **head,
63 struct sk_buff *skb);
64 int (*gro_complete)(struct sk_buff *skb);
62 65
63 unsigned int flags; /* INET6_PROTO_xxx */ 66 unsigned int flags; /* INET6_PROTO_xxx */
64}; 67};
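With these two callbacks, IPv6 upper-layer protocols can participate in generic receive offload. A sketch of wiring them up (all example6_* names are hypothetical; a real protocol registers the struct with inet6_add_protocol()):

/* Illustrative sketch, not part of the patch. */
#include <linux/skbuff.h>
#include <net/protocol.h>

static int example6_rcv(struct sk_buff *skb)
{
	kfree_skb(skb);			/* placeholder receive path */
	return 0;
}

static struct sk_buff **example6_gro_receive(struct sk_buff **head,
					     struct sk_buff *skb)
{
	/* aggregate skb onto a held flow, or return a flow to flush */
	return NULL;
}

static int example6_gro_complete(struct sk_buff *skb)
{
	/* fix up headers/checksums of the merged super-packet */
	return 0;
}

static struct inet6_protocol example6_protocol = {
	.handler	= example6_rcv,
	.gro_receive	= example6_gro_receive,
	.gro_complete	= example6_gro_complete,
	.flags		= INET6_PROTO_NOPOLICY,
};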
diff --git a/include/net/wimax.h b/include/net/wimax.h
index 1602614fdaf9..073809ce94f8 100644
--- a/include/net/wimax.h
+++ b/include/net/wimax.h
@@ -323,6 +323,9 @@ struct input_dev;
323 * 323 *
324 * @rf_hw: [private] State of the hardware radio switch (OFF/ON) 324 * @rf_hw: [private] State of the hardware radio switch (OFF/ON)
325 * 325 *
 326 * @debugfs_dentry: [private] Used to hook up a debugfs entry. This
327 * shows up in the debugfs root as wimax:DEVICENAME.
328 *
326 * Description: 329 * Description:
327 * This structure defines a common interface to access all WiMAX 330 * This structure defines a common interface to access all WiMAX
328 * devices from different vendors and provides a common API as well as 331 * devices from different vendors and provides a common API as well as
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 6e04e6fe79c7..c9184f756cad 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -358,6 +358,7 @@ struct fc_rport { /* aka fc_starget_attrs */
358#define FC_RPORT_DEVLOSS_PENDING 0x01 358#define FC_RPORT_DEVLOSS_PENDING 0x01
359#define FC_RPORT_SCAN_PENDING 0x02 359#define FC_RPORT_SCAN_PENDING 0x02
360#define FC_RPORT_FAST_FAIL_TIMEDOUT 0x04 360#define FC_RPORT_FAST_FAIL_TIMEDOUT 0x04
361#define FC_RPORT_DEVLOSS_CALLBK_DONE 0x08
361 362
362#define dev_to_rport(d) \ 363#define dev_to_rport(d) \
363 container_of(d, struct fc_rport, dev) 364 container_of(d, struct fc_rport, dev)
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 6369d89c25d5..f87f9614844d 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -136,8 +136,6 @@ struct xenbus_transaction
136/* Nil transaction ID. */ 136/* Nil transaction ID. */
137#define XBT_NIL ((struct xenbus_transaction) { 0 }) 137#define XBT_NIL ((struct xenbus_transaction) { 0 })
138 138
139int __init xenbus_dev_init(void);
140
141char **xenbus_directory(struct xenbus_transaction t, 139char **xenbus_directory(struct xenbus_transaction t,
142 const char *dir, const char *node, unsigned int *num); 140 const char *dir, const char *node, unsigned int *num);
143void *xenbus_read(struct xenbus_transaction t, 141void *xenbus_read(struct xenbus_transaction t,
diff --git a/init/Kconfig b/init/Kconfig
index e7893b1d3e42..a724a149bf3f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -271,59 +271,6 @@ config LOG_BUF_SHIFT
271 13 => 8 KB 271 13 => 8 KB
272 12 => 4 KB 272 12 => 4 KB
273 273
274config CGROUPS
275 bool "Control Group support"
276 help
277 This option will let you use process cgroup subsystems
278 such as Cpusets
279
280 Say N if unsure.
281
282config CGROUP_DEBUG
283 bool "Example debug cgroup subsystem"
284 depends on CGROUPS
285 default n
286 help
287 This option enables a simple cgroup subsystem that
288 exports useful debugging information about the cgroups
289 framework
290
291 Say N if unsure
292
293config CGROUP_NS
294 bool "Namespace cgroup subsystem"
295 depends on CGROUPS
296 help
297 Provides a simple namespace cgroup subsystem to
298 provide hierarchical naming of sets of namespaces,
299 for instance virtual servers and checkpoint/restart
300 jobs.
301
302config CGROUP_FREEZER
303 bool "control group freezer subsystem"
304 depends on CGROUPS
305 help
306 Provides a way to freeze and unfreeze all tasks in a
307 cgroup.
308
309config CGROUP_DEVICE
310 bool "Device controller for cgroups"
311 depends on CGROUPS && EXPERIMENTAL
312 help
313 Provides a cgroup implementing whitelists for devices which
314 a process in the cgroup can mknod or open.
315
316config CPUSETS
317 bool "Cpuset support"
318 depends on SMP && CGROUPS
319 help
320 This option will let you create and manage CPUSETs which
321 allow dynamically partitioning a system into sets of CPUs and
322 Memory Nodes and assigning tasks to run only within those sets.
323 This is primarily useful on large SMP or NUMA systems.
324
325 Say N if unsure.
326
327# 274#
328# Architectures with an unreliable sched_clock() should select this: 275# Architectures with an unreliable sched_clock() should select this:
329# 276#
@@ -337,6 +284,8 @@ config GROUP_SCHED
337 help 284 help
338 This feature lets CPU scheduler recognize task groups and control CPU 285 This feature lets CPU scheduler recognize task groups and control CPU
339 bandwidth allocation to such task groups. 286 bandwidth allocation to such task groups.
287 In order to create a group from arbitrary set of processes, use
288 CONFIG_CGROUPS. (See Control Group support.)
340 289
341config FAIR_GROUP_SCHED 290config FAIR_GROUP_SCHED
342 bool "Group scheduling for SCHED_OTHER" 291 bool "Group scheduling for SCHED_OTHER"
@@ -379,6 +328,66 @@ config CGROUP_SCHED
379 328
380endchoice 329endchoice
381 330
331menu "Control Group support"
332config CGROUPS
333 bool "Control Group support"
334 help
 335 This option adds support for grouping sets of processes together, for
336 use with process control subsystems such as Cpusets, CFS, memory
337 controls or device isolation.
338 See
339 - Documentation/cpusets.txt (Cpusets)
340 - Documentation/scheduler/sched-design-CFS.txt (CFS)
341 - Documentation/cgroups/ (features for grouping, isolation)
342 - Documentation/controllers/ (features for resource control)
343
344 Say N if unsure.
345
346config CGROUP_DEBUG
347 bool "Example debug cgroup subsystem"
348 depends on CGROUPS
349 default n
350 help
351 This option enables a simple cgroup subsystem that
352 exports useful debugging information about the cgroups
353 framework
354
355 Say N if unsure
356
357config CGROUP_NS
358 bool "Namespace cgroup subsystem"
359 depends on CGROUPS
360 help
361 Provides a simple namespace cgroup subsystem to
362 provide hierarchical naming of sets of namespaces,
363 for instance virtual servers and checkpoint/restart
364 jobs.
365
366config CGROUP_FREEZER
367 bool "control group freezer subsystem"
368 depends on CGROUPS
369 help
370 Provides a way to freeze and unfreeze all tasks in a
371 cgroup.
372
373config CGROUP_DEVICE
374 bool "Device controller for cgroups"
375 depends on CGROUPS && EXPERIMENTAL
376 help
377 Provides a cgroup implementing whitelists for devices which
378 a process in the cgroup can mknod or open.
379
380config CPUSETS
381 bool "Cpuset support"
382 depends on SMP && CGROUPS
383 help
384 This option will let you create and manage CPUSETs which
385 allow dynamically partitioning a system into sets of CPUs and
386 Memory Nodes and assigning tasks to run only within those sets.
387 This is primarily useful on large SMP or NUMA systems.
388
389 Say N if unsure.
390
382config CGROUP_CPUACCT 391config CGROUP_CPUACCT
383 bool "Simple CPU accounting cgroup subsystem" 392 bool "Simple CPU accounting cgroup subsystem"
384 depends on CGROUPS 393 depends on CGROUPS
@@ -393,9 +402,6 @@ config RESOURCE_COUNTERS
393 infrastructure that works with cgroups 402 infrastructure that works with cgroups
394 depends on CGROUPS 403 depends on CGROUPS
395 404
396config MM_OWNER
397 bool
398
399config CGROUP_MEM_RES_CTLR 405config CGROUP_MEM_RES_CTLR
400 bool "Memory Resource Controller for Control Groups" 406 bool "Memory Resource Controller for Control Groups"
401 depends on CGROUPS && RESOURCE_COUNTERS 407 depends on CGROUPS && RESOURCE_COUNTERS
@@ -414,11 +420,33 @@ config CGROUP_MEM_RES_CTLR
414 sure you need the memory resource controller. Even when you enable 420 sure you need the memory resource controller. Even when you enable
415 this, you can set "cgroup_disable=memory" at your boot option to 421 this, you can set "cgroup_disable=memory" at your boot option to
416 disable memory resource controller and you can avoid overheads. 422 disable memory resource controller and you can avoid overheads.
417 (and lose benefits of memory resource contoller) 423 (and lose benefits of memory resource controller)
418 424
419 This config option also selects MM_OWNER config option, which 425 This config option also selects MM_OWNER config option, which
420 could in turn add some fork/exit overhead. 426 could in turn add some fork/exit overhead.
421 427
428config MM_OWNER
429 bool
430
431config CGROUP_MEM_RES_CTLR_SWAP
432 bool "Memory Resource Controller Swap Extension(EXPERIMENTAL)"
433 depends on CGROUP_MEM_RES_CTLR && SWAP && EXPERIMENTAL
434 help
 435 Adds swap management to the memory resource controller. When this
 436 is enabled, you can limit mem+swap usage per cgroup; when it is
 437 disabled, the memory resource controller ignores swap usage
 438 entirely, so a process can exhaust all of the swap. This extension
 439 is useful for preventing swap exhaustion, but it adds overhead
 440 and consumes memory to record the accounting information, so be
 441 especially careful about enabling it on 32-bit systems or systems
 442 with little memory. When the memory resource controller is
 443 disabled by boot option, this is automatically disabled as well
 444 and adds no overhead. Even when this config is set to y, swap is
 445 not accounted if the boot option "noswapaccount" is set.
446
447
448endmenu
449
422config SYSFS_DEPRECATED 450config SYSFS_DEPRECATED
423 bool 451 bool
424 452
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index eddb6247a553..23fdb8492b8e 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -505,7 +505,8 @@ static void __do_notify(struct mqueue_inode_info *info)
505 sig_i.si_errno = 0; 505 sig_i.si_errno = 0;
506 sig_i.si_code = SI_MESGQ; 506 sig_i.si_code = SI_MESGQ;
507 sig_i.si_value = info->notify.sigev_value; 507 sig_i.si_value = info->notify.sigev_value;
508 sig_i.si_pid = task_tgid_vnr(current); 508 sig_i.si_pid = task_tgid_nr_ns(current,
509 ns_of_pid(info->notify_owner));
509 sig_i.si_uid = current_uid(); 510 sig_i.si_uid = current_uid();
510 511
511 kill_pid_info(info->notify.sigev_signo, 512 kill_pid_info(info->notify.sigev_signo,
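The replacement reports the sender's tgid as seen from the pid namespace of the registered notify_owner, so a watcher in a child namespace receives a pid it can actually use. The same translation idiom, sketched generically (the helper name is illustrative):

/* Illustrative sketch, not part of the patch: translate current's tgid
 * into the namespace that a previously recorded struct pid belongs to. */
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/sched.h>

static pid_t example_tgid_as_seen_by(struct pid *owner)
{
	return task_tgid_nr_ns(current, ns_of_pid(owner));
}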
diff --git a/kernel/async.c b/kernel/async.c
index 97373380c9e7..64cc916299a5 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -206,7 +206,9 @@ EXPORT_SYMBOL_GPL(async_schedule_special);
206 206
207void async_synchronize_full(void) 207void async_synchronize_full(void)
208{ 208{
209 async_synchronize_cookie(next_cookie); 209 do {
210 async_synchronize_cookie(next_cookie);
211 } while (!list_empty(&async_running) || !list_empty(&async_pending));
210} 212}
211EXPORT_SYMBOL_GPL(async_synchronize_full); 213EXPORT_SYMBOL_GPL(async_synchronize_full);
212 214
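The loop matters because an async function may itself schedule more async work after next_cookie was sampled; rechecking the running and pending lists closes that window. A minimal usage sketch (worker and call site are illustrative):

/* Illustrative sketch, not part of the patch. */
#include <linux/async.h>

static void example_async_probe(void *data, async_cookie_t cookie)
{
	/* slow, order-independent initialization of "data" */
}

static void example_init_all(void **devices, int n)
{
	int i;

	for (i = 0; i < n; i++)
		async_schedule(example_async_probe, devices[i]);

	/* waits for everything, including work the workers queued */
	async_synchronize_full();
}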
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f221446aa02d..c29831076e7a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -84,7 +84,7 @@ struct cgroupfs_root {
84 /* Tracks how many cgroups are currently defined in hierarchy.*/ 84 /* Tracks how many cgroups are currently defined in hierarchy.*/
85 int number_of_cgroups; 85 int number_of_cgroups;
86 86
87 /* A list running through the mounted hierarchies */ 87 /* A list running through the active hierarchies */
88 struct list_head root_list; 88 struct list_head root_list;
89 89
90 /* Hierarchy-specific flags */ 90 /* Hierarchy-specific flags */
@@ -148,8 +148,8 @@ static int notify_on_release(const struct cgroup *cgrp)
148#define for_each_subsys(_root, _ss) \ 148#define for_each_subsys(_root, _ss) \
149list_for_each_entry(_ss, &_root->subsys_list, sibling) 149list_for_each_entry(_ss, &_root->subsys_list, sibling)
150 150
151/* for_each_root() allows you to iterate across the active hierarchies */ 151/* for_each_active_root() allows you to iterate across the active hierarchies */
152#define for_each_root(_root) \ 152#define for_each_active_root(_root) \
153list_for_each_entry(_root, &roots, root_list) 153list_for_each_entry(_root, &roots, root_list)
154 154
155/* the list of cgroups eligible for automatic release. Protected by 155/* the list of cgroups eligible for automatic release. Protected by
@@ -271,7 +271,7 @@ static void __put_css_set(struct css_set *cg, int taskexit)
271 271
272 rcu_read_lock(); 272 rcu_read_lock();
273 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { 273 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
274 struct cgroup *cgrp = cg->subsys[i]->cgroup; 274 struct cgroup *cgrp = rcu_dereference(cg->subsys[i]->cgroup);
275 if (atomic_dec_and_test(&cgrp->count) && 275 if (atomic_dec_and_test(&cgrp->count) &&
276 notify_on_release(cgrp)) { 276 notify_on_release(cgrp)) {
277 if (taskexit) 277 if (taskexit)
@@ -384,6 +384,25 @@ static int allocate_cg_links(int count, struct list_head *tmp)
384 return 0; 384 return 0;
385} 385}
386 386
387/**
388 * link_css_set - a helper function to link a css_set to a cgroup
389 * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links()
390 * @cg: the css_set to be linked
391 * @cgrp: the destination cgroup
392 */
393static void link_css_set(struct list_head *tmp_cg_links,
394 struct css_set *cg, struct cgroup *cgrp)
395{
396 struct cg_cgroup_link *link;
397
398 BUG_ON(list_empty(tmp_cg_links));
399 link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
400 cgrp_link_list);
401 link->cg = cg;
402 list_move(&link->cgrp_link_list, &cgrp->css_sets);
403 list_add(&link->cg_link_list, &cg->cg_links);
404}
405
387/* 406/*
388 * find_css_set() takes an existing cgroup group and a 407 * find_css_set() takes an existing cgroup group and a
389 * cgroup object, and returns a css_set object that's 408 * cgroup object, and returns a css_set object that's
@@ -399,7 +418,6 @@ static struct css_set *find_css_set(
399 int i; 418 int i;
400 419
401 struct list_head tmp_cg_links; 420 struct list_head tmp_cg_links;
402 struct cg_cgroup_link *link;
403 421
404 struct hlist_head *hhead; 422 struct hlist_head *hhead;
405 423
@@ -444,26 +462,11 @@ static struct css_set *find_css_set(
444 * only do it for the first subsystem in each 462 * only do it for the first subsystem in each
445 * hierarchy 463 * hierarchy
446 */ 464 */
447 if (ss->root->subsys_list.next == &ss->sibling) { 465 if (ss->root->subsys_list.next == &ss->sibling)
448 BUG_ON(list_empty(&tmp_cg_links)); 466 link_css_set(&tmp_cg_links, res, cgrp);
449 link = list_entry(tmp_cg_links.next,
450 struct cg_cgroup_link,
451 cgrp_link_list);
452 list_del(&link->cgrp_link_list);
453 list_add(&link->cgrp_link_list, &cgrp->css_sets);
454 link->cg = res;
455 list_add(&link->cg_link_list, &res->cg_links);
456 }
457 }
458 if (list_empty(&rootnode.subsys_list)) {
459 link = list_entry(tmp_cg_links.next,
460 struct cg_cgroup_link,
461 cgrp_link_list);
462 list_del(&link->cgrp_link_list);
463 list_add(&link->cgrp_link_list, &dummytop->css_sets);
464 link->cg = res;
465 list_add(&link->cg_link_list, &res->cg_links);
466 } 467 }
468 if (list_empty(&rootnode.subsys_list))
469 link_css_set(&tmp_cg_links, res, dummytop);
467 470
468 BUG_ON(!list_empty(&tmp_cg_links)); 471 BUG_ON(!list_empty(&tmp_cg_links));
469 472
@@ -586,11 +589,18 @@ static void cgroup_call_pre_destroy(struct cgroup *cgrp)
586{ 589{
587 struct cgroup_subsys *ss; 590 struct cgroup_subsys *ss;
588 for_each_subsys(cgrp->root, ss) 591 for_each_subsys(cgrp->root, ss)
589 if (ss->pre_destroy && cgrp->subsys[ss->subsys_id]) 592 if (ss->pre_destroy)
590 ss->pre_destroy(ss, cgrp); 593 ss->pre_destroy(ss, cgrp);
591 return; 594 return;
592} 595}
593 596
597static void free_cgroup_rcu(struct rcu_head *obj)
598{
599 struct cgroup *cgrp = container_of(obj, struct cgroup, rcu_head);
600
601 kfree(cgrp);
602}
603
594static void cgroup_diput(struct dentry *dentry, struct inode *inode) 604static void cgroup_diput(struct dentry *dentry, struct inode *inode)
595{ 605{
596 /* is dentry a directory ? if so, kfree() associated cgroup */ 606 /* is dentry a directory ? if so, kfree() associated cgroup */
@@ -610,19 +620,19 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
610 /* 620 /*
611 * Release the subsystem state objects. 621 * Release the subsystem state objects.
612 */ 622 */
613 for_each_subsys(cgrp->root, ss) { 623 for_each_subsys(cgrp->root, ss)
614 if (cgrp->subsys[ss->subsys_id]) 624 ss->destroy(ss, cgrp);
615 ss->destroy(ss, cgrp);
616 }
617 625
618 cgrp->root->number_of_cgroups--; 626 cgrp->root->number_of_cgroups--;
619 mutex_unlock(&cgroup_mutex); 627 mutex_unlock(&cgroup_mutex);
620 628
621 /* Drop the active superblock reference that we took when we 629 /*
622 * created the cgroup */ 630 * Drop the active superblock reference that we took when we
631 * created the cgroup
632 */
623 deactivate_super(cgrp->root->sb); 633 deactivate_super(cgrp->root->sb);
624 634
625 kfree(cgrp); 635 call_rcu(&cgrp->rcu_head, free_cgroup_rcu);
626 } 636 }
627 iput(inode); 637 iput(inode);
628} 638}
@@ -712,23 +722,26 @@ static int rebind_subsystems(struct cgroupfs_root *root,
712 BUG_ON(cgrp->subsys[i]); 722 BUG_ON(cgrp->subsys[i]);
713 BUG_ON(!dummytop->subsys[i]); 723 BUG_ON(!dummytop->subsys[i]);
714 BUG_ON(dummytop->subsys[i]->cgroup != dummytop); 724 BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
725 mutex_lock(&ss->hierarchy_mutex);
715 cgrp->subsys[i] = dummytop->subsys[i]; 726 cgrp->subsys[i] = dummytop->subsys[i];
716 cgrp->subsys[i]->cgroup = cgrp; 727 cgrp->subsys[i]->cgroup = cgrp;
717 list_add(&ss->sibling, &root->subsys_list); 728 list_move(&ss->sibling, &root->subsys_list);
718 rcu_assign_pointer(ss->root, root); 729 ss->root = root;
719 if (ss->bind) 730 if (ss->bind)
720 ss->bind(ss, cgrp); 731 ss->bind(ss, cgrp);
721 732 mutex_unlock(&ss->hierarchy_mutex);
722 } else if (bit & removed_bits) { 733 } else if (bit & removed_bits) {
723 /* We're removing this subsystem */ 734 /* We're removing this subsystem */
724 BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]); 735 BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
725 BUG_ON(cgrp->subsys[i]->cgroup != cgrp); 736 BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
737 mutex_lock(&ss->hierarchy_mutex);
726 if (ss->bind) 738 if (ss->bind)
727 ss->bind(ss, dummytop); 739 ss->bind(ss, dummytop);
728 dummytop->subsys[i]->cgroup = dummytop; 740 dummytop->subsys[i]->cgroup = dummytop;
729 cgrp->subsys[i] = NULL; 741 cgrp->subsys[i] = NULL;
730 rcu_assign_pointer(subsys[i]->root, &rootnode); 742 subsys[i]->root = &rootnode;
731 list_del(&ss->sibling); 743 list_move(&ss->sibling, &rootnode.subsys_list);
744 mutex_unlock(&ss->hierarchy_mutex);
732 } else if (bit & final_bits) { 745 } else if (bit & final_bits) {
733 /* Subsystem state should already exist */ 746 /* Subsystem state should already exist */
734 BUG_ON(!cgrp->subsys[i]); 747 BUG_ON(!cgrp->subsys[i]);
@@ -990,7 +1003,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
990 root = NULL; 1003 root = NULL;
991 } else { 1004 } else {
992 /* New superblock */ 1005 /* New superblock */
993 struct cgroup *cgrp = &root->top_cgroup; 1006 struct cgroup *root_cgrp = &root->top_cgroup;
994 struct inode *inode; 1007 struct inode *inode;
995 int i; 1008 int i;
996 1009
@@ -1031,7 +1044,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
1031 list_add(&root->root_list, &roots); 1044 list_add(&root->root_list, &roots);
1032 root_count++; 1045 root_count++;
1033 1046
1034 sb->s_root->d_fsdata = &root->top_cgroup; 1047 sb->s_root->d_fsdata = root_cgrp;
1035 root->top_cgroup.dentry = sb->s_root; 1048 root->top_cgroup.dentry = sb->s_root;
1036 1049
1037 /* Link the top cgroup in this hierarchy into all 1050 /* Link the top cgroup in this hierarchy into all
@@ -1042,29 +1055,18 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
1042 struct hlist_node *node; 1055 struct hlist_node *node;
1043 struct css_set *cg; 1056 struct css_set *cg;
1044 1057
1045 hlist_for_each_entry(cg, node, hhead, hlist) { 1058 hlist_for_each_entry(cg, node, hhead, hlist)
1046 struct cg_cgroup_link *link; 1059 link_css_set(&tmp_cg_links, cg, root_cgrp);
1047
1048 BUG_ON(list_empty(&tmp_cg_links));
1049 link = list_entry(tmp_cg_links.next,
1050 struct cg_cgroup_link,
1051 cgrp_link_list);
1052 list_del(&link->cgrp_link_list);
1053 link->cg = cg;
1054 list_add(&link->cgrp_link_list,
1055 &root->top_cgroup.css_sets);
1056 list_add(&link->cg_link_list, &cg->cg_links);
1057 }
1058 } 1060 }
1059 write_unlock(&css_set_lock); 1061 write_unlock(&css_set_lock);
1060 1062
1061 free_cg_links(&tmp_cg_links); 1063 free_cg_links(&tmp_cg_links);
1062 1064
1063 BUG_ON(!list_empty(&cgrp->sibling)); 1065 BUG_ON(!list_empty(&root_cgrp->sibling));
1064 BUG_ON(!list_empty(&cgrp->children)); 1066 BUG_ON(!list_empty(&root_cgrp->children));
1065 BUG_ON(root->number_of_cgroups != 1); 1067 BUG_ON(root->number_of_cgroups != 1);
1066 1068
1067 cgroup_populate_dir(cgrp); 1069 cgroup_populate_dir(root_cgrp);
1068 mutex_unlock(&inode->i_mutex); 1070 mutex_unlock(&inode->i_mutex);
1069 mutex_unlock(&cgroup_mutex); 1071 mutex_unlock(&cgroup_mutex);
1070 } 1072 }
@@ -1113,10 +1115,9 @@ static void cgroup_kill_sb(struct super_block *sb) {
1113 } 1115 }
1114 write_unlock(&css_set_lock); 1116 write_unlock(&css_set_lock);
1115 1117
1116 if (!list_empty(&root->root_list)) { 1118 list_del(&root->root_list);
1117 list_del(&root->root_list); 1119 root_count--;
1118 root_count--; 1120
1119 }
1120 mutex_unlock(&cgroup_mutex); 1121 mutex_unlock(&cgroup_mutex);
1121 1122
1122 kfree(root); 1123 kfree(root);
@@ -1145,14 +1146,16 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
1145 * @buf: the buffer to write the path into 1146 * @buf: the buffer to write the path into
1146 * @buflen: the length of the buffer 1147 * @buflen: the length of the buffer
1147 * 1148 *
1148 * Called with cgroup_mutex held. Writes path of cgroup into buf. 1149 * Called with cgroup_mutex held or else with an RCU-protected cgroup
1149 * Returns 0 on success, -errno on error. 1150 * reference. Writes path of cgroup into buf. Returns 0 on success,
1151 * -errno on error.
1150 */ 1152 */
1151int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) 1153int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
1152{ 1154{
1153 char *start; 1155 char *start;
1156 struct dentry *dentry = rcu_dereference(cgrp->dentry);
1154 1157
1155 if (cgrp == dummytop) { 1158 if (!dentry || cgrp == dummytop) {
1156 /* 1159 /*
1157 * Inactive subsystems have no dentry for their root 1160 * Inactive subsystems have no dentry for their root
1158 * cgroup 1161 * cgroup
@@ -1165,13 +1168,14 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
1165 1168
1166 *--start = '\0'; 1169 *--start = '\0';
1167 for (;;) { 1170 for (;;) {
1168 int len = cgrp->dentry->d_name.len; 1171 int len = dentry->d_name.len;
1169 if ((start -= len) < buf) 1172 if ((start -= len) < buf)
1170 return -ENAMETOOLONG; 1173 return -ENAMETOOLONG;
1171 memcpy(start, cgrp->dentry->d_name.name, len); 1174 memcpy(start, cgrp->dentry->d_name.name, len);
1172 cgrp = cgrp->parent; 1175 cgrp = cgrp->parent;
1173 if (!cgrp) 1176 if (!cgrp)
1174 break; 1177 break;
1178 dentry = rcu_dereference(cgrp->dentry);
1175 if (!cgrp->parent) 1179 if (!cgrp->parent)
1176 continue; 1180 continue;
1177 if (--start < buf) 1181 if (--start < buf)
@@ -1216,7 +1220,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1216 int retval = 0; 1220 int retval = 0;
1217 struct cgroup_subsys *ss; 1221 struct cgroup_subsys *ss;
1218 struct cgroup *oldcgrp; 1222 struct cgroup *oldcgrp;
1219 struct css_set *cg = tsk->cgroups; 1223 struct css_set *cg;
1220 struct css_set *newcg; 1224 struct css_set *newcg;
1221 struct cgroupfs_root *root = cgrp->root; 1225 struct cgroupfs_root *root = cgrp->root;
1222 int subsys_id; 1226 int subsys_id;
@@ -1236,11 +1240,16 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1236 } 1240 }
1237 } 1241 }
1238 1242
1243 task_lock(tsk);
1244 cg = tsk->cgroups;
1245 get_css_set(cg);
1246 task_unlock(tsk);
1239 /* 1247 /*
1240 * Locate or allocate a new css_set for this task, 1248 * Locate or allocate a new css_set for this task,
1241 * based on its final set of cgroups 1249 * based on its final set of cgroups
1242 */ 1250 */
1243 newcg = find_css_set(cg, cgrp); 1251 newcg = find_css_set(cg, cgrp);
1252 put_css_set(cg);
1244 if (!newcg) 1253 if (!newcg)
1245 return -ENOMEM; 1254 return -ENOMEM;
1246 1255
@@ -1445,7 +1454,7 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
1445 struct cftype *cft = __d_cft(file->f_dentry); 1454 struct cftype *cft = __d_cft(file->f_dentry);
1446 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); 1455 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
1447 1456
1448 if (!cft || cgroup_is_removed(cgrp)) 1457 if (cgroup_is_removed(cgrp))
1449 return -ENODEV; 1458 return -ENODEV;
1450 if (cft->write) 1459 if (cft->write)
1451 return cft->write(cgrp, cft, file, buf, nbytes, ppos); 1460 return cft->write(cgrp, cft, file, buf, nbytes, ppos);
@@ -1490,7 +1499,7 @@ static ssize_t cgroup_file_read(struct file *file, char __user *buf,
1490 struct cftype *cft = __d_cft(file->f_dentry); 1499 struct cftype *cft = __d_cft(file->f_dentry);
1491 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); 1500 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
1492 1501
1493 if (!cft || cgroup_is_removed(cgrp)) 1502 if (cgroup_is_removed(cgrp))
1494 return -ENODEV; 1503 return -ENODEV;
1495 1504
1496 if (cft->read) 1505 if (cft->read)
@@ -1554,10 +1563,8 @@ static int cgroup_file_open(struct inode *inode, struct file *file)
1554 err = generic_file_open(inode, file); 1563 err = generic_file_open(inode, file);
1555 if (err) 1564 if (err)
1556 return err; 1565 return err;
1557
1558 cft = __d_cft(file->f_dentry); 1566 cft = __d_cft(file->f_dentry);
1559 if (!cft) 1567
1560 return -ENODEV;
1561 if (cft->read_map || cft->read_seq_string) { 1568 if (cft->read_map || cft->read_seq_string) {
1562 struct cgroup_seqfile_state *state = 1569 struct cgroup_seqfile_state *state =
1563 kzalloc(sizeof(*state), GFP_USER); 1570 kzalloc(sizeof(*state), GFP_USER);
@@ -1671,7 +1678,7 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
1671 if (!error) { 1678 if (!error) {
1672 dentry->d_fsdata = cgrp; 1679 dentry->d_fsdata = cgrp;
1673 inc_nlink(parent->d_inode); 1680 inc_nlink(parent->d_inode);
1674 cgrp->dentry = dentry; 1681 rcu_assign_pointer(cgrp->dentry, dentry);
1675 dget(dentry); 1682 dget(dentry);
1676 } 1683 }
1677 dput(dentry); 1684 dput(dentry);
@@ -1812,6 +1819,7 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
1812{ 1819{
1813 struct task_struct *res; 1820 struct task_struct *res;
1814 struct list_head *l = it->task; 1821 struct list_head *l = it->task;
1822 struct cg_cgroup_link *link;
1815 1823
1816 /* If the iterator cg is NULL, we have no tasks */ 1824 /* If the iterator cg is NULL, we have no tasks */
1817 if (!it->cg_link) 1825 if (!it->cg_link)
@@ -1819,7 +1827,8 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
1819 res = list_entry(l, struct task_struct, cg_list); 1827 res = list_entry(l, struct task_struct, cg_list);
1820 /* Advance iterator to find next entry */ 1828 /* Advance iterator to find next entry */
1821 l = l->next; 1829 l = l->next;
1822 if (l == &res->cgroups->tasks) { 1830 link = list_entry(it->cg_link, struct cg_cgroup_link, cgrp_link_list);
1831 if (l == &link->cg->tasks) {
1823 /* We reached the end of this task list - move on to 1832 /* We reached the end of this task list - move on to
1824 * the next cg_cgroup_link */ 1833 * the next cg_cgroup_link */
1825 cgroup_advance_iter(cgrp, it); 1834 cgroup_advance_iter(cgrp, it);
@@ -2013,14 +2022,16 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
2013 */ 2022 */
2014static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp) 2023static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp)
2015{ 2024{
2016 int n = 0; 2025 int n = 0, pid;
2017 struct cgroup_iter it; 2026 struct cgroup_iter it;
2018 struct task_struct *tsk; 2027 struct task_struct *tsk;
2019 cgroup_iter_start(cgrp, &it); 2028 cgroup_iter_start(cgrp, &it);
2020 while ((tsk = cgroup_iter_next(cgrp, &it))) { 2029 while ((tsk = cgroup_iter_next(cgrp, &it))) {
2021 if (unlikely(n == npids)) 2030 if (unlikely(n == npids))
2022 break; 2031 break;
2023 pidarray[n++] = task_pid_vnr(tsk); 2032 pid = task_pid_vnr(tsk);
2033 if (pid > 0)
2034 pidarray[n++] = pid;
2024 } 2035 }
2025 cgroup_iter_end(cgrp, &it); 2036 cgroup_iter_end(cgrp, &it);
2026 return n; 2037 return n;
@@ -2052,7 +2063,6 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
2052 2063
2053 ret = 0; 2064 ret = 0;
2054 cgrp = dentry->d_fsdata; 2065 cgrp = dentry->d_fsdata;
2055 rcu_read_lock();
2056 2066
2057 cgroup_iter_start(cgrp, &it); 2067 cgroup_iter_start(cgrp, &it);
2058 while ((tsk = cgroup_iter_next(cgrp, &it))) { 2068 while ((tsk = cgroup_iter_next(cgrp, &it))) {
@@ -2077,7 +2087,6 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
2077 } 2087 }
2078 cgroup_iter_end(cgrp, &it); 2088 cgroup_iter_end(cgrp, &it);
2079 2089
2080 rcu_read_unlock();
2081err: 2090err:
2082 return ret; 2091 return ret;
2083} 2092}
@@ -2324,7 +2333,7 @@ static void init_cgroup_css(struct cgroup_subsys_state *css,
2324 struct cgroup *cgrp) 2333 struct cgroup *cgrp)
2325{ 2334{
2326 css->cgroup = cgrp; 2335 css->cgroup = cgrp;
2327 atomic_set(&css->refcnt, 0); 2336 atomic_set(&css->refcnt, 1);
2328 css->flags = 0; 2337 css->flags = 0;
2329 if (cgrp == dummytop) 2338 if (cgrp == dummytop)
2330 set_bit(CSS_ROOT, &css->flags); 2339 set_bit(CSS_ROOT, &css->flags);
@@ -2332,6 +2341,29 @@ static void init_cgroup_css(struct cgroup_subsys_state *css,
2332 cgrp->subsys[ss->subsys_id] = css; 2341 cgrp->subsys[ss->subsys_id] = css;
2333} 2342}
2334 2343
2344static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
2345{
2346 /* We need to take each hierarchy_mutex in a consistent order */
2347 int i;
2348
2349 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2350 struct cgroup_subsys *ss = subsys[i];
2351 if (ss->root == root)
2352 mutex_lock_nested(&ss->hierarchy_mutex, i);
2353 }
2354}
2355
2356static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
2357{
2358 int i;
2359
2360 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2361 struct cgroup_subsys *ss = subsys[i];
2362 if (ss->root == root)
2363 mutex_unlock(&ss->hierarchy_mutex);
2364 }
2365}
2366
2335/* 2367/*
2336 * cgroup_create - create a cgroup 2368 * cgroup_create - create a cgroup
2337 * @parent: cgroup that will be parent of the new cgroup 2369 * @parent: cgroup that will be parent of the new cgroup
@@ -2380,7 +2412,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
2380 init_cgroup_css(css, ss, cgrp); 2412 init_cgroup_css(css, ss, cgrp);
2381 } 2413 }
2382 2414
2415 cgroup_lock_hierarchy(root);
2383 list_add(&cgrp->sibling, &cgrp->parent->children); 2416 list_add(&cgrp->sibling, &cgrp->parent->children);
2417 cgroup_unlock_hierarchy(root);
2384 root->number_of_cgroups++; 2418 root->number_of_cgroups++;
2385 2419
2386 err = cgroup_create_dir(cgrp, dentry, mode); 2420 err = cgroup_create_dir(cgrp, dentry, mode);
@@ -2431,7 +2465,7 @@ static int cgroup_has_css_refs(struct cgroup *cgrp)
2431{ 2465{
2432 /* Check the reference count on each subsystem. Since we 2466 /* Check the reference count on each subsystem. Since we
2433 * already established that there are no tasks in the 2467 * already established that there are no tasks in the
2434 * cgroup, if the css refcount is also 0, then there should 2468 * cgroup, if the css refcount is also 1, then there should
2435 * be no outstanding references, so the subsystem is safe to 2469 * be no outstanding references, so the subsystem is safe to
2436 * destroy. We scan across all subsystems rather than using 2470 * destroy. We scan across all subsystems rather than using
2437 * the per-hierarchy linked list of mounted subsystems since 2471 * the per-hierarchy linked list of mounted subsystems since
@@ -2452,19 +2486,67 @@ static int cgroup_has_css_refs(struct cgroup *cgrp)
2452 * matter, since it can only happen if the cgroup 2486 * matter, since it can only happen if the cgroup
2453 * has been deleted and hence no longer needs the 2487 * has been deleted and hence no longer needs the
2454 * release agent to be called anyway. */ 2488 * release agent to be called anyway. */
2455 if (css && atomic_read(&css->refcnt)) 2489 if (css && (atomic_read(&css->refcnt) > 1))
2456 return 1; 2490 return 1;
2457 } 2491 }
2458 return 0; 2492 return 0;
2459} 2493}
2460 2494
2495/*
2496 * Atomically mark all (or else none) of the cgroup's CSS objects as
2497 * CSS_REMOVED. Return true on success, or false if the cgroup has
2498 * busy subsystems. Call with cgroup_mutex held
2499 */
2500
2501static int cgroup_clear_css_refs(struct cgroup *cgrp)
2502{
2503 struct cgroup_subsys *ss;
2504 unsigned long flags;
2505 bool failed = false;
2506 local_irq_save(flags);
2507 for_each_subsys(cgrp->root, ss) {
2508 struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
2509 int refcnt;
2510 do {
2511 /* We can only remove a CSS with a refcnt==1 */
2512 refcnt = atomic_read(&css->refcnt);
2513 if (refcnt > 1) {
2514 failed = true;
2515 goto done;
2516 }
2517 BUG_ON(!refcnt);
2518 /*
2519 * Drop the refcnt to 0 while we check other
2520 * subsystems. This will cause any racing
2521 * css_tryget() to spin until we set the
2522 * CSS_REMOVED bits or abort
2523 */
2524 } while (atomic_cmpxchg(&css->refcnt, refcnt, 0) != refcnt);
2525 }
2526 done:
2527 for_each_subsys(cgrp->root, ss) {
2528 struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
2529 if (failed) {
2530 /*
2531 * Restore old refcnt if we previously managed
2532 * to clear it from 1 to 0
2533 */
2534 if (!atomic_read(&css->refcnt))
2535 atomic_set(&css->refcnt, 1);
2536 } else {
2537 /* Commit the fact that the CSS is removed */
2538 set_bit(CSS_REMOVED, &css->flags);
2539 }
2540 }
2541 local_irq_restore(flags);
2542 return !failed;
2543}
2544
2461static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) 2545static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
2462{ 2546{
2463 struct cgroup *cgrp = dentry->d_fsdata; 2547 struct cgroup *cgrp = dentry->d_fsdata;
2464 struct dentry *d; 2548 struct dentry *d;
2465 struct cgroup *parent; 2549 struct cgroup *parent;
2466 struct super_block *sb;
2467 struct cgroupfs_root *root;
2468 2550
2469 /* the vfs holds both inode->i_mutex already */ 2551 /* the vfs holds both inode->i_mutex already */
2470 2552
@@ -2487,12 +2569,10 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
2487 2569
2488 mutex_lock(&cgroup_mutex); 2570 mutex_lock(&cgroup_mutex);
2489 parent = cgrp->parent; 2571 parent = cgrp->parent;
2490 root = cgrp->root;
2491 sb = root->sb;
2492 2572
2493 if (atomic_read(&cgrp->count) 2573 if (atomic_read(&cgrp->count)
2494 || !list_empty(&cgrp->children) 2574 || !list_empty(&cgrp->children)
2495 || cgroup_has_css_refs(cgrp)) { 2575 || !cgroup_clear_css_refs(cgrp)) {
2496 mutex_unlock(&cgroup_mutex); 2576 mutex_unlock(&cgroup_mutex);
2497 return -EBUSY; 2577 return -EBUSY;
2498 } 2578 }
@@ -2502,8 +2582,12 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
2502 if (!list_empty(&cgrp->release_list)) 2582 if (!list_empty(&cgrp->release_list))
2503 list_del(&cgrp->release_list); 2583 list_del(&cgrp->release_list);
2504 spin_unlock(&release_list_lock); 2584 spin_unlock(&release_list_lock);
2505 /* delete my sibling from parent->children */ 2585
2586 cgroup_lock_hierarchy(cgrp->root);
2587 /* delete this cgroup from parent->children */
2506 list_del(&cgrp->sibling); 2588 list_del(&cgrp->sibling);
2589 cgroup_unlock_hierarchy(cgrp->root);
2590
2507 spin_lock(&cgrp->dentry->d_lock); 2591 spin_lock(&cgrp->dentry->d_lock);
2508 d = dget(cgrp->dentry); 2592 d = dget(cgrp->dentry);
2509 spin_unlock(&d->d_lock); 2593 spin_unlock(&d->d_lock);
@@ -2525,6 +2609,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
2525 printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name); 2609 printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
2526 2610
2527 /* Create the top cgroup state for this subsystem */ 2611 /* Create the top cgroup state for this subsystem */
2612 list_add(&ss->sibling, &rootnode.subsys_list);
2528 ss->root = &rootnode; 2613 ss->root = &rootnode;
2529 css = ss->create(ss, dummytop); 2614 css = ss->create(ss, dummytop);
2530 /* We don't handle early failures gracefully */ 2615 /* We don't handle early failures gracefully */
@@ -2544,6 +2629,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
2544 * need to invoke fork callbacks here. */ 2629 * need to invoke fork callbacks here. */
2545 BUG_ON(!list_empty(&init_task.tasks)); 2630 BUG_ON(!list_empty(&init_task.tasks));
2546 2631
2632 mutex_init(&ss->hierarchy_mutex);
2547 ss->active = 1; 2633 ss->active = 1;
2548} 2634}
2549 2635
@@ -2562,7 +2648,6 @@ int __init cgroup_init_early(void)
2562 INIT_HLIST_NODE(&init_css_set.hlist); 2648 INIT_HLIST_NODE(&init_css_set.hlist);
2563 css_set_count = 1; 2649 css_set_count = 1;
2564 init_cgroup_root(&rootnode); 2650 init_cgroup_root(&rootnode);
2565 list_add(&rootnode.root_list, &roots);
2566 root_count = 1; 2651 root_count = 1;
2567 init_task.cgroups = &init_css_set; 2652 init_task.cgroups = &init_css_set;
2568 2653
@@ -2669,15 +2754,12 @@ static int proc_cgroup_show(struct seq_file *m, void *v)
2669 2754
2670 mutex_lock(&cgroup_mutex); 2755 mutex_lock(&cgroup_mutex);
2671 2756
2672 for_each_root(root) { 2757 for_each_active_root(root) {
2673 struct cgroup_subsys *ss; 2758 struct cgroup_subsys *ss;
2674 struct cgroup *cgrp; 2759 struct cgroup *cgrp;
2675 int subsys_id; 2760 int subsys_id;
2676 int count = 0; 2761 int count = 0;
2677 2762
2678 /* Skip this hierarchy if it has no active subsystems */
2679 if (!root->actual_subsys_bits)
2680 continue;
2681 seq_printf(m, "%lu:", root->subsys_bits); 2763 seq_printf(m, "%lu:", root->subsys_bits);
2682 for_each_subsys(root, ss) 2764 for_each_subsys(root, ss)
2683 seq_printf(m, "%s%s", count++ ? "," : "", ss->name); 2765 seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
@@ -2800,8 +2882,10 @@ void cgroup_post_fork(struct task_struct *child)
2800{ 2882{
2801 if (use_task_css_set_links) { 2883 if (use_task_css_set_links) {
2802 write_lock(&css_set_lock); 2884 write_lock(&css_set_lock);
2885 task_lock(child);
2803 if (list_empty(&child->cg_list)) 2886 if (list_empty(&child->cg_list))
2804 list_add(&child->cg_list, &child->cgroups->tasks); 2887 list_add(&child->cg_list, &child->cgroups->tasks);
2888 task_unlock(child);
2805 write_unlock(&css_set_lock); 2889 write_unlock(&css_set_lock);
2806 } 2890 }
2807} 2891}
@@ -2907,6 +2991,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
2907 mutex_unlock(&cgroup_mutex); 2991 mutex_unlock(&cgroup_mutex);
2908 return 0; 2992 return 0;
2909 } 2993 }
2994 task_lock(tsk);
2910 cg = tsk->cgroups; 2995 cg = tsk->cgroups;
2911 parent = task_cgroup(tsk, subsys->subsys_id); 2996 parent = task_cgroup(tsk, subsys->subsys_id);
2912 2997
@@ -2919,6 +3004,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
2919 3004
2920 /* Keep the cgroup alive */ 3005 /* Keep the cgroup alive */
2921 get_css_set(cg); 3006 get_css_set(cg);
3007 task_unlock(tsk);
2922 mutex_unlock(&cgroup_mutex); 3008 mutex_unlock(&cgroup_mutex);
2923 3009
2924 /* Now do the VFS work to create a cgroup */ 3010 /* Now do the VFS work to create a cgroup */
@@ -2937,7 +3023,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
2937 } 3023 }
2938 3024
2939 /* Create the cgroup directory, which also creates the cgroup */ 3025 /* Create the cgroup directory, which also creates the cgroup */
2940 ret = vfs_mkdir(inode, dentry, S_IFDIR | 0755); 3026 ret = vfs_mkdir(inode, dentry, 0755);
2941 child = __d_cgrp(dentry); 3027 child = __d_cgrp(dentry);
2942 dput(dentry); 3028 dput(dentry);
2943 if (ret) { 3029 if (ret) {
@@ -2947,13 +3033,6 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
2947 goto out_release; 3033 goto out_release;
2948 } 3034 }
2949 3035
2950 if (!child) {
2951 printk(KERN_INFO
2952 "Couldn't find new cgroup %s\n", nodename);
2953 ret = -ENOMEM;
2954 goto out_release;
2955 }
2956
2957 /* The cgroup now exists. Retake cgroup_mutex and check 3036 /* The cgroup now exists. Retake cgroup_mutex and check
2958 * that we're still in the same state that we thought we 3037 * that we're still in the same state that we thought we
2959 * were. */ 3038 * were. */
@@ -3049,7 +3128,8 @@ void __css_put(struct cgroup_subsys_state *css)
3049{ 3128{
3050 struct cgroup *cgrp = css->cgroup; 3129 struct cgroup *cgrp = css->cgroup;
3051 rcu_read_lock(); 3130 rcu_read_lock();
3052 if (atomic_dec_and_test(&css->refcnt) && notify_on_release(cgrp)) { 3131 if ((atomic_dec_return(&css->refcnt) == 1) &&
3132 notify_on_release(cgrp)) {
3053 set_bit(CGRP_RELEASABLE, &cgrp->flags); 3133 set_bit(CGRP_RELEASABLE, &cgrp->flags);
3054 check_for_release(cgrp); 3134 check_for_release(cgrp);
3055 } 3135 }
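Taken together, the refcnt changes above bias every css refcount by one: a live css with no external users sits at 1, cgroup_clear_css_refs() claims it by swinging 1 to 0, and __css_put() treats a drop to 1 as the last external reference going away. A simplified single-object sketch of that protocol (names are invented; the real code can also roll an aborted 0 back to 1, and memory barriers are elided here):

/* Illustrative sketch, not part of the patch. */
#include <linux/sched.h>	/* cpu_relax() */
#include <asm/atomic.h>

struct example_obj {
	atomic_t refcnt;	/* starts at 1: alive, no external users */
	int removed;		/* set once removal commits */
};

static int example_try_remove(struct example_obj *obj)
{
	/* only an otherwise-unreferenced object can be claimed */
	if (atomic_cmpxchg(&obj->refcnt, 1, 0) != 1)
		return 0;	/* busy: a real reference is held */
	obj->removed = 1;	/* commit: getters now fail instead of spin */
	return 1;
}

static int example_tryget(struct example_obj *obj)
{
	while (!atomic_inc_not_zero(&obj->refcnt)) {
		/* count is 0 only while a removal is committing */
		if (obj->removed)
			return 0;	/* removal won */
		cpu_relax();
	}
	return 1;
}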
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 345ace5117de..647c77a88fcb 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -84,7 +84,7 @@ struct cpuset {
84 struct cgroup_subsys_state css; 84 struct cgroup_subsys_state css;
85 85
86 unsigned long flags; /* "unsigned long" so bitops work */ 86 unsigned long flags; /* "unsigned long" so bitops work */
87 cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */ 87 cpumask_var_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
88 nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */ 88 nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */
89 89
90 struct cpuset *parent; /* my parent */ 90 struct cpuset *parent; /* my parent */
@@ -195,8 +195,6 @@ static int cpuset_mems_generation;
195 195
196static struct cpuset top_cpuset = { 196static struct cpuset top_cpuset = {
197 .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)), 197 .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
198 .cpus_allowed = CPU_MASK_ALL,
199 .mems_allowed = NODE_MASK_ALL,
200}; 198};
201 199
202/* 200/*
@@ -278,7 +276,7 @@ static struct file_system_type cpuset_fs_type = {
278}; 276};
279 277
280/* 278/*
281 * Return in *pmask the portion of a cpusets's cpus_allowed that 279 * Return in pmask the portion of a cpusets's cpus_allowed that
282 * are online. If none are online, walk up the cpuset hierarchy 280 * are online. If none are online, walk up the cpuset hierarchy
283 * until we find one that does have some online cpus. If we get 281 * until we find one that does have some online cpus. If we get
284 * all the way to the top and still haven't found any online cpus, 282 * all the way to the top and still haven't found any online cpus,
@@ -291,15 +289,16 @@ static struct file_system_type cpuset_fs_type = {
291 * Call with callback_mutex held. 289 * Call with callback_mutex held.
292 */ 290 */
293 291
294static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) 292static void guarantee_online_cpus(const struct cpuset *cs,
293 struct cpumask *pmask)
295{ 294{
296 while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map)) 295 while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
297 cs = cs->parent; 296 cs = cs->parent;
298 if (cs) 297 if (cs)
299 cpus_and(*pmask, cs->cpus_allowed, cpu_online_map); 298 cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
300 else 299 else
301 *pmask = cpu_online_map; 300 cpumask_copy(pmask, cpu_online_mask);
302 BUG_ON(!cpus_intersects(*pmask, cpu_online_map)); 301 BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
303} 302}
304 303
305/* 304/*
@@ -375,14 +374,9 @@ void cpuset_update_task_memory_state(void)
375 struct task_struct *tsk = current; 374 struct task_struct *tsk = current;
376 struct cpuset *cs; 375 struct cpuset *cs;
377 376
378 if (task_cs(tsk) == &top_cpuset) { 377 rcu_read_lock();
379 /* Don't need rcu for top_cpuset. It's never freed. */ 378 my_cpusets_mem_gen = task_cs(tsk)->mems_generation;
380 my_cpusets_mem_gen = top_cpuset.mems_generation; 379 rcu_read_unlock();
381 } else {
382 rcu_read_lock();
383 my_cpusets_mem_gen = task_cs(tsk)->mems_generation;
384 rcu_read_unlock();
385 }
386 380
387 if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) { 381 if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
388 mutex_lock(&callback_mutex); 382 mutex_lock(&callback_mutex);
@@ -414,12 +408,43 @@ void cpuset_update_task_memory_state(void)
414 408
415static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) 409static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
416{ 410{
417 return cpus_subset(p->cpus_allowed, q->cpus_allowed) && 411 return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
418 nodes_subset(p->mems_allowed, q->mems_allowed) && 412 nodes_subset(p->mems_allowed, q->mems_allowed) &&
419 is_cpu_exclusive(p) <= is_cpu_exclusive(q) && 413 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
420 is_mem_exclusive(p) <= is_mem_exclusive(q); 414 is_mem_exclusive(p) <= is_mem_exclusive(q);
421} 415}
422 416
417/**
418 * alloc_trial_cpuset - allocate a trial cpuset
419 * @cs: the cpuset that the trial cpuset duplicates
420 */
421static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
422{
423 struct cpuset *trial;
424
425 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
426 if (!trial)
427 return NULL;
428
429 if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
430 kfree(trial);
431 return NULL;
432 }
433 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
434
435 return trial;
436}
437
438/**
439 * free_trial_cpuset - free the trial cpuset
440 * @trial: the trial cpuset to be freed
441 */
442static void free_trial_cpuset(struct cpuset *trial)
443{
444 free_cpumask_var(trial->cpus_allowed);
445 kfree(trial);
446}
447
423/* 448/*
424 * validate_change() - Used to validate that any proposed cpuset change 449 * validate_change() - Used to validate that any proposed cpuset change
425 * follows the structural rules for cpusets. 450 * follows the structural rules for cpusets.
@@ -469,7 +494,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
469 c = cgroup_cs(cont); 494 c = cgroup_cs(cont);
470 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && 495 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
471 c != cur && 496 c != cur &&
472 cpus_intersects(trial->cpus_allowed, c->cpus_allowed)) 497 cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
473 return -EINVAL; 498 return -EINVAL;
474 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && 499 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
475 c != cur && 500 c != cur &&
@@ -479,7 +504,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
479 504
480 /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */ 505 /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
481 if (cgroup_task_count(cur->css.cgroup)) { 506 if (cgroup_task_count(cur->css.cgroup)) {
482 if (cpus_empty(trial->cpus_allowed) || 507 if (cpumask_empty(trial->cpus_allowed) ||
483 nodes_empty(trial->mems_allowed)) { 508 nodes_empty(trial->mems_allowed)) {
484 return -ENOSPC; 509 return -ENOSPC;
485 } 510 }
@@ -494,7 +519,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
494 */ 519 */
495static int cpusets_overlap(struct cpuset *a, struct cpuset *b) 520static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
496{ 521{
497 return cpus_intersects(a->cpus_allowed, b->cpus_allowed); 522 return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
498} 523}
499 524
500static void 525static void
@@ -519,7 +544,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
519 cp = list_first_entry(&q, struct cpuset, stack_list); 544 cp = list_first_entry(&q, struct cpuset, stack_list);
520 list_del(q.next); 545 list_del(q.next);
521 546
522 if (cpus_empty(cp->cpus_allowed)) 547 if (cpumask_empty(cp->cpus_allowed))
523 continue; 548 continue;
524 549
525 if (is_sched_load_balance(cp)) 550 if (is_sched_load_balance(cp))
@@ -586,7 +611,8 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
586 * element of the partition (one sched domain) to be passed to 611 * element of the partition (one sched domain) to be passed to
587 * partition_sched_domains(). 612 * partition_sched_domains().
588 */ 613 */
589static int generate_sched_domains(cpumask_t **domains, 614/* FIXME: see the FIXME in partition_sched_domains() */
615static int generate_sched_domains(struct cpumask **domains,
590 struct sched_domain_attr **attributes) 616 struct sched_domain_attr **attributes)
591{ 617{
592 LIST_HEAD(q); /* queue of cpusets to be scanned */ 618 LIST_HEAD(q); /* queue of cpusets to be scanned */
@@ -594,10 +620,10 @@ static int generate_sched_domains(cpumask_t **domains,
594 struct cpuset **csa; /* array of all cpuset ptrs */ 620 struct cpuset **csa; /* array of all cpuset ptrs */
595 int csn; /* how many cpuset ptrs in csa so far */ 621 int csn; /* how many cpuset ptrs in csa so far */
596 int i, j, k; /* indices for partition finding loops */ 622 int i, j, k; /* indices for partition finding loops */
597 cpumask_t *doms; /* resulting partition; i.e. sched domains */ 623 struct cpumask *doms; /* resulting partition; i.e. sched domains */
598 struct sched_domain_attr *dattr; /* attributes for custom domains */ 624 struct sched_domain_attr *dattr; /* attributes for custom domains */
599 int ndoms = 0; /* number of sched domains in result */ 625 int ndoms = 0; /* number of sched domains in result */
600 int nslot; /* next empty doms[] cpumask_t slot */ 626 int nslot; /* next empty doms[] struct cpumask slot */
601 627
602 doms = NULL; 628 doms = NULL;
603 dattr = NULL; 629 dattr = NULL;
@@ -605,7 +631,7 @@ static int generate_sched_domains(cpumask_t **domains,
605 631
606 /* Special case for the 99% of systems with one, full, sched domain */ 632 /* Special case for the 99% of systems with one, full, sched domain */
607 if (is_sched_load_balance(&top_cpuset)) { 633 if (is_sched_load_balance(&top_cpuset)) {
608 doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL); 634 doms = kmalloc(cpumask_size(), GFP_KERNEL);
609 if (!doms) 635 if (!doms)
610 goto done; 636 goto done;
611 637
@@ -614,7 +640,7 @@ static int generate_sched_domains(cpumask_t **domains,
614 *dattr = SD_ATTR_INIT; 640 *dattr = SD_ATTR_INIT;
615 update_domain_attr_tree(dattr, &top_cpuset); 641 update_domain_attr_tree(dattr, &top_cpuset);
616 } 642 }
617 *doms = top_cpuset.cpus_allowed; 643 cpumask_copy(doms, top_cpuset.cpus_allowed);
618 644
619 ndoms = 1; 645 ndoms = 1;
620 goto done; 646 goto done;
@@ -633,7 +659,7 @@ static int generate_sched_domains(cpumask_t **domains,
633 cp = list_first_entry(&q, struct cpuset, stack_list); 659 cp = list_first_entry(&q, struct cpuset, stack_list);
634 list_del(q.next); 660 list_del(q.next);
635 661
636 if (cpus_empty(cp->cpus_allowed)) 662 if (cpumask_empty(cp->cpus_allowed))
637 continue; 663 continue;
638 664
639 /* 665 /*
@@ -684,7 +710,7 @@ restart:
684 * Now we know how many domains to create. 710 * Now we know how many domains to create.
685 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. 711 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
686 */ 712 */
687 doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL); 713 doms = kmalloc(ndoms * cpumask_size(), GFP_KERNEL);
688 if (!doms) 714 if (!doms)
689 goto done; 715 goto done;
690 716
@@ -696,7 +722,7 @@ restart:
696 722
697 for (nslot = 0, i = 0; i < csn; i++) { 723 for (nslot = 0, i = 0; i < csn; i++) {
698 struct cpuset *a = csa[i]; 724 struct cpuset *a = csa[i];
699 cpumask_t *dp; 725 struct cpumask *dp;
700 int apn = a->pn; 726 int apn = a->pn;
701 727
702 if (apn < 0) { 728 if (apn < 0) {
@@ -719,14 +745,14 @@ restart:
719 continue; 745 continue;
720 } 746 }
721 747
722 cpus_clear(*dp); 748 cpumask_clear(dp);
723 if (dattr) 749 if (dattr)
724 *(dattr + nslot) = SD_ATTR_INIT; 750 *(dattr + nslot) = SD_ATTR_INIT;
725 for (j = i; j < csn; j++) { 751 for (j = i; j < csn; j++) {
726 struct cpuset *b = csa[j]; 752 struct cpuset *b = csa[j];
727 753
728 if (apn == b->pn) { 754 if (apn == b->pn) {
729 cpus_or(*dp, *dp, b->cpus_allowed); 755 cpumask_or(dp, dp, b->cpus_allowed);
730 if (dattr) 756 if (dattr)
731 update_domain_attr_tree(dattr + nslot, b); 757 update_domain_attr_tree(dattr + nslot, b);
732 758
@@ -766,7 +792,7 @@ done:
766static void do_rebuild_sched_domains(struct work_struct *unused) 792static void do_rebuild_sched_domains(struct work_struct *unused)
767{ 793{
768 struct sched_domain_attr *attr; 794 struct sched_domain_attr *attr;
769 cpumask_t *doms; 795 struct cpumask *doms;
770 int ndoms; 796 int ndoms;
771 797
772 get_online_cpus(); 798 get_online_cpus();
@@ -835,7 +861,7 @@ void rebuild_sched_domains(void)
835static int cpuset_test_cpumask(struct task_struct *tsk, 861static int cpuset_test_cpumask(struct task_struct *tsk,
836 struct cgroup_scanner *scan) 862 struct cgroup_scanner *scan)
837{ 863{
838 return !cpus_equal(tsk->cpus_allowed, 864 return !cpumask_equal(&tsk->cpus_allowed,
839 (cgroup_cs(scan->cg))->cpus_allowed); 865 (cgroup_cs(scan->cg))->cpus_allowed);
840} 866}
841 867
@@ -853,7 +879,7 @@ static int cpuset_test_cpumask(struct task_struct *tsk,
853static void cpuset_change_cpumask(struct task_struct *tsk, 879static void cpuset_change_cpumask(struct task_struct *tsk,
854 struct cgroup_scanner *scan) 880 struct cgroup_scanner *scan)
855{ 881{
856 set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed)); 882 set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
857} 883}
858 884
859/** 885/**
@@ -885,10 +911,10 @@ static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
885 * @cs: the cpuset to consider 911 * @cs: the cpuset to consider
886 * @buf: buffer of cpu numbers written to this cpuset 912 * @buf: buffer of cpu numbers written to this cpuset
887 */ 913 */
888static int update_cpumask(struct cpuset *cs, const char *buf) 914static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
915 const char *buf)
889{ 916{
890 struct ptr_heap heap; 917 struct ptr_heap heap;
891 struct cpuset trialcs;
892 int retval; 918 int retval;
893 int is_load_balanced; 919 int is_load_balanced;
894 920
@@ -896,8 +922,6 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
896 if (cs == &top_cpuset) 922 if (cs == &top_cpuset)
897 return -EACCES; 923 return -EACCES;
898 924
899 trialcs = *cs;
900
901 /* 925 /*
902 * An empty cpus_allowed is ok only if the cpuset has no tasks. 926 * An empty cpus_allowed is ok only if the cpuset has no tasks.
903 * Since cpulist_parse() fails on an empty mask, we special case 927 * Since cpulist_parse() fails on an empty mask, we special case
@@ -905,31 +929,31 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
905 * with tasks have cpus. 929 * with tasks have cpus.
906 */ 930 */
907 if (!*buf) { 931 if (!*buf) {
908 cpus_clear(trialcs.cpus_allowed); 932 cpumask_clear(trialcs->cpus_allowed);
909 } else { 933 } else {
910 retval = cpulist_parse(buf, &trialcs.cpus_allowed); 934 retval = cpulist_parse(buf, trialcs->cpus_allowed);
911 if (retval < 0) 935 if (retval < 0)
912 return retval; 936 return retval;
913 937
914 if (!cpus_subset(trialcs.cpus_allowed, cpu_online_map)) 938 if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
915 return -EINVAL; 939 return -EINVAL;
916 } 940 }
917 retval = validate_change(cs, &trialcs); 941 retval = validate_change(cs, trialcs);
918 if (retval < 0) 942 if (retval < 0)
919 return retval; 943 return retval;
920 944
921 /* Nothing to do if the cpus didn't change */ 945 /* Nothing to do if the cpus didn't change */
922 if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed)) 946 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
923 return 0; 947 return 0;
924 948
925 retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); 949 retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
926 if (retval) 950 if (retval)
927 return retval; 951 return retval;
928 952
929 is_load_balanced = is_sched_load_balance(&trialcs); 953 is_load_balanced = is_sched_load_balance(trialcs);
930 954
931 mutex_lock(&callback_mutex); 955 mutex_lock(&callback_mutex);
932 cs->cpus_allowed = trialcs.cpus_allowed; 956 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
933 mutex_unlock(&callback_mutex); 957 mutex_unlock(&callback_mutex);
934 958
935 /* 959 /*
@@ -1017,7 +1041,7 @@ static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
1017 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ 1041 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
1018 1042
1019 fudge = 10; /* spare mmarray[] slots */ 1043 fudge = 10; /* spare mmarray[] slots */
1020 fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */ 1044 fudge += cpumask_weight(cs->cpus_allowed);/* imagine 1 fork-bomb/cpu */
1021 retval = -ENOMEM; 1045 retval = -ENOMEM;
1022 1046
1023 /* 1047 /*
@@ -1104,9 +1128,9 @@ done:
1104 * lock each such task's mm->mmap_sem, scan its VMAs and rebind 1128
1105 * their mempolicies to the cpuset's new mems_allowed. 1129
1106 */ 1130 */
1107static int update_nodemask(struct cpuset *cs, const char *buf) 1131static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1132 const char *buf)
1108{ 1133{
1109 struct cpuset trialcs;
1110 nodemask_t oldmem; 1134 nodemask_t oldmem;
1111 int retval; 1135 int retval;
1112 1136
@@ -1117,8 +1141,6 @@ static int update_nodemask(struct cpuset *cs, const char *buf)
1117 if (cs == &top_cpuset) 1141 if (cs == &top_cpuset)
1118 return -EACCES; 1142 return -EACCES;
1119 1143
1120 trialcs = *cs;
1121
1122 /* 1144 /*
1123 * An empty mems_allowed is ok iff there are no tasks in the cpuset. 1145 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1124 * Since nodelist_parse() fails on an empty mask, we special case 1146 * Since nodelist_parse() fails on an empty mask, we special case
@@ -1126,27 +1148,27 @@ static int update_nodemask(struct cpuset *cs, const char *buf)
1126 * with tasks have memory. 1148 * with tasks have memory.
1127 */ 1149 */
1128 if (!*buf) { 1150 if (!*buf) {
1129 nodes_clear(trialcs.mems_allowed); 1151 nodes_clear(trialcs->mems_allowed);
1130 } else { 1152 } else {
1131 retval = nodelist_parse(buf, trialcs.mems_allowed); 1153 retval = nodelist_parse(buf, trialcs->mems_allowed);
1132 if (retval < 0) 1154 if (retval < 0)
1133 goto done; 1155 goto done;
1134 1156
1135 if (!nodes_subset(trialcs.mems_allowed, 1157 if (!nodes_subset(trialcs->mems_allowed,
1136 node_states[N_HIGH_MEMORY])) 1158 node_states[N_HIGH_MEMORY]))
1137 return -EINVAL; 1159 return -EINVAL;
1138 } 1160 }
1139 oldmem = cs->mems_allowed; 1161 oldmem = cs->mems_allowed;
1140 if (nodes_equal(oldmem, trialcs.mems_allowed)) { 1162 if (nodes_equal(oldmem, trialcs->mems_allowed)) {
1141 retval = 0; /* Too easy - nothing to do */ 1163 retval = 0; /* Too easy - nothing to do */
1142 goto done; 1164 goto done;
1143 } 1165 }
1144 retval = validate_change(cs, &trialcs); 1166 retval = validate_change(cs, trialcs);
1145 if (retval < 0) 1167 if (retval < 0)
1146 goto done; 1168 goto done;
1147 1169
1148 mutex_lock(&callback_mutex); 1170 mutex_lock(&callback_mutex);
1149 cs->mems_allowed = trialcs.mems_allowed; 1171 cs->mems_allowed = trialcs->mems_allowed;
1150 cs->mems_generation = cpuset_mems_generation++; 1172 cs->mems_generation = cpuset_mems_generation++;
1151 mutex_unlock(&callback_mutex); 1173 mutex_unlock(&callback_mutex);
1152 1174
@@ -1167,7 +1189,8 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
1167 1189
1168 if (val != cs->relax_domain_level) { 1190 if (val != cs->relax_domain_level) {
1169 cs->relax_domain_level = val; 1191 cs->relax_domain_level = val;
1170 if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs)) 1192 if (!cpumask_empty(cs->cpus_allowed) &&
1193 is_sched_load_balance(cs))
1171 async_rebuild_sched_domains(); 1194 async_rebuild_sched_domains();
1172 } 1195 }
1173 1196
@@ -1186,31 +1209,36 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
1186static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, 1209static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1187 int turning_on) 1210 int turning_on)
1188{ 1211{
1189 struct cpuset trialcs; 1212 struct cpuset *trialcs;
1190 int err; 1213 int err;
1191 int balance_flag_changed; 1214 int balance_flag_changed;
1192 1215
1193 trialcs = *cs; 1216 trialcs = alloc_trial_cpuset(cs);
1217 if (!trialcs)
1218 return -ENOMEM;
1219
1194 if (turning_on) 1220 if (turning_on)
1195 set_bit(bit, &trialcs.flags); 1221 set_bit(bit, &trialcs->flags);
1196 else 1222 else
1197 clear_bit(bit, &trialcs.flags); 1223 clear_bit(bit, &trialcs->flags);
1198 1224
1199 err = validate_change(cs, &trialcs); 1225 err = validate_change(cs, trialcs);
1200 if (err < 0) 1226 if (err < 0)
1201 return err; 1227 goto out;
1202 1228
1203 balance_flag_changed = (is_sched_load_balance(cs) != 1229 balance_flag_changed = (is_sched_load_balance(cs) !=
1204 is_sched_load_balance(&trialcs)); 1230 is_sched_load_balance(trialcs));
1205 1231
1206 mutex_lock(&callback_mutex); 1232 mutex_lock(&callback_mutex);
1207 cs->flags = trialcs.flags; 1233 cs->flags = trialcs->flags;
1208 mutex_unlock(&callback_mutex); 1234 mutex_unlock(&callback_mutex);
1209 1235
1210 if (!cpus_empty(trialcs.cpus_allowed) && balance_flag_changed) 1236 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1211 async_rebuild_sched_domains(); 1237 async_rebuild_sched_domains();
1212 1238
1213 return 0; 1239out:
1240 free_trial_cpuset(trialcs);
1241 return err;
1214} 1242}
1215 1243
1216/* 1244/*
@@ -1311,42 +1339,47 @@ static int fmeter_getrate(struct fmeter *fmp)
1311 return val; 1339 return val;
1312} 1340}
1313 1341
1342/* Protected by cgroup_lock */
1343static cpumask_var_t cpus_attach;
1344
1314/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */ 1345/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
1315static int cpuset_can_attach(struct cgroup_subsys *ss, 1346static int cpuset_can_attach(struct cgroup_subsys *ss,
1316 struct cgroup *cont, struct task_struct *tsk) 1347 struct cgroup *cont, struct task_struct *tsk)
1317{ 1348{
1318 struct cpuset *cs = cgroup_cs(cont); 1349 struct cpuset *cs = cgroup_cs(cont);
1350 int ret = 0;
1319 1351
1320 if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) 1352 if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1321 return -ENOSPC; 1353 return -ENOSPC;
1322 if (tsk->flags & PF_THREAD_BOUND) {
1323 cpumask_t mask;
1324 1354
1355 if (tsk->flags & PF_THREAD_BOUND) {
1325 mutex_lock(&callback_mutex); 1356 mutex_lock(&callback_mutex);
1326 mask = cs->cpus_allowed; 1357 if (!cpumask_equal(&tsk->cpus_allowed, cs->cpus_allowed))
1358 ret = -EINVAL;
1327 mutex_unlock(&callback_mutex); 1359 mutex_unlock(&callback_mutex);
1328 if (!cpus_equal(tsk->cpus_allowed, mask))
1329 return -EINVAL;
1330 } 1360 }
1331 1361
1332 return security_task_setscheduler(tsk, 0, NULL); 1362 return ret < 0 ? ret : security_task_setscheduler(tsk, 0, NULL);
1333} 1363}
1334 1364
1335static void cpuset_attach(struct cgroup_subsys *ss, 1365static void cpuset_attach(struct cgroup_subsys *ss,
1336 struct cgroup *cont, struct cgroup *oldcont, 1366 struct cgroup *cont, struct cgroup *oldcont,
1337 struct task_struct *tsk) 1367 struct task_struct *tsk)
1338{ 1368{
1339 cpumask_t cpus;
1340 nodemask_t from, to; 1369 nodemask_t from, to;
1341 struct mm_struct *mm; 1370 struct mm_struct *mm;
1342 struct cpuset *cs = cgroup_cs(cont); 1371 struct cpuset *cs = cgroup_cs(cont);
1343 struct cpuset *oldcs = cgroup_cs(oldcont); 1372 struct cpuset *oldcs = cgroup_cs(oldcont);
1344 int err; 1373 int err;
1345 1374
1346 mutex_lock(&callback_mutex); 1375 if (cs == &top_cpuset) {
1347 guarantee_online_cpus(cs, &cpus); 1376 cpumask_copy(cpus_attach, cpu_possible_mask);
1348 err = set_cpus_allowed_ptr(tsk, &cpus); 1377 } else {
1349 mutex_unlock(&callback_mutex); 1378 mutex_lock(&callback_mutex);
1379 guarantee_online_cpus(cs, cpus_attach);
1380 mutex_unlock(&callback_mutex);
1381 }
1382 err = set_cpus_allowed_ptr(tsk, cpus_attach);
1350 if (err) 1383 if (err)
1351 return; 1384 return;
1352 1385
@@ -1359,7 +1392,6 @@ static void cpuset_attach(struct cgroup_subsys *ss,
1359 cpuset_migrate_mm(mm, &from, &to); 1392 cpuset_migrate_mm(mm, &from, &to);
1360 mmput(mm); 1393 mmput(mm);
1361 } 1394 }
1362
1363} 1395}
1364 1396
1365/* The various types of files and directories in a cpuset file system */ 1397/* The various types of files and directories in a cpuset file system */
@@ -1454,21 +1486,29 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1454 const char *buf) 1486 const char *buf)
1455{ 1487{
1456 int retval = 0; 1488 int retval = 0;
1489 struct cpuset *cs = cgroup_cs(cgrp);
1490 struct cpuset *trialcs;
1457 1491
1458 if (!cgroup_lock_live_group(cgrp)) 1492 if (!cgroup_lock_live_group(cgrp))
1459 return -ENODEV; 1493 return -ENODEV;
1460 1494
1495 trialcs = alloc_trial_cpuset(cs);
1496 if (!trialcs)
1497 return -ENOMEM;
1498
1461 switch (cft->private) { 1499 switch (cft->private) {
1462 case FILE_CPULIST: 1500 case FILE_CPULIST:
1463 retval = update_cpumask(cgroup_cs(cgrp), buf); 1501 retval = update_cpumask(cs, trialcs, buf);
1464 break; 1502 break;
1465 case FILE_MEMLIST: 1503 case FILE_MEMLIST:
1466 retval = update_nodemask(cgroup_cs(cgrp), buf); 1504 retval = update_nodemask(cs, trialcs, buf);
1467 break; 1505 break;
1468 default: 1506 default:
1469 retval = -EINVAL; 1507 retval = -EINVAL;
1470 break; 1508 break;
1471 } 1509 }
1510
1511 free_trial_cpuset(trialcs);
1472 cgroup_unlock(); 1512 cgroup_unlock();
1473 return retval; 1513 return retval;
1474} 1514}
@@ -1487,13 +1527,13 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1487 1527
1488static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs) 1528static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1489{ 1529{
1490 cpumask_t mask; 1530 int ret;
1491 1531
1492 mutex_lock(&callback_mutex); 1532 mutex_lock(&callback_mutex);
1493 mask = cs->cpus_allowed; 1533 ret = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
1494 mutex_unlock(&callback_mutex); 1534 mutex_unlock(&callback_mutex);
1495 1535
1496 return cpulist_scnprintf(page, PAGE_SIZE, &mask); 1536 return ret;
1497} 1537}
1498 1538
1499static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) 1539static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
@@ -1729,7 +1769,7 @@ static void cpuset_post_clone(struct cgroup_subsys *ss,
1729 parent_cs = cgroup_cs(parent); 1769 parent_cs = cgroup_cs(parent);
1730 1770
1731 cs->mems_allowed = parent_cs->mems_allowed; 1771 cs->mems_allowed = parent_cs->mems_allowed;
1732 cs->cpus_allowed = parent_cs->cpus_allowed; 1772 cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed);
1733 return; 1773 return;
1734} 1774}
1735 1775
@@ -1755,6 +1795,10 @@ static struct cgroup_subsys_state *cpuset_create(
1755 cs = kmalloc(sizeof(*cs), GFP_KERNEL); 1795 cs = kmalloc(sizeof(*cs), GFP_KERNEL);
1756 if (!cs) 1796 if (!cs)
1757 return ERR_PTR(-ENOMEM); 1797 return ERR_PTR(-ENOMEM);
1798 if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
1799 kfree(cs);
1800 return ERR_PTR(-ENOMEM);
1801 }
1758 1802
1759 cpuset_update_task_memory_state(); 1803 cpuset_update_task_memory_state();
1760 cs->flags = 0; 1804 cs->flags = 0;
@@ -1763,7 +1807,7 @@ static struct cgroup_subsys_state *cpuset_create(
1763 if (is_spread_slab(parent)) 1807 if (is_spread_slab(parent))
1764 set_bit(CS_SPREAD_SLAB, &cs->flags); 1808 set_bit(CS_SPREAD_SLAB, &cs->flags);
1765 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 1809 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1766 cpus_clear(cs->cpus_allowed); 1810 cpumask_clear(cs->cpus_allowed);
1767 nodes_clear(cs->mems_allowed); 1811 nodes_clear(cs->mems_allowed);
1768 cs->mems_generation = cpuset_mems_generation++; 1812 cs->mems_generation = cpuset_mems_generation++;
1769 fmeter_init(&cs->fmeter); 1813 fmeter_init(&cs->fmeter);
@@ -1790,6 +1834,7 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
1790 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); 1834 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1791 1835
1792 number_of_cpusets--; 1836 number_of_cpusets--;
1837 free_cpumask_var(cs->cpus_allowed);
1793 kfree(cs); 1838 kfree(cs);
1794} 1839}
1795 1840
@@ -1813,6 +1858,8 @@ struct cgroup_subsys cpuset_subsys = {
1813 1858
1814int __init cpuset_init_early(void) 1859int __init cpuset_init_early(void)
1815{ 1860{
1861 alloc_bootmem_cpumask_var(&top_cpuset.cpus_allowed);
1862
1816 top_cpuset.mems_generation = cpuset_mems_generation++; 1863 top_cpuset.mems_generation = cpuset_mems_generation++;
1817 return 0; 1864 return 0;
1818} 1865}
@@ -1828,7 +1875,7 @@ int __init cpuset_init(void)
1828{ 1875{
1829 int err = 0; 1876 int err = 0;
1830 1877
1831 cpus_setall(top_cpuset.cpus_allowed); 1878 cpumask_setall(top_cpuset.cpus_allowed);
1832 nodes_setall(top_cpuset.mems_allowed); 1879 nodes_setall(top_cpuset.mems_allowed);
1833 1880
1834 fmeter_init(&top_cpuset.fmeter); 1881 fmeter_init(&top_cpuset.fmeter);
@@ -1840,6 +1887,9 @@ int __init cpuset_init(void)
1840 if (err < 0) 1887 if (err < 0)
1841 return err; 1888 return err;
1842 1889
1890 if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
1891 BUG();
1892
1843 number_of_cpusets = 1; 1893 number_of_cpusets = 1;
1844 return 0; 1894 return 0;
1845} 1895}
@@ -1914,7 +1964,7 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
1914 * has online cpus, so can't be empty). 1964 * has online cpus, so can't be empty).
1915 */ 1965 */
1916 parent = cs->parent; 1966 parent = cs->parent;
1917 while (cpus_empty(parent->cpus_allowed) || 1967 while (cpumask_empty(parent->cpus_allowed) ||
1918 nodes_empty(parent->mems_allowed)) 1968 nodes_empty(parent->mems_allowed))
1919 parent = parent->parent; 1969 parent = parent->parent;
1920 1970
@@ -1955,7 +2005,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
1955 } 2005 }
1956 2006
1957 /* Continue past cpusets with all cpus, mems online */ 2007 /* Continue past cpusets with all cpus, mems online */
1958 if (cpus_subset(cp->cpus_allowed, cpu_online_map) && 2008 if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
1959 nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) 2009 nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
1960 continue; 2010 continue;
1961 2011
@@ -1963,13 +2013,14 @@ static void scan_for_empty_cpusets(struct cpuset *root)
1963 2013
1964 /* Remove offline cpus and mems from this cpuset. */ 2014 /* Remove offline cpus and mems from this cpuset. */
1965 mutex_lock(&callback_mutex); 2015 mutex_lock(&callback_mutex);
1966 cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map); 2016 cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
2017 cpu_online_mask);
1967 nodes_and(cp->mems_allowed, cp->mems_allowed, 2018 nodes_and(cp->mems_allowed, cp->mems_allowed,
1968 node_states[N_HIGH_MEMORY]); 2019 node_states[N_HIGH_MEMORY]);
1969 mutex_unlock(&callback_mutex); 2020 mutex_unlock(&callback_mutex);
1970 2021
1971 /* Move tasks from the empty cpuset to a parent */ 2022 /* Move tasks from the empty cpuset to a parent */
1972 if (cpus_empty(cp->cpus_allowed) || 2023 if (cpumask_empty(cp->cpus_allowed) ||
1973 nodes_empty(cp->mems_allowed)) 2024 nodes_empty(cp->mems_allowed))
1974 remove_tasks_in_empty_cpuset(cp); 2025 remove_tasks_in_empty_cpuset(cp);
1975 else { 2026 else {
@@ -1995,7 +2046,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
1995 unsigned long phase, void *unused_cpu) 2046 unsigned long phase, void *unused_cpu)
1996{ 2047{
1997 struct sched_domain_attr *attr; 2048 struct sched_domain_attr *attr;
1998 cpumask_t *doms; 2049 struct cpumask *doms;
1999 int ndoms; 2050 int ndoms;
2000 2051
2001 switch (phase) { 2052 switch (phase) {
@@ -2010,7 +2061,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
2010 } 2061 }
2011 2062
2012 cgroup_lock(); 2063 cgroup_lock();
2013 top_cpuset.cpus_allowed = cpu_online_map; 2064 cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
2014 scan_for_empty_cpusets(&top_cpuset); 2065 scan_for_empty_cpusets(&top_cpuset);
2015 ndoms = generate_sched_domains(&doms, &attr); 2066 ndoms = generate_sched_domains(&doms, &attr);
2016 cgroup_unlock(); 2067 cgroup_unlock();
@@ -2055,7 +2106,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
2055 2106
2056void __init cpuset_init_smp(void) 2107void __init cpuset_init_smp(void)
2057{ 2108{
2058 top_cpuset.cpus_allowed = cpu_online_map; 2109 cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
2059 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; 2110 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2060 2111
2061 hotcpu_notifier(cpuset_track_online_cpus, 0); 2112 hotcpu_notifier(cpuset_track_online_cpus, 0);
@@ -2065,15 +2116,15 @@ void __init cpuset_init_smp(void)
2065/** 2116/**
2066 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset. 2117
2067 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. 2118 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2068 * @pmask: pointer to cpumask_t variable to receive cpus_allowed set. 2119 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
2069 * 2120 *
2070 * Description: Returns the cpumask_t cpus_allowed of the cpuset 2121 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
2071 * attached to the specified @tsk. Guaranteed to return some non-empty 2122 * attached to the specified @tsk. Guaranteed to return some non-empty
2072 * subset of cpu_online_map, even if this means going outside the 2123 * subset of cpu_online_map, even if this means going outside the
2073 * task's cpuset. 2124
2074 **/ 2125 **/
2075 2126
2076void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask) 2127void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2077{ 2128{
2078 mutex_lock(&callback_mutex); 2129 mutex_lock(&callback_mutex);
2079 cpuset_cpus_allowed_locked(tsk, pmask); 2130 cpuset_cpus_allowed_locked(tsk, pmask);
@@ -2084,7 +2135,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
2084 * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset. 2135
2085 * Must be called with callback_mutex held. 2136 * Must be called with callback_mutex held.
2086 **/ 2137 **/
2087void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask) 2138void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
2088{ 2139{
2089 task_lock(tsk); 2140 task_lock(tsk);
2090 guarantee_online_cpus(task_cs(tsk), pmask); 2141 guarantee_online_cpus(task_cs(tsk), pmask);
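The cpuset.c hunks above all apply one conversion pattern: a fixed-size cpumask_t member becomes a cpumask_var_t that must be allocated and freed explicitly, struct assignment becomes cpumask_copy(), and the cpus_* operators become their cpumask_* equivalents taking pointers. A minimal sketch of that pattern (the struct and function names here are hypothetical, not from the patch):

#include <linux/cpumask.h>
#include <linux/slab.h>

struct foo {
	cpumask_var_t cpus;		/* was: cpumask_t cpus; */
};

static struct foo *foo_create(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	/* With CONFIG_CPUMASK_OFFSTACK this kmallocs the mask; otherwise
	 * it is a no-op and the mask lives inside the struct itself. */
	if (!alloc_cpumask_var(&f->cpus, GFP_KERNEL)) {
		kfree(f);
		return NULL;
	}
	cpumask_copy(f->cpus, cpu_online_mask);	/* was: f->cpus = cpu_online_map; */
	return f;
}

static void foo_destroy(struct foo *f)
{
	free_cpumask_var(f->cpus);	/* must happen before the kfree() */
	kfree(f);
}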
diff --git a/kernel/cred.c b/kernel/cred.c
index 480a61aec805..3a039189d707 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -507,6 +507,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
507 else 507 else
508 old = get_cred(&init_cred); 508 old = get_cred(&init_cred);
509 509
510 *new = *old;
510 get_uid(new->user); 511 get_uid(new->user);
511 get_group_info(new->group_info); 512 get_group_info(new->group_info);
512 513
@@ -530,6 +531,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
530 531
531error: 532error:
532 put_cred(new); 533 put_cred(new);
534 put_cred(old);
533 return NULL; 535 return NULL;
534} 536}
535EXPORT_SYMBOL(prepare_kernel_cred); 537EXPORT_SYMBOL(prepare_kernel_cred);
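The cred.c hunk fixes two reference bugs in prepare_kernel_cred(): the freshly allocated cred was never initialized from the template before references were taken on its (uninitialized) pointers, and the reference held on the template leaked on the error path. A reduced sketch of the corrected shape, assuming the surrounding function looks roughly like this (the keyring fields the real function also handles are omitted):

static struct cred *prepare_kernel_cred_sketch(void)
{
	const struct cred *old;
	struct cred *new;

	new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);	/* as in kernel/cred.c */
	if (!new)
		return NULL;

	old = get_cred(&init_cred);	/* +1 on the template */

	*new = *old;			/* fix 1: copy before taking refs */
	atomic_set(&new->usage, 1);	/* fresh cred, one reference */
	get_uid(new->user);
	get_group_info(new->group_info);

	if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
		goto error;

	put_cred(old);			/* template no longer needed */
	return new;

error:
	put_cred(new);
	put_cred(old);			/* fix 2: do not leak the template */
	return NULL;
}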
diff --git a/kernel/fork.c b/kernel/fork.c
index 7b8f2a78be3d..4018308048cf 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1126,12 +1126,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1126 1126
1127 if (pid != &init_struct_pid) { 1127 if (pid != &init_struct_pid) {
1128 retval = -ENOMEM; 1128 retval = -ENOMEM;
1129 pid = alloc_pid(task_active_pid_ns(p)); 1129 pid = alloc_pid(p->nsproxy->pid_ns);
1130 if (!pid) 1130 if (!pid)
1131 goto bad_fork_cleanup_io; 1131 goto bad_fork_cleanup_io;
1132 1132
1133 if (clone_flags & CLONE_NEWPID) { 1133 if (clone_flags & CLONE_NEWPID) {
1134 retval = pid_ns_prepare_proc(task_active_pid_ns(p)); 1134 retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
1135 if (retval < 0) 1135 if (retval < 0)
1136 goto bad_fork_free_pid; 1136 goto bad_fork_free_pid;
1137 } 1137 }
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index 43c2111cd54d..78bc3fdac0d2 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -13,7 +13,6 @@
13 13
14struct ns_cgroup { 14struct ns_cgroup {
15 struct cgroup_subsys_state css; 15 struct cgroup_subsys_state css;
16 spinlock_t lock;
17}; 16};
18 17
19struct cgroup_subsys ns_subsys; 18struct cgroup_subsys ns_subsys;
@@ -84,7 +83,6 @@ static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss,
84 ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL); 83 ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL);
85 if (!ns_cgroup) 84 if (!ns_cgroup)
86 return ERR_PTR(-ENOMEM); 85 return ERR_PTR(-ENOMEM);
87 spin_lock_init(&ns_cgroup->lock);
88 return &ns_cgroup->css; 86 return &ns_cgroup->css;
89} 87}
90 88
diff --git a/kernel/pid.c b/kernel/pid.c
index af9224cdd6c0..1b3586fe753a 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -474,6 +474,12 @@ pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
474} 474}
475EXPORT_SYMBOL(task_session_nr_ns); 475EXPORT_SYMBOL(task_session_nr_ns);
476 476
477struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
478{
479 return ns_of_pid(task_pid(tsk));
480}
481EXPORT_SYMBOL_GPL(task_active_pid_ns);
482
477/* 483/*
478 * Used by proc to find the first pid that is greater than or equal to nr. 484 * Used by proc to find the first pid that is greater than or equal to nr.
479 * 485 *
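The pid.c hunk provides task_active_pid_ns() as an out-of-line, exported function: a task's active pid namespace is simply the namespace its struct pid was allocated in, hence ns_of_pid(task_pid(tsk)). That is also why the fork.c hunk above switches copy_process() to p->nsproxy->pid_ns: at that point the child's struct pid has not been attached yet, so task_active_pid_ns() cannot be used on it. A hypothetical caller of the new export:

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/sched.h>

/* Hypothetical helper: report how deeply nested a task's pid namespace
 * is; only valid once the task's struct pid has been attached. */
static unsigned int pid_ns_level_of(struct task_struct *tsk)
{
	struct pid_namespace *ns = task_active_pid_ns(tsk);

	return ns ? ns->level : 0;	/* level 0 is the initial namespace */
}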
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index f77d3819ef57..45e8541ab7e3 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -258,12 +258,12 @@ int hibernation_snapshot(int platform_mode)
258{ 258{
259 int error; 259 int error;
260 260
261 /* Free memory before shutting down devices. */ 261 error = platform_begin(platform_mode);
262 error = swsusp_shrink_memory();
263 if (error) 262 if (error)
264 return error; 263 return error;
265 264
266 error = platform_begin(platform_mode); 265 /* Free memory before shutting down devices. */
266 error = swsusp_shrink_memory();
267 if (error) 267 if (error)
268 goto Close; 268 goto Close;
269 269
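The reordering above matters for error handling: platform_begin() now runs first, so a failure there returns with nothing to undo, while a later swsusp_shrink_memory() failure jumps to the Close label, where (in code outside this hunk) platform_end() undoes platform_begin(). A reduced sketch of the resulting shape, assuming that Close path:

static int hibernation_snapshot_sketch(int platform_mode)
{
	int error;

	error = platform_begin(platform_mode);
	if (error)
		return error;		/* nothing to undo yet */

	/* Free memory before shutting down devices. */
	error = swsusp_shrink_memory();
	if (error)
		goto Close;		/* must undo platform_begin() */

	/* ... suspend devices and create the image ... */
	return 0;

 Close:
	platform_end(platform_mode);
	return error;
}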
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5d2ab836e998..f5fc2d7680f2 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -25,6 +25,7 @@
25#include <linux/syscalls.h> 25#include <linux/syscalls.h>
26#include <linux/console.h> 26#include <linux/console.h>
27#include <linux/highmem.h> 27#include <linux/highmem.h>
28#include <linux/list.h>
28 29
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30#include <asm/mmu_context.h> 31#include <asm/mmu_context.h>
@@ -192,12 +193,6 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
192 return ret; 193 return ret;
193} 194}
194 195
195static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
196{
197 free_list_of_pages(ca->chain, clear_page_nosave);
198 memset(ca, 0, sizeof(struct chain_allocator));
199}
200
201/** 196/**
202 * Data types related to memory bitmaps. 197 * Data types related to memory bitmaps.
203 * 198 *
@@ -233,7 +228,7 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
233#define BM_BITS_PER_BLOCK (PAGE_SIZE << 3) 228#define BM_BITS_PER_BLOCK (PAGE_SIZE << 3)
234 229
235struct bm_block { 230struct bm_block {
236 struct bm_block *next; /* next element of the list */ 231 struct list_head hook; /* hook into a list of bitmap blocks */
237 unsigned long start_pfn; /* pfn represented by the first bit */ 232 unsigned long start_pfn; /* pfn represented by the first bit */
238 unsigned long end_pfn; /* pfn represented by the last bit plus 1 */ 233 unsigned long end_pfn; /* pfn represented by the last bit plus 1 */
239 unsigned long *data; /* bitmap representing pages */ 234 unsigned long *data; /* bitmap representing pages */
@@ -244,24 +239,15 @@ static inline unsigned long bm_block_bits(struct bm_block *bb)
244 return bb->end_pfn - bb->start_pfn; 239 return bb->end_pfn - bb->start_pfn;
245} 240}
246 241
247struct zone_bitmap {
248 struct zone_bitmap *next; /* next element of the list */
249 unsigned long start_pfn; /* minimal pfn in this zone */
250 unsigned long end_pfn; /* maximal pfn in this zone plus 1 */
251 struct bm_block *bm_blocks; /* list of bitmap blocks */
252 struct bm_block *cur_block; /* recently used bitmap block */
253};
254
255 /* struct bm_position is used for browsing memory bitmaps */ 242
256 243
257struct bm_position { 244struct bm_position {
258 struct zone_bitmap *zone_bm;
259 struct bm_block *block; 245 struct bm_block *block;
260 int bit; 246 int bit;
261}; 247};
262 248
263struct memory_bitmap { 249struct memory_bitmap {
264 struct zone_bitmap *zone_bm_list; /* list of zone bitmaps */ 250 struct list_head blocks; /* list of bitmap blocks */
265 struct linked_page *p_list; /* list of pages used to store zone 251 struct linked_page *p_list; /* list of pages used to store zone
266 * bitmap objects and bitmap block 252 * bitmap objects and bitmap block
267 * objects 253 * objects
@@ -273,11 +259,7 @@ struct memory_bitmap {
273 259
274static void memory_bm_position_reset(struct memory_bitmap *bm) 260static void memory_bm_position_reset(struct memory_bitmap *bm)
275{ 261{
276 struct zone_bitmap *zone_bm; 262 bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
277
278 zone_bm = bm->zone_bm_list;
279 bm->cur.zone_bm = zone_bm;
280 bm->cur.block = zone_bm->bm_blocks;
281 bm->cur.bit = 0; 263 bm->cur.bit = 0;
282} 264}
283 265
@@ -285,151 +267,184 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
285 267
286/** 268/**
287 * create_bm_block_list - create a list of block bitmap objects 269 * create_bm_block_list - create a list of block bitmap objects
 270 * @pages - number of pages for which to allocate bitmap blocks
271 * @list - list to put the allocated blocks into
272 * @ca - chain allocator to be used for allocating memory
288 */ 273 */
289 274static int create_bm_block_list(unsigned long pages,
290static inline struct bm_block * 275 struct list_head *list,
291create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca) 276 struct chain_allocator *ca)
292{ 277{
293 struct bm_block *bblist = NULL; 278 unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
294 279
295 while (nr_blocks-- > 0) { 280 while (nr_blocks-- > 0) {
296 struct bm_block *bb; 281 struct bm_block *bb;
297 282
298 bb = chain_alloc(ca, sizeof(struct bm_block)); 283 bb = chain_alloc(ca, sizeof(struct bm_block));
299 if (!bb) 284 if (!bb)
300 return NULL; 285 return -ENOMEM;
301 286 list_add(&bb->hook, list);
302 bb->next = bblist;
303 bblist = bb;
304 } 287 }
305 return bblist; 288
289 return 0;
306} 290}
307 291
292struct mem_extent {
293 struct list_head hook;
294 unsigned long start;
295 unsigned long end;
296};
297
308/** 298/**
309 * create_zone_bm_list - create a list of zone bitmap objects 299 * free_mem_extents - free a list of memory extents
300 * @list - list of extents to empty
310 */ 301 */
302static void free_mem_extents(struct list_head *list)
303{
304 struct mem_extent *ext, *aux;
311 305
312static inline struct zone_bitmap * 306 list_for_each_entry_safe(ext, aux, list, hook) {
313create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca) 307 list_del(&ext->hook);
308 kfree(ext);
309 }
310}
311
312/**
313 * create_mem_extents - create a list of memory extents representing
314 * contiguous ranges of PFNs
315 * @list - list to put the extents into
316 * @gfp_mask - mask to use for memory allocations
317 */
318static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
314{ 319{
315 struct zone_bitmap *zbmlist = NULL; 320 struct zone *zone;
316 321
317 while (nr_zones-- > 0) { 322 INIT_LIST_HEAD(list);
318 struct zone_bitmap *zbm;
319 323
320 zbm = chain_alloc(ca, sizeof(struct zone_bitmap)); 324 for_each_zone(zone) {
321 if (!zbm) 325 unsigned long zone_start, zone_end;
322 return NULL; 326 struct mem_extent *ext, *cur, *aux;
327
328 if (!populated_zone(zone))
329 continue;
323 330
324 zbm->next = zbmlist; 331 zone_start = zone->zone_start_pfn;
325 zbmlist = zbm; 332 zone_end = zone->zone_start_pfn + zone->spanned_pages;
333
334 list_for_each_entry(ext, list, hook)
335 if (zone_start <= ext->end)
336 break;
337
338 if (&ext->hook == list || zone_end < ext->start) {
339 /* New extent is necessary */
340 struct mem_extent *new_ext;
341
342 new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
343 if (!new_ext) {
344 free_mem_extents(list);
345 return -ENOMEM;
346 }
347 new_ext->start = zone_start;
348 new_ext->end = zone_end;
349 list_add_tail(&new_ext->hook, &ext->hook);
350 continue;
351 }
352
353 /* Merge this zone's range of PFNs with the existing one */
354 if (zone_start < ext->start)
355 ext->start = zone_start;
356 if (zone_end > ext->end)
357 ext->end = zone_end;
358
359 /* More merging may be possible */
360 cur = ext;
361 list_for_each_entry_safe_continue(cur, aux, list, hook) {
362 if (zone_end < cur->start)
363 break;
364 if (zone_end < cur->end)
365 ext->end = cur->end;
366 list_del(&cur->hook);
367 kfree(cur);
368 }
326 } 369 }
327 return zbmlist; 370
371 return 0;
328} 372}
329 373
330/** 374/**
331 * memory_bm_create - allocate memory for a memory bitmap 375 * memory_bm_create - allocate memory for a memory bitmap
332 */ 376 */
333
334static int 377static int
335memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed) 378memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
336{ 379{
337 struct chain_allocator ca; 380 struct chain_allocator ca;
338 struct zone *zone; 381 struct list_head mem_extents;
339 struct zone_bitmap *zone_bm; 382 struct mem_extent *ext;
340 struct bm_block *bb; 383 int error;
341 unsigned int nr;
342 384
343 chain_init(&ca, gfp_mask, safe_needed); 385 chain_init(&ca, gfp_mask, safe_needed);
386 INIT_LIST_HEAD(&bm->blocks);
344 387
345 /* Compute the number of zones */ 388 error = create_mem_extents(&mem_extents, gfp_mask);
346 nr = 0; 389 if (error)
347 for_each_zone(zone) 390 return error;
348 if (populated_zone(zone))
349 nr++;
350
351 /* Allocate the list of zones bitmap objects */
352 zone_bm = create_zone_bm_list(nr, &ca);
353 bm->zone_bm_list = zone_bm;
354 if (!zone_bm) {
355 chain_free(&ca, PG_UNSAFE_CLEAR);
356 return -ENOMEM;
357 }
358
359 /* Initialize the zone bitmap objects */
360 for_each_zone(zone) {
361 unsigned long pfn;
362 391
363 if (!populated_zone(zone)) 392 list_for_each_entry(ext, &mem_extents, hook) {
364 continue; 393 struct bm_block *bb;
394 unsigned long pfn = ext->start;
395 unsigned long pages = ext->end - ext->start;
365 396
366 zone_bm->start_pfn = zone->zone_start_pfn; 397 bb = list_entry(bm->blocks.prev, struct bm_block, hook);
367 zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
368 /* Allocate the list of bitmap block objects */
369 nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
370 bb = create_bm_block_list(nr, &ca);
371 zone_bm->bm_blocks = bb;
372 zone_bm->cur_block = bb;
373 if (!bb)
374 goto Free;
375 398
376 nr = zone->spanned_pages; 399 error = create_bm_block_list(pages, bm->blocks.prev, &ca);
377 pfn = zone->zone_start_pfn; 400 if (error)
378 /* Initialize the bitmap block objects */ 401 goto Error;
379 while (bb) {
380 unsigned long *ptr;
381 402
382 ptr = get_image_page(gfp_mask, safe_needed); 403 list_for_each_entry_continue(bb, &bm->blocks, hook) {
383 bb->data = ptr; 404 bb->data = get_image_page(gfp_mask, safe_needed);
384 if (!ptr) 405 if (!bb->data) {
385 goto Free; 406 error = -ENOMEM;
407 goto Error;
408 }
386 409
387 bb->start_pfn = pfn; 410 bb->start_pfn = pfn;
388 if (nr >= BM_BITS_PER_BLOCK) { 411 if (pages >= BM_BITS_PER_BLOCK) {
389 pfn += BM_BITS_PER_BLOCK; 412 pfn += BM_BITS_PER_BLOCK;
390 nr -= BM_BITS_PER_BLOCK; 413 pages -= BM_BITS_PER_BLOCK;
391 } else { 414 } else {
392 /* This is executed only once in the loop */ 415 /* This is executed only once in the loop */
393 pfn += nr; 416 pfn += pages;
394 } 417 }
395 bb->end_pfn = pfn; 418 bb->end_pfn = pfn;
396 bb = bb->next;
397 } 419 }
398 zone_bm = zone_bm->next;
399 } 420 }
421
400 bm->p_list = ca.chain; 422 bm->p_list = ca.chain;
401 memory_bm_position_reset(bm); 423 memory_bm_position_reset(bm);
402 return 0; 424 Exit:
425 free_mem_extents(&mem_extents);
426 return error;
403 427
404 Free: 428 Error:
405 bm->p_list = ca.chain; 429 bm->p_list = ca.chain;
406 memory_bm_free(bm, PG_UNSAFE_CLEAR); 430 memory_bm_free(bm, PG_UNSAFE_CLEAR);
407 return -ENOMEM; 431 goto Exit;
408} 432}
409 433
410/** 434/**
411 * memory_bm_free - free memory occupied by the memory bitmap @bm 435 * memory_bm_free - free memory occupied by the memory bitmap @bm
412 */ 436 */
413
414static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) 437static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
415{ 438{
416 struct zone_bitmap *zone_bm; 439 struct bm_block *bb;
417 440
418 /* Free the list of bit blocks for each zone_bitmap object */ 441 list_for_each_entry(bb, &bm->blocks, hook)
419 zone_bm = bm->zone_bm_list; 442 if (bb->data)
420 while (zone_bm) { 443 free_image_page(bb->data, clear_nosave_free);
421 struct bm_block *bb;
422 444
423 bb = zone_bm->bm_blocks;
424 while (bb) {
425 if (bb->data)
426 free_image_page(bb->data, clear_nosave_free);
427 bb = bb->next;
428 }
429 zone_bm = zone_bm->next;
430 }
431 free_list_of_pages(bm->p_list, clear_nosave_free); 445 free_list_of_pages(bm->p_list, clear_nosave_free);
432 bm->zone_bm_list = NULL; 446
447 INIT_LIST_HEAD(&bm->blocks);
433} 448}
434 449
435/** 450/**
@@ -437,38 +452,33 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
437 * to a given pfn.  The cur.block member of @bm is updated so that 452
438 * subsequent searches start from the block just found. 453
439 */ 454 */
440
441static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, 455static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
442 void **addr, unsigned int *bit_nr) 456 void **addr, unsigned int *bit_nr)
443{ 457{
444 struct zone_bitmap *zone_bm;
445 struct bm_block *bb; 458 struct bm_block *bb;
446 459
447 /* Check if the pfn is from the current zone */ 460 /*
448 zone_bm = bm->cur.zone_bm; 461 * Check if the pfn corresponds to the current bitmap block and find
449 if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) { 462 * the block where it fits if this is not the case.
450 zone_bm = bm->zone_bm_list; 463 */
451 /* We don't assume that the zones are sorted by pfns */ 464 bb = bm->cur.block;
452 while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
453 zone_bm = zone_bm->next;
454
455 if (!zone_bm)
456 return -EFAULT;
457 }
458 bm->cur.zone_bm = zone_bm;
459 }
460 /* Check if the pfn corresponds to the current bitmap block */
461 bb = zone_bm->cur_block;
462 if (pfn < bb->start_pfn) 465 if (pfn < bb->start_pfn)
463 bb = zone_bm->bm_blocks; 466 list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
467 if (pfn >= bb->start_pfn)
468 break;
464 469
465 while (pfn >= bb->end_pfn) { 470 if (pfn >= bb->end_pfn)
466 bb = bb->next; 471 list_for_each_entry_continue(bb, &bm->blocks, hook)
472 if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
473 break;
467 474
468 BUG_ON(!bb); 475 if (&bb->hook == &bm->blocks)
469 } 476 return -EFAULT;
470 zone_bm->cur_block = bb; 477
478 /* The block has been found */
479 bm->cur.block = bb;
471 pfn -= bb->start_pfn; 480 pfn -= bb->start_pfn;
481 bm->cur.bit = pfn + 1;
472 *bit_nr = pfn; 482 *bit_nr = pfn;
473 *addr = bb->data; 483 *addr = bb->data;
474 return 0; 484 return 0;
@@ -519,6 +529,14 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
519 return test_bit(bit, addr); 529 return test_bit(bit, addr);
520} 530}
521 531
532static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
533{
534 void *addr;
535 unsigned int bit;
536
537 return !memory_bm_find_bit(bm, pfn, &addr, &bit);
538}
539
522/** 540/**
523 * memory_bm_next_pfn - find the pfn that corresponds to the next set bit 541 * memory_bm_next_pfn - find the pfn that corresponds to the next set bit
524 * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is 542 * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is
@@ -530,29 +548,21 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
530 548
531static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) 549static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
532{ 550{
533 struct zone_bitmap *zone_bm;
534 struct bm_block *bb; 551 struct bm_block *bb;
535 int bit; 552 int bit;
536 553
554 bb = bm->cur.block;
537 do { 555 do {
538 bb = bm->cur.block; 556 bit = bm->cur.bit;
539 do { 557 bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
540 bit = bm->cur.bit; 558 if (bit < bm_block_bits(bb))
541 bit = find_next_bit(bb->data, bm_block_bits(bb), bit); 559 goto Return_pfn;
542 if (bit < bm_block_bits(bb)) 560
543 goto Return_pfn; 561 bb = list_entry(bb->hook.next, struct bm_block, hook);
544 562 bm->cur.block = bb;
545 bb = bb->next; 563 bm->cur.bit = 0;
546 bm->cur.block = bb; 564 } while (&bb->hook != &bm->blocks);
547 bm->cur.bit = 0; 565
548 } while (bb);
549 zone_bm = bm->cur.zone_bm->next;
550 if (zone_bm) {
551 bm->cur.zone_bm = zone_bm;
552 bm->cur.block = zone_bm->bm_blocks;
553 bm->cur.bit = 0;
554 }
555 } while (zone_bm);
556 memory_bm_position_reset(bm); 566 memory_bm_position_reset(bm);
557 return BM_END_OF_MAP; 567 return BM_END_OF_MAP;
558 568
@@ -808,8 +818,7 @@ static unsigned int count_free_highmem_pages(void)
808 * We should save the page if it isn't Nosave or NosaveFree, or Reserved, 818 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
809 * and it isn't a part of a free chunk of pages. 819 * and it isn't a part of a free chunk of pages.
810 */ 820 */
811 821static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
812static struct page *saveable_highmem_page(unsigned long pfn)
813{ 822{
814 struct page *page; 823 struct page *page;
815 824
@@ -817,6 +826,8 @@ static struct page *saveable_highmem_page(unsigned long pfn)
817 return NULL; 826 return NULL;
818 827
819 page = pfn_to_page(pfn); 828 page = pfn_to_page(pfn);
829 if (page_zone(page) != zone)
830 return NULL;
820 831
821 BUG_ON(!PageHighMem(page)); 832 BUG_ON(!PageHighMem(page));
822 833
@@ -846,13 +857,16 @@ unsigned int count_highmem_pages(void)
846 mark_free_pages(zone); 857 mark_free_pages(zone);
847 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 858 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
848 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 859 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
849 if (saveable_highmem_page(pfn)) 860 if (saveable_highmem_page(zone, pfn))
850 n++; 861 n++;
851 } 862 }
852 return n; 863 return n;
853} 864}
854#else 865#else
855static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; } 866static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
867{
868 return NULL;
869}
856#endif /* CONFIG_HIGHMEM */ 870#endif /* CONFIG_HIGHMEM */
857 871
858/** 872/**
@@ -863,8 +877,7 @@ static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
863 * of pages statically defined as 'unsaveable', and it isn't a part of 877 * of pages statically defined as 'unsaveable', and it isn't a part of
864 * a free chunk of pages. 878 * a free chunk of pages.
865 */ 879 */
866 880static struct page *saveable_page(struct zone *zone, unsigned long pfn)
867static struct page *saveable_page(unsigned long pfn)
868{ 881{
869 struct page *page; 882 struct page *page;
870 883
@@ -872,6 +885,8 @@ static struct page *saveable_page(unsigned long pfn)
872 return NULL; 885 return NULL;
873 886
874 page = pfn_to_page(pfn); 887 page = pfn_to_page(pfn);
888 if (page_zone(page) != zone)
889 return NULL;
875 890
876 BUG_ON(PageHighMem(page)); 891 BUG_ON(PageHighMem(page));
877 892
@@ -903,7 +918,7 @@ unsigned int count_data_pages(void)
903 mark_free_pages(zone); 918 mark_free_pages(zone);
904 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 919 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
905 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 920 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
906 if(saveable_page(pfn)) 921 if (saveable_page(zone, pfn))
907 n++; 922 n++;
908 } 923 }
909 return n; 924 return n;
@@ -944,7 +959,7 @@ static inline struct page *
944page_is_saveable(struct zone *zone, unsigned long pfn) 959page_is_saveable(struct zone *zone, unsigned long pfn)
945{ 960{
946 return is_highmem(zone) ? 961 return is_highmem(zone) ?
947 saveable_highmem_page(pfn) : saveable_page(pfn); 962 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
948} 963}
949 964
950static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) 965static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
@@ -966,7 +981,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
966 * data modified by kmap_atomic() 981 * data modified by kmap_atomic()
967 */ 982 */
968 safe_copy_page(buffer, s_page); 983 safe_copy_page(buffer, s_page);
969 dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0); 984 dst = kmap_atomic(d_page, KM_USER0);
970 memcpy(dst, buffer, PAGE_SIZE); 985 memcpy(dst, buffer, PAGE_SIZE);
971 kunmap_atomic(dst, KM_USER0); 986 kunmap_atomic(dst, KM_USER0);
972 } else { 987 } else {
@@ -975,7 +990,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
975 } 990 }
976} 991}
977#else 992#else
978#define page_is_saveable(zone, pfn) saveable_page(pfn) 993#define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
979 994
980static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) 995static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
981{ 996{
@@ -1459,9 +1474,7 @@ load_header(struct swsusp_info *info)
1459 * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set 1474 * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
1460 * the corresponding bit in the memory bitmap @bm 1475 * the corresponding bit in the memory bitmap @bm
1461 */ 1476 */
1462 1477static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
1463static inline void
1464unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
1465{ 1478{
1466 int j; 1479 int j;
1467 1480
@@ -1469,8 +1482,13 @@ unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
1469 if (unlikely(buf[j] == BM_END_OF_MAP)) 1482 if (unlikely(buf[j] == BM_END_OF_MAP))
1470 break; 1483 break;
1471 1484
1472 memory_bm_set_bit(bm, buf[j]); 1485 if (memory_bm_pfn_present(bm, buf[j]))
1486 memory_bm_set_bit(bm, buf[j]);
1487 else
1488 return -EFAULT;
1473 } 1489 }
1490
1491 return 0;
1474} 1492}
1475 1493
1476/* List of "safe" pages that may be used to store data loaded from the suspend 1494/* List of "safe" pages that may be used to store data loaded from the suspend
@@ -1608,7 +1626,7 @@ get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
1608 pbe = chain_alloc(ca, sizeof(struct highmem_pbe)); 1626 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
1609 if (!pbe) { 1627 if (!pbe) {
1610 swsusp_free(); 1628 swsusp_free();
1611 return NULL; 1629 return ERR_PTR(-ENOMEM);
1612 } 1630 }
1613 pbe->orig_page = page; 1631 pbe->orig_page = page;
1614 if (safe_highmem_pages > 0) { 1632 if (safe_highmem_pages > 0) {
@@ -1677,7 +1695,7 @@ prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
1677static inline void * 1695static inline void *
1678get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) 1696get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
1679{ 1697{
1680 return NULL; 1698 return ERR_PTR(-EINVAL);
1681} 1699}
1682 1700
1683static inline void copy_last_highmem_page(void) {} 1701static inline void copy_last_highmem_page(void) {}
@@ -1788,8 +1806,13 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
1788static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) 1806static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
1789{ 1807{
1790 struct pbe *pbe; 1808 struct pbe *pbe;
1791 struct page *page = pfn_to_page(memory_bm_next_pfn(bm)); 1809 struct page *page;
1810 unsigned long pfn = memory_bm_next_pfn(bm);
1792 1811
1812 if (pfn == BM_END_OF_MAP)
1813 return ERR_PTR(-EFAULT);
1814
1815 page = pfn_to_page(pfn);
1793 if (PageHighMem(page)) 1816 if (PageHighMem(page))
1794 return get_highmem_page_buffer(page, ca); 1817 return get_highmem_page_buffer(page, ca);
1795 1818
@@ -1805,7 +1828,7 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
1805 pbe = chain_alloc(ca, sizeof(struct pbe)); 1828 pbe = chain_alloc(ca, sizeof(struct pbe));
1806 if (!pbe) { 1829 if (!pbe) {
1807 swsusp_free(); 1830 swsusp_free();
1808 return NULL; 1831 return ERR_PTR(-ENOMEM);
1809 } 1832 }
1810 pbe->orig_address = page_address(page); 1833 pbe->orig_address = page_address(page);
1811 pbe->address = safe_pages_list; 1834 pbe->address = safe_pages_list;
@@ -1868,7 +1891,10 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
1868 return error; 1891 return error;
1869 1892
1870 } else if (handle->prev <= nr_meta_pages) { 1893 } else if (handle->prev <= nr_meta_pages) {
1871 unpack_orig_pfns(buffer, &copy_bm); 1894 error = unpack_orig_pfns(buffer, &copy_bm);
1895 if (error)
1896 return error;
1897
1872 if (handle->prev == nr_meta_pages) { 1898 if (handle->prev == nr_meta_pages) {
1873 error = prepare_image(&orig_bm, &copy_bm); 1899 error = prepare_image(&orig_bm, &copy_bm);
1874 if (error) 1900 if (error)
@@ -1879,12 +1905,14 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
1879 restore_pblist = NULL; 1905 restore_pblist = NULL;
1880 handle->buffer = get_buffer(&orig_bm, &ca); 1906 handle->buffer = get_buffer(&orig_bm, &ca);
1881 handle->sync_read = 0; 1907 handle->sync_read = 0;
1882 if (!handle->buffer) 1908 if (IS_ERR(handle->buffer))
1883 return -ENOMEM; 1909 return PTR_ERR(handle->buffer);
1884 } 1910 }
1885 } else { 1911 } else {
1886 copy_last_highmem_page(); 1912 copy_last_highmem_page();
1887 handle->buffer = get_buffer(&orig_bm, &ca); 1913 handle->buffer = get_buffer(&orig_bm, &ca);
1914 if (IS_ERR(handle->buffer))
1915 return PTR_ERR(handle->buffer);
1888 if (handle->buffer != buffer) 1916 if (handle->buffer != buffer)
1889 handle->sync_read = 0; 1917 handle->sync_read = 0;
1890 } 1918 }
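The snapshot.c rewrite replaces two hand-rolled singly linked lists (per-zone bitmaps, each with its own block chain) with a single list_head of bm_block structures covering merged PFN extents, so every traversal can use the standard <linux/list.h> helpers. A self-contained sketch of the lookup idiom that memory_bm_find_bit() now relies on, with a simplified block type standing in for struct bm_block:

#include <linux/list.h>

struct blk {
	struct list_head hook;	/* hooks the block into the bitmap's list */
	unsigned long start_pfn;
	unsigned long end_pfn;	/* last pfn represented, plus 1 */
};

/* Find the block covering @pfn, starting from a cached @cur block the
 * way memory_bm_find_bit() does; returns NULL if no block covers it. */
static struct blk *find_block(struct list_head *blocks, struct blk *cur,
			      unsigned long pfn)
{
	struct blk *bb = cur;

	if (pfn < bb->start_pfn) {
		/* walk backwards from the cached block */
		list_for_each_entry_continue_reverse(bb, blocks, hook)
			if (pfn >= bb->start_pfn)
				break;
		if (&bb->hook == blocks)
			return NULL;	/* pfn lies below the first block */
	}

	if (pfn >= bb->end_pfn) {
		/* walk forwards from where we are now */
		list_for_each_entry_continue(bb, blocks, hook)
			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
				break;
		if (&bb->hook == blocks)
			return NULL;	/* pfn falls into no block */
	}

	return bb;
}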
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 023ff2a31d89..a92c91451559 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -262,3 +262,125 @@ int swsusp_shrink_memory(void)
262 262
263 return 0; 263 return 0;
264} 264}
265
266/*
267 * Platforms, like ACPI, may want us to save some memory used by them during
268 * hibernation and to restore the contents of this memory during the subsequent
269 * resume. The code below implements a mechanism allowing us to do that.
270 */
271
272struct nvs_page {
273 unsigned long phys_start;
274 unsigned int size;
275 void *kaddr;
276 void *data;
277 struct list_head node;
278};
279
280static LIST_HEAD(nvs_list);
281
282/**
283 * hibernate_nvs_register - register platform NVS memory region to save
284 * @start - physical address of the region
285 * @size - size of the region
286 *
287 * The NVS region need not be page-aligned (both ends) and we arrange
288 * things so that the data from page-aligned addresses in this region will
289 * be copied into separate RAM pages.
290 */
291int hibernate_nvs_register(unsigned long start, unsigned long size)
292{
293 struct nvs_page *entry, *next;
294
295 while (size > 0) {
296 unsigned int nr_bytes;
297
298 entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
299 if (!entry)
300 goto Error;
301
302 list_add_tail(&entry->node, &nvs_list);
303 entry->phys_start = start;
304 nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
305 entry->size = (size < nr_bytes) ? size : nr_bytes;
306
307 start += entry->size;
308 size -= entry->size;
309 }
310 return 0;
311
312 Error:
313 list_for_each_entry_safe(entry, next, &nvs_list, node) {
314 list_del(&entry->node);
315 kfree(entry);
316 }
317 return -ENOMEM;
318}
319
320/**
321 * hibernate_nvs_free - free data pages allocated for saving NVS regions
322 */
323void hibernate_nvs_free(void)
324{
325 struct nvs_page *entry;
326
327 list_for_each_entry(entry, &nvs_list, node)
328 if (entry->data) {
329 free_page((unsigned long)entry->data);
330 entry->data = NULL;
331 if (entry->kaddr) {
332 iounmap(entry->kaddr);
333 entry->kaddr = NULL;
334 }
335 }
336}
337
338/**
339 * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
340 */
341int hibernate_nvs_alloc(void)
342{
343 struct nvs_page *entry;
344
345 list_for_each_entry(entry, &nvs_list, node) {
346 entry->data = (void *)__get_free_page(GFP_KERNEL);
347 if (!entry->data) {
348 hibernate_nvs_free();
349 return -ENOMEM;
350 }
351 }
352 return 0;
353}
354
355/**
356 * hibernate_nvs_save - save NVS memory regions
357 */
358void hibernate_nvs_save(void)
359{
360 struct nvs_page *entry;
361
362 printk(KERN_INFO "PM: Saving platform NVS memory\n");
363
364 list_for_each_entry(entry, &nvs_list, node)
365 if (entry->data) {
366 entry->kaddr = ioremap(entry->phys_start, entry->size);
367 memcpy(entry->data, entry->kaddr, entry->size);
368 }
369}
370
371/**
372 * hibernate_nvs_restore - restore NVS memory regions
373 *
374 * This function is going to be called with interrupts disabled, so it
375 * cannot iounmap the virtual addresses used to access the NVS region.
376 */
377void hibernate_nvs_restore(void)
378{
379 struct nvs_page *entry;
380
381 printk(KERN_INFO "PM: Restoring platform NVS memory\n");
382
383 list_for_each_entry(entry, &nvs_list, node)
384 if (entry->data)
385 memcpy(entry->kaddr, entry->data, entry->size);
386}
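The NVS block above implies a simple lifecycle: regions are registered early (typically by platform code parsing the firmware memory map), backing pages are allocated before the image is created, and contents are saved before power-off and restored, with interrupts off, early in resume. A hypothetical platform caller, with a made-up address, showing that unaligned region ends are fine because registration splits the range at page boundaries:

/* Hypothetical __init hook: register one firmware NVS range so its
 * contents survive hibernation.  The address and size are made up. */
static int __init example_mark_nvs_memory(void)
{
	/* starts 512 bytes into a page; split into nvs_page entries */
	return hibernate_nvs_register(0x7f000200UL, 12 * 1024);
}

/* Expected call order around one hibernation cycle (PM core driven):
 *	hibernate_nvs_alloc();		before creating the image
 *	hibernate_nvs_save();		before powering off
 *	hibernate_nvs_restore();	early in resume, IRQs off
 *	hibernate_nvs_free();		once resume has completed
 */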
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index f275c8eca772..bf8e7534c803 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -15,10 +15,11 @@
15#include <linux/uaccess.h> 15#include <linux/uaccess.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17 17
18void res_counter_init(struct res_counter *counter) 18void res_counter_init(struct res_counter *counter, struct res_counter *parent)
19{ 19{
20 spin_lock_init(&counter->lock); 20 spin_lock_init(&counter->lock);
21 counter->limit = (unsigned long long)LLONG_MAX; 21 counter->limit = (unsigned long long)LLONG_MAX;
22 counter->parent = parent;
22} 23}
23 24
24int res_counter_charge_locked(struct res_counter *counter, unsigned long val) 25int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
@@ -34,14 +35,34 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
34 return 0; 35 return 0;
35} 36}
36 37
37int res_counter_charge(struct res_counter *counter, unsigned long val) 38int res_counter_charge(struct res_counter *counter, unsigned long val,
39 struct res_counter **limit_fail_at)
38{ 40{
39 int ret; 41 int ret;
40 unsigned long flags; 42 unsigned long flags;
41 43 struct res_counter *c, *u;
42 spin_lock_irqsave(&counter->lock, flags); 44
43 ret = res_counter_charge_locked(counter, val); 45 *limit_fail_at = NULL;
44 spin_unlock_irqrestore(&counter->lock, flags); 46 local_irq_save(flags);
47 for (c = counter; c != NULL; c = c->parent) {
48 spin_lock(&c->lock);
49 ret = res_counter_charge_locked(c, val);
50 spin_unlock(&c->lock);
51 if (ret < 0) {
52 *limit_fail_at = c;
53 goto undo;
54 }
55 }
56 ret = 0;
57 goto done;
58undo:
59 for (u = counter; u != c; u = u->parent) {
60 spin_lock(&u->lock);
61 res_counter_uncharge_locked(u, val);
62 spin_unlock(&u->lock);
63 }
64done:
65 local_irq_restore(flags);
45 return ret; 66 return ret;
46} 67}
47 68
@@ -56,10 +77,15 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
56void res_counter_uncharge(struct res_counter *counter, unsigned long val) 77void res_counter_uncharge(struct res_counter *counter, unsigned long val)
57{ 78{
58 unsigned long flags; 79 unsigned long flags;
80 struct res_counter *c;
59 81
60 spin_lock_irqsave(&counter->lock, flags); 82 local_irq_save(flags);
61 res_counter_uncharge_locked(counter, val); 83 for (c = counter; c != NULL; c = c->parent) {
62 spin_unlock_irqrestore(&counter->lock, flags); 84 spin_lock(&c->lock);
85 res_counter_uncharge_locked(c, val);
86 spin_unlock(&c->lock);
87 }
88 local_irq_restore(flags);
63} 89}
64 90
65 91
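With the hunks above, res_counter_charge() charges every level from the counter up to the root and, if some ancestor is over its limit, uncharges exactly the levels already charged and reports the failing counter through @limit_fail_at. A hypothetical caller pattern showing how that report would be used:

#include <linux/res_counter.h>

void try_to_reclaim_from(struct res_counter *rc);	/* hypothetical helper */

/* Hypothetical: charge one page against a counter hierarchy and target
 * reclaim at the level whose limit was actually hit. */
static int example_charge_page(struct res_counter *cnt)
{
	struct res_counter *fail_at;
	int ret;

	ret = res_counter_charge(cnt, PAGE_SIZE, &fail_at);
	if (ret < 0) {
		/* everything between cnt and fail_at is already unwound */
		try_to_reclaim_from(fail_at);
	}
	return ret;
}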
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e0c0b4bc3f08..8e1352c75557 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1617,8 +1617,6 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
1617 } 1617 }
1618} 1618}
1619 1619
1620#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
1621
1622/* 1620/*
1623 * Share the fairness runtime between parent and child, thus the 1621 * Share the fairness runtime between parent and child, thus the
1624 * total amount of pressure for CPU stays equal - new tasks 1622 * total amount of pressure for CPU stays equal - new tasks
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a9d9760dc7b6..8b0daf0662ef 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -168,7 +168,13 @@ rb_event_length(struct ring_buffer_event *event)
168 */ 168 */
169unsigned ring_buffer_event_length(struct ring_buffer_event *event) 169unsigned ring_buffer_event_length(struct ring_buffer_event *event)
170{ 170{
171 return rb_event_length(event); 171 unsigned length = rb_event_length(event);
172 if (event->type != RINGBUF_TYPE_DATA)
173 return length;
174 length -= RB_EVNT_HDR_SIZE;
175 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
176 length -= sizeof(event->array[0]);
177 return length;
172} 178}
173EXPORT_SYMBOL_GPL(ring_buffer_event_length); 179EXPORT_SYMBOL_GPL(ring_buffer_event_length);
174 180
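The ring_buffer.c hunk makes ring_buffer_event_length() return the usable payload size of a data event instead of the raw on-ring length: the event header is subtracted, and events big enough to store their length in array[0] lose that word as well. A sketch of the arithmetic with the patch's constants stubbed out (the values below are illustrative stand-ins, not the real RB_* definitions):

#define HDR_SIZE	4U	/* stand-in for RB_EVNT_HDR_SIZE */
#define MAX_SMALL	28U	/* stand-in for RB_MAX_SMALL_DATA */

static unsigned int payload_length(unsigned int raw_length)
{
	unsigned int length = raw_length - HDR_SIZE;

	/* large events spend one 32-bit word (array[0]) on their length */
	if (length > MAX_SMALL + sizeof(unsigned int))
		length -= sizeof(unsigned int);
	return length;
}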
diff --git a/lib/sort.c b/lib/sort.c
index 6abbaf3d5858..926d00429ed2 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -32,11 +32,11 @@ static void generic_swap(void *a, void *b, int size)
32 * @base: pointer to data to sort 32 * @base: pointer to data to sort
33 * @num: number of elements 33 * @num: number of elements
34 * @size: size of each element 34 * @size: size of each element
35 * @cmp: pointer to comparison function 35 * @cmp_func: pointer to comparison function
36 * @swap: pointer to swap function or NULL 36 * @swap_func: pointer to swap function or NULL
37 * 37 *
38 * This function does a heapsort on the given array. You may provide a 38 * This function does a heapsort on the given array. You may provide a
39 * swap function optimized to your element type. 39 * swap_func optimized to your element type.
40 * 40 *
41 * Sorting time is O(n log n) both on average and worst-case. While 41 * Sorting time is O(n log n) both on average and worst-case. While
42 * qsort is about 20% faster on average, it suffers from exploitable 42 * qsort is about 20% faster on average, it suffers from exploitable
@@ -45,37 +45,39 @@ static void generic_swap(void *a, void *b, int size)
45 */ 45 */
46 46
47void sort(void *base, size_t num, size_t size, 47void sort(void *base, size_t num, size_t size,
48 int (*cmp)(const void *, const void *), 48 int (*cmp_func)(const void *, const void *),
49 void (*swap)(void *, void *, int size)) 49 void (*swap_func)(void *, void *, int size))
50{ 50{
51 /* pre-scale counters for performance */ 51 /* pre-scale counters for performance */
52 int i = (num/2 - 1) * size, n = num * size, c, r; 52 int i = (num/2 - 1) * size, n = num * size, c, r;
53 53
54 if (!swap) 54 if (!swap_func)
55 swap = (size == 4 ? u32_swap : generic_swap); 55 swap_func = (size == 4 ? u32_swap : generic_swap);
56 56
57 /* heapify */ 57 /* heapify */
58 for ( ; i >= 0; i -= size) { 58 for ( ; i >= 0; i -= size) {
59 for (r = i; r * 2 + size < n; r = c) { 59 for (r = i; r * 2 + size < n; r = c) {
60 c = r * 2 + size; 60 c = r * 2 + size;
61 if (c < n - size && cmp(base + c, base + c + size) < 0) 61 if (c < n - size &&
62 cmp_func(base + c, base + c + size) < 0)
62 c += size; 63 c += size;
63 if (cmp(base + r, base + c) >= 0) 64 if (cmp_func(base + r, base + c) >= 0)
64 break; 65 break;
65 swap(base + r, base + c, size); 66 swap_func(base + r, base + c, size);
66 } 67 }
67 } 68 }
68 69
69 /* sort */ 70 /* sort */
70 for (i = n - size; i > 0; i -= size) { 71 for (i = n - size; i > 0; i -= size) {
71 swap(base, base + i, size); 72 swap_func(base, base + i, size);
72 for (r = 0; r * 2 + size < i; r = c) { 73 for (r = 0; r * 2 + size < i; r = c) {
73 c = r * 2 + size; 74 c = r * 2 + size;
74 if (c < i - size && cmp(base + c, base + c + size) < 0) 75 if (c < i - size &&
76 cmp_func(base + c, base + c + size) < 0)
75 c += size; 77 c += size;
76 if (cmp(base + r, base + c) >= 0) 78 if (cmp_func(base + r, base + c) >= 0)
77 break; 79 break;
78 swap(base + r, base + c, size); 80 swap_func(base + r, base + c, size);
79 } 81 }
80 } 82 }
81} 83}
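A hedged caller sketch for the renamed interface, as it might appear in kernel code (sort() and ARRAY_SIZE() come from <linux/sort.h> and <linux/kernel.h>; passing NULL for swap_func selects the u32 or generic swapper as shown above):

static int cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	/* sign of (x - y) without subtraction overflow */
	return (x > y) - (x < y);
}

static void sort_example(void)
{
	int v[] = { 3, 1, 2 };

	/* size == 4, so the NULL swap_func resolves to u32_swap */
	sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_int, NULL);
}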
diff --git a/mm/filemap.c b/mm/filemap.c
index 2f55a1e2baf7..ceba0bd03662 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
460 VM_BUG_ON(!PageLocked(page)); 460 VM_BUG_ON(!PageLocked(page));
461 461
462 error = mem_cgroup_cache_charge(page, current->mm, 462 error = mem_cgroup_cache_charge(page, current->mm,
463 gfp_mask & ~__GFP_HIGHMEM); 463 gfp_mask & GFP_RECLAIM_MASK);
464 if (error) 464 if (error)
465 goto out; 465 goto out;
466 466
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 51ee96545579..e2996b80601f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -21,11 +21,13 @@
21#include <linux/memcontrol.h> 21#include <linux/memcontrol.h>
22#include <linux/cgroup.h> 22#include <linux/cgroup.h>
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/pagemap.h>
24#include <linux/smp.h> 25#include <linux/smp.h>
25#include <linux/page-flags.h> 26#include <linux/page-flags.h>
26#include <linux/backing-dev.h> 27#include <linux/backing-dev.h>
27#include <linux/bit_spinlock.h> 28#include <linux/bit_spinlock.h>
28#include <linux/rcupdate.h> 29#include <linux/rcupdate.h>
30#include <linux/mutex.h>
29#include <linux/slab.h> 31#include <linux/slab.h>
30#include <linux/swap.h> 32#include <linux/swap.h>
31#include <linux/spinlock.h> 33#include <linux/spinlock.h>
@@ -34,12 +36,23 @@
34#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
35#include <linux/mm_inline.h> 37#include <linux/mm_inline.h>
36#include <linux/page_cgroup.h> 38#include <linux/page_cgroup.h>
39#include "internal.h"
37 40
38#include <asm/uaccess.h> 41#include <asm/uaccess.h>
39 42
40struct cgroup_subsys mem_cgroup_subsys __read_mostly; 43struct cgroup_subsys mem_cgroup_subsys __read_mostly;
41#define MEM_CGROUP_RECLAIM_RETRIES 5 44#define MEM_CGROUP_RECLAIM_RETRIES 5
42 45
46#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
47/* Turned on only when memory cgroup is enabled && really_do_swap_account == 1 */
48int do_swap_account __read_mostly;
49static int really_do_swap_account __initdata = 1; /* remembers the boot option */
50#else
51#define do_swap_account (0)
52#endif
53
54static DEFINE_MUTEX(memcg_tasklist); /* can be held under cgroup_mutex */
55
43/* 56/*
44 * Statistics for memory cgroup. 57 * Statistics for memory cgroup.
45 */ 58 */
@@ -60,7 +73,7 @@ struct mem_cgroup_stat_cpu {
60} ____cacheline_aligned_in_smp; 73} ____cacheline_aligned_in_smp;
61 74
62struct mem_cgroup_stat { 75struct mem_cgroup_stat {
63 struct mem_cgroup_stat_cpu cpustat[NR_CPUS]; 76 struct mem_cgroup_stat_cpu cpustat[0];
64}; 77};
65 78
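Shrinking cpustat[NR_CPUS] to cpustat[0] turns the per-cpu stats into a zero-length trailing array (a GNU C extension; C99 spells it cpustat[]), so the structure can be allocated with exactly as many per-cpu slots as the machine has rather than the compile-time maximum. A user-space sketch of the allocation idiom, with sizes assumed:

#include <stdlib.h>

struct stat_cpu { long count[4]; };

struct stats {
	struct stat_cpu cpustat[0];	/* sized at allocation time */
};

static struct stats *alloc_stats(int nr_cpus)
{
	return calloc(1, sizeof(struct stats) +
			 nr_cpus * sizeof(struct stat_cpu));
}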
66/* 79/*
@@ -89,9 +102,10 @@ struct mem_cgroup_per_zone {
89 /* 102 /*
90 * spin_lock to protect the per cgroup LRU 103 * spin_lock to protect the per cgroup LRU
91 */ 104 */
92 spinlock_t lru_lock;
93 struct list_head lists[NR_LRU_LISTS]; 105 struct list_head lists[NR_LRU_LISTS];
94 unsigned long count[NR_LRU_LISTS]; 106 unsigned long count[NR_LRU_LISTS];
107
108 struct zone_reclaim_stat reclaim_stat;
95}; 109};
96/* Macro for accessing counter */ 110/* Macro for accessing counter */
97#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)]) 111#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
@@ -122,44 +136,73 @@ struct mem_cgroup {
122 */ 136 */
123 struct res_counter res; 137 struct res_counter res;
124 /* 138 /*
139 * the counter to account for mem+swap usage.
140 */
141 struct res_counter memsw;
142 /*
125 * Per cgroup active and inactive list, similar to the 143 * Per cgroup active and inactive list, similar to the
126 * per zone LRU lists. 144 * per zone LRU lists.
127 */ 145 */
128 struct mem_cgroup_lru_info info; 146 struct mem_cgroup_lru_info info;
129 147
148 /*
149 protect against reclaim related member.
150 */
151 spinlock_t reclaim_param_lock;
152
130 int prev_priority; /* for recording reclaim priority */ 153 int prev_priority; /* for recording reclaim priority */
154
155 /*
156 * While reclaiming in a hierarchy, we cache the last child we
157 * reclaimed from. Protected by hierarchy_mutex.
158 */
159 struct mem_cgroup *last_scanned_child;
131 /* 160 /*
132 * statistics. 161 * Should the accounting and control be hierarchical, per subtree?
162 */
163 bool use_hierarchy;
164 unsigned long last_oom_jiffies;
165 atomic_t refcnt;
166
167 unsigned int swappiness;
168
169 /*
170 * statistics. This must be placed at the end of memcg, since cpustat[] is variable-size.
133 */ 171 */
134 struct mem_cgroup_stat stat; 172 struct mem_cgroup_stat stat;
135}; 173};
136static struct mem_cgroup init_mem_cgroup;
137 174
138enum charge_type { 175enum charge_type {
139 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 176 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
140 MEM_CGROUP_CHARGE_TYPE_MAPPED, 177 MEM_CGROUP_CHARGE_TYPE_MAPPED,
141 MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */ 178 MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
142 MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */ 179 MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
180 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
143 NR_CHARGE_TYPE, 181 NR_CHARGE_TYPE,
144}; 182};
145 183
146/* used only here, for readability */ 184/* used only here, for readability */
147#define PCGF_CACHE (1UL << PCG_CACHE) 185#define PCGF_CACHE (1UL << PCG_CACHE)
148#define PCGF_USED (1UL << PCG_USED) 186#define PCGF_USED (1UL << PCG_USED)
149#define PCGF_ACTIVE (1UL << PCG_ACTIVE)
150#define PCGF_LOCK (1UL << PCG_LOCK) 187#define PCGF_LOCK (1UL << PCG_LOCK)
151#define PCGF_FILE (1UL << PCG_FILE)
152static const unsigned long 188static const unsigned long
153pcg_default_flags[NR_CHARGE_TYPE] = { 189pcg_default_flags[NR_CHARGE_TYPE] = {
154 PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK, /* File Cache */ 190 PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
155 PCGF_ACTIVE | PCGF_USED | PCGF_LOCK, /* Anon */ 191 PCGF_USED | PCGF_LOCK, /* Anon */
156 PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */ 192 PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
157 0, /* FORCE */ 193 0, /* FORCE */
158}; 194};
159 195
160/* 196/* for encoding cft->private value on file */
161 * Always modified under lru lock. Then, not necessary to preempt_disable() 197#define _MEM (0)
162 */ 198#define _MEMSWAP (1)
199#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
200#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
201#define MEMFILE_ATTR(val) ((val) & 0xffff)
202
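The MEMFILE_* macros pack two small values into one cft->private integer: the counter type (_MEM or _MEMSWAP) in the high 16 bits and the resource attribute in the low 16. A round-trip check (the RES_LIMIT value of 3 below is assumed for illustration):

#include <assert.h>

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

int main(void)
{
	int priv = MEMFILE_PRIVATE(1 /* _MEMSWAP */, 3 /* e.g. RES_LIMIT */);

	assert(MEMFILE_TYPE(priv) == 1);
	assert(MEMFILE_ATTR(priv) == 3);
	return 0;
}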
203static void mem_cgroup_get(struct mem_cgroup *mem);
204static void mem_cgroup_put(struct mem_cgroup *mem);
205
163static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, 206static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
164 struct page_cgroup *pc, 207 struct page_cgroup *pc,
165 bool charge) 208 bool charge)
@@ -167,10 +210,9 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
167 int val = (charge)? 1 : -1; 210 int val = (charge)? 1 : -1;
168 struct mem_cgroup_stat *stat = &mem->stat; 211 struct mem_cgroup_stat *stat = &mem->stat;
169 struct mem_cgroup_stat_cpu *cpustat; 212 struct mem_cgroup_stat_cpu *cpustat;
213 int cpu = get_cpu();
170 214
171 VM_BUG_ON(!irqs_disabled()); 215 cpustat = &stat->cpustat[cpu];
172
173 cpustat = &stat->cpustat[smp_processor_id()];
174 if (PageCgroupCache(pc)) 216 if (PageCgroupCache(pc))
175 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val); 217 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
176 else 218 else
@@ -182,6 +224,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
182 else 224 else
183 __mem_cgroup_stat_add_safe(cpustat, 225 __mem_cgroup_stat_add_safe(cpustat,
184 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1); 226 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
227 put_cpu();
185} 228}
186 229
187static struct mem_cgroup_per_zone * 230static struct mem_cgroup_per_zone *
@@ -197,6 +240,9 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
197 int nid = page_cgroup_nid(pc); 240 int nid = page_cgroup_nid(pc);
198 int zid = page_cgroup_zid(pc); 241 int zid = page_cgroup_zid(pc);
199 242
243 if (!mem)
244 return NULL;
245
200 return mem_cgroup_zoneinfo(mem, nid, zid); 246 return mem_cgroup_zoneinfo(mem, nid, zid);
201} 247}
202 248
@@ -236,77 +282,152 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
236 struct mem_cgroup, css); 282 struct mem_cgroup, css);
237} 283}
238 284
239static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz, 285static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
240 struct page_cgroup *pc)
241{ 286{
242 int lru = LRU_BASE; 287 struct mem_cgroup *mem = NULL;
288 /*
289 * Because we hold no locks, mm->owner may be being moved to another
290 * cgroup. We use css_tryget() here, even though it looks
291 * pessimistic, rather than adding locks.
292 */
293 rcu_read_lock();
294 do {
295 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
296 if (unlikely(!mem))
297 break;
298 } while (!css_tryget(&mem->css));
299 rcu_read_unlock();
300 return mem;
301}
243 302
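try_get_mem_cgroup_from_mm() is an instance of the lookup-then-tryget idiom: under RCU, an object reached through a pointer may already be dying, so a conditional reference must be retried against a fresh lookup. A generic kernel-flavored sketch (all names here are hypothetical):

struct obj {
	atomic_t ref;
};

static struct obj *obj_get_live(struct obj __rcu **slot)
{
	struct obj *o;

	rcu_read_lock();
	do {
		o = rcu_dereference(*slot);
		if (!o)
			break;			/* slot emptied under us */
	} while (!atomic_inc_not_zero(&o->ref));
	rcu_read_unlock();
	return o;				/* NULL, or a held reference */
}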
244 if (PageCgroupUnevictable(pc)) 303static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
245 lru = LRU_UNEVICTABLE; 304{
246 else { 305 if (!mem)
247 if (PageCgroupActive(pc)) 306 return true;
248 lru += LRU_ACTIVE; 307 return css_is_removed(&mem->css);
249 if (PageCgroupFile(pc)) 308}
250 lru += LRU_FILE;
251 }
252 309
253 MEM_CGROUP_ZSTAT(mz, lru) -= 1; 310/*
311 * The following LRU functions may be used without PCG_LOCK.
312 * They are called by the global LRU code, independently of memcg.
313 * What we must take care of here is the validity of pc->mem_cgroup.
314 *
315 * pc->mem_cgroup changes when
316 * 1. charging
317 * 2. moving an account
318 * In the typical case, "charge" happens before add-to-lru. The exception
319 * is SwapCache, which is added to the LRU before being charged.
320 * If the PCG_USED bit is not set, the page_cgroup is not on this private LRU.
321 * When an account is being moved, the page is not on the LRU; it is isolated.
322 */
254 323
255 mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false); 324void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
256 list_del(&pc->lru); 325{
326 struct page_cgroup *pc;
327 struct mem_cgroup *mem;
328 struct mem_cgroup_per_zone *mz;
329
330 if (mem_cgroup_disabled())
331 return;
332 pc = lookup_page_cgroup(page);
333 /* can happen while we handle swapcache. */
334 if (list_empty(&pc->lru) || !pc->mem_cgroup)
335 return;
336 /*
337 * We don't check PCG_USED bit. It's cleared when the "page" is finally
338 * removed from global LRU.
339 */
340 mz = page_cgroup_zoneinfo(pc);
341 mem = pc->mem_cgroup;
342 MEM_CGROUP_ZSTAT(mz, lru) -= 1;
343 list_del_init(&pc->lru);
344 return;
257} 345}
258 346
259static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz, 347void mem_cgroup_del_lru(struct page *page)
260 struct page_cgroup *pc)
261{ 348{
262 int lru = LRU_BASE; 349 mem_cgroup_del_lru_list(page, page_lru(page));
350}
263 351
264 if (PageCgroupUnevictable(pc)) 352void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
265 lru = LRU_UNEVICTABLE; 353{
266 else { 354 struct mem_cgroup_per_zone *mz;
267 if (PageCgroupActive(pc)) 355 struct page_cgroup *pc;
268 lru += LRU_ACTIVE;
269 if (PageCgroupFile(pc))
270 lru += LRU_FILE;
271 }
272 356
273 MEM_CGROUP_ZSTAT(mz, lru) += 1; 357 if (mem_cgroup_disabled())
274 list_add(&pc->lru, &mz->lists[lru]); 358 return;
275 359
276 mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true); 360 pc = lookup_page_cgroup(page);
361 smp_rmb();
362 /* unused page is not rotated. */
363 if (!PageCgroupUsed(pc))
364 return;
365 mz = page_cgroup_zoneinfo(pc);
366 list_move(&pc->lru, &mz->lists[lru]);
277} 367}
278 368
279static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru) 369void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
280{ 370{
281 struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc); 371 struct page_cgroup *pc;
282 int active = PageCgroupActive(pc); 372 struct mem_cgroup_per_zone *mz;
283 int file = PageCgroupFile(pc);
284 int unevictable = PageCgroupUnevictable(pc);
285 enum lru_list from = unevictable ? LRU_UNEVICTABLE :
286 (LRU_FILE * !!file + !!active);
287 373
288 if (lru == from) 374 if (mem_cgroup_disabled())
375 return;
376 pc = lookup_page_cgroup(page);
377 /* barrier to sync with "charge" */
378 smp_rmb();
379 if (!PageCgroupUsed(pc))
289 return; 380 return;
290 381
291 MEM_CGROUP_ZSTAT(mz, from) -= 1; 382 mz = page_cgroup_zoneinfo(pc);
383 MEM_CGROUP_ZSTAT(mz, lru) += 1;
384 list_add(&pc->lru, &mz->lists[lru]);
385}
386
387/*
388 * When handling SwapCache, pc->mem_cgroup may change while it is linked to
389 * the LRU, because the page may be reused after being fully uncharged (due to
390 * SwapCache behavior). To handle that, unlink the page_cgroup from the LRU
391 * before charging it again. This function is only used to charge SwapCache.
392 * It runs under lock_page(), with zone->lru_lock never held.
393 */
394static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
395{
396 unsigned long flags;
397 struct zone *zone = page_zone(page);
398 struct page_cgroup *pc = lookup_page_cgroup(page);
399
400 spin_lock_irqsave(&zone->lru_lock, flags);
292 /* 401 /*
293 * However this is done under mz->lru_lock, another flags, which 402 * Forget old LRU when this page_cgroup is *not* used. This Used bit
294 * are not related to LRU, will be modified from out-of-lock. 403 * is guarded by lock_page() because the page is SwapCache.
295 * We have to use atomic set/clear flags.
296 */ 404 */
297 if (is_unevictable_lru(lru)) { 405 if (!PageCgroupUsed(pc))
298 ClearPageCgroupActive(pc); 406 mem_cgroup_del_lru_list(page, page_lru(page));
299 SetPageCgroupUnevictable(pc); 407 spin_unlock_irqrestore(&zone->lru_lock, flags);
300 } else { 408}
301 if (is_active_lru(lru))
302 SetPageCgroupActive(pc);
303 else
304 ClearPageCgroupActive(pc);
305 ClearPageCgroupUnevictable(pc);
306 }
307 409
308 MEM_CGROUP_ZSTAT(mz, lru) += 1; 410static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
309 list_move(&pc->lru, &mz->lists[lru]); 411{
412 unsigned long flags;
413 struct zone *zone = page_zone(page);
414 struct page_cgroup *pc = lookup_page_cgroup(page);
415
416 spin_lock_irqsave(&zone->lru_lock, flags);
417 /* link when the page is linked to LRU but page_cgroup isn't */
418 if (PageLRU(page) && list_empty(&pc->lru))
419 mem_cgroup_add_lru_list(page, page_lru(page));
420 spin_unlock_irqrestore(&zone->lru_lock, flags);
421}
422
423
424void mem_cgroup_move_lists(struct page *page,
425 enum lru_list from, enum lru_list to)
426{
427 if (mem_cgroup_disabled())
428 return;
429 mem_cgroup_del_lru_list(page, from);
430 mem_cgroup_add_lru_list(page, to);
310} 431}
311 432
312int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) 433int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
@@ -320,37 +441,6 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
320} 441}
321 442
322/* 443/*
323 * This routine assumes that the appropriate zone's lru lock is already held
324 */
325void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
326{
327 struct page_cgroup *pc;
328 struct mem_cgroup_per_zone *mz;
329 unsigned long flags;
330
331 if (mem_cgroup_subsys.disabled)
332 return;
333
334 /*
335 * We cannot lock_page_cgroup while holding zone's lru_lock,
336 * because other holders of lock_page_cgroup can be interrupted
337 * with an attempt to rotate_reclaimable_page. But we cannot
338 * safely get to page_cgroup without it, so just try_lock it:
339 * mem_cgroup_isolate_pages allows for page left on wrong list.
340 */
341 pc = lookup_page_cgroup(page);
342 if (!trylock_page_cgroup(pc))
343 return;
344 if (pc && PageCgroupUsed(pc)) {
345 mz = page_cgroup_zoneinfo(pc);
346 spin_lock_irqsave(&mz->lru_lock, flags);
347 __mem_cgroup_move_lists(pc, lru);
348 spin_unlock_irqrestore(&mz->lru_lock, flags);
349 }
350 unlock_page_cgroup(pc);
351}
352
353/*
354 * Calculate mapped_ratio under memory controller. This will be used in 444 * Calculate mapped_ratio under memory controller. This will be used in
355 * vmscan.c for determining whether we have to reclaim mapped pages. 445 * vmscan.c for determining whether we have to reclaim mapped pages.
356 */ 446 */
@@ -372,39 +462,108 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
372 */ 462 */
373int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem) 463int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
374{ 464{
375 return mem->prev_priority; 465 int prev_priority;
466
467 spin_lock(&mem->reclaim_param_lock);
468 prev_priority = mem->prev_priority;
469 spin_unlock(&mem->reclaim_param_lock);
470
471 return prev_priority;
376} 472}
377 473
378void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority) 474void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
379{ 475{
476 spin_lock(&mem->reclaim_param_lock);
380 if (priority < mem->prev_priority) 477 if (priority < mem->prev_priority)
381 mem->prev_priority = priority; 478 mem->prev_priority = priority;
479 spin_unlock(&mem->reclaim_param_lock);
382} 480}
383 481
384void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority) 482void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
385{ 483{
484 spin_lock(&mem->reclaim_param_lock);
386 mem->prev_priority = priority; 485 mem->prev_priority = priority;
486 spin_unlock(&mem->reclaim_param_lock);
387} 487}
388 488
389/* 489static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
390 * Calculate # of pages to be scanned in this priority/zone. 490{
391 * See also vmscan.c 491 unsigned long active;
392 * 492 unsigned long inactive;
393 * priority starts from "DEF_PRIORITY" and decremented in each loop. 493 unsigned long gb;
394 * (see include/linux/mmzone.h) 494 unsigned long inactive_ratio;
395 */ 495
496 inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
497 active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
498
499 gb = (inactive + active) >> (30 - PAGE_SHIFT);
500 if (gb)
501 inactive_ratio = int_sqrt(10 * gb);
502 else
503 inactive_ratio = 1;
504
505 if (present_pages) {
506 present_pages[0] = inactive;
507 present_pages[1] = active;
508 }
509
510 return inactive_ratio;
511}
512
513int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
514{
515 unsigned long active;
516 unsigned long inactive;
517 unsigned long present_pages[2];
518 unsigned long inactive_ratio;
396 519
397long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone, 520 inactive_ratio = calc_inactive_ratio(memcg, present_pages);
398 int priority, enum lru_list lru) 521
522 inactive = present_pages[0];
523 active = present_pages[1];
524
525 if (inactive * inactive_ratio < active)
526 return 1;
527
528 return 0;
529}
530
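Worked numbers for the heuristic above: with 16GB of anon memory, ratio = int_sqrt(10 * 16) = 12, so roughly up to 1/13 of anon pages may sit on the inactive list before it counts as low. A user-space check of the arithmetic (PAGE_SHIFT assumed to be 12; link with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
	unsigned long per_gb = 1UL << (30 - 12);	/* pages per GB */
	unsigned long inactive = 1 * per_gb;
	unsigned long active = 15 * per_gb;
	unsigned long gb = (inactive + active) >> (30 - 12);
	unsigned long ratio = gb ? (unsigned long)sqrt(10.0 * gb) : 1;

	/* sqrt(160) ~= 12.6 -> 12; 1GB * 12 < 15GB, so inactive is "low" */
	printf("ratio=%lu low=%d\n", ratio, inactive * ratio < active);
	return 0;
}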
531unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
532 struct zone *zone,
533 enum lru_list lru)
399{ 534{
400 long nr_pages;
401 int nid = zone->zone_pgdat->node_id; 535 int nid = zone->zone_pgdat->node_id;
402 int zid = zone_idx(zone); 536 int zid = zone_idx(zone);
403 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid); 537 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
404 538
405 nr_pages = MEM_CGROUP_ZSTAT(mz, lru); 539 return MEM_CGROUP_ZSTAT(mz, lru);
540}
406 541
407 return (nr_pages >> priority); 542struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
543 struct zone *zone)
544{
545 int nid = zone->zone_pgdat->node_id;
546 int zid = zone_idx(zone);
547 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
548
549 return &mz->reclaim_stat;
550}
551
552struct zone_reclaim_stat *
553mem_cgroup_get_reclaim_stat_from_page(struct page *page)
554{
555 struct page_cgroup *pc;
556 struct mem_cgroup_per_zone *mz;
557
558 if (mem_cgroup_disabled())
559 return NULL;
560
561 pc = lookup_page_cgroup(page);
562 mz = page_cgroup_zoneinfo(pc);
563 if (!mz)
564 return NULL;
565
566 return &mz->reclaim_stat;
408} 567}
409 568
410unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, 569unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -429,95 +588,281 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
429 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); 588 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
430 src = &mz->lists[lru]; 589 src = &mz->lists[lru];
431 590
432 spin_lock(&mz->lru_lock);
433 scan = 0; 591 scan = 0;
434 list_for_each_entry_safe_reverse(pc, tmp, src, lru) { 592 list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
435 if (scan >= nr_to_scan) 593 if (scan >= nr_to_scan)
436 break; 594 break;
595
596 page = pc->page;
437 if (unlikely(!PageCgroupUsed(pc))) 597 if (unlikely(!PageCgroupUsed(pc)))
438 continue; 598 continue;
439 page = pc->page;
440
441 if (unlikely(!PageLRU(page))) 599 if (unlikely(!PageLRU(page)))
442 continue; 600 continue;
443 601
444 /*
445 * TODO: play better with lumpy reclaim, grabbing anything.
446 */
447 if (PageUnevictable(page) ||
448 (PageActive(page) && !active) ||
449 (!PageActive(page) && active)) {
450 __mem_cgroup_move_lists(pc, page_lru(page));
451 continue;
452 }
453
454 scan++; 602 scan++;
455 list_move(&pc->lru, &pc_list);
456
457 if (__isolate_lru_page(page, mode, file) == 0) { 603 if (__isolate_lru_page(page, mode, file) == 0) {
458 list_move(&page->lru, dst); 604 list_move(&page->lru, dst);
459 nr_taken++; 605 nr_taken++;
460 } 606 }
461 } 607 }
462 608
463 list_splice(&pc_list, src);
464 spin_unlock(&mz->lru_lock);
465
466 *scanned = scan; 609 *scanned = scan;
467 return nr_taken; 610 return nr_taken;
468} 611}
469 612
613#define mem_cgroup_from_res_counter(counter, member) \
614 container_of(counter, struct mem_cgroup, member)
615
470/* 616/*
471 * Charge the memory controller for page usage. 617 * This routine finds the DFS walk successor. It must be
472 * Return 618 * called with hierarchy_mutex held.
473 * 0 if the charge was successful
474 * < 0 if the cgroup is over its limit
475 */ 619 */
476static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, 620static struct mem_cgroup *
477 gfp_t gfp_mask, enum charge_type ctype, 621mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
478 struct mem_cgroup *memcg)
479{ 622{
623 struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
624
625 curr_cgroup = curr->css.cgroup;
626 root_cgroup = root_mem->css.cgroup;
627
628 if (!list_empty(&curr_cgroup->children)) {
629 /*
630 * Walk down to children
631 */
632 mem_cgroup_put(curr);
633 cgroup = list_entry(curr_cgroup->children.next,
634 struct cgroup, sibling);
635 curr = mem_cgroup_from_cont(cgroup);
636 mem_cgroup_get(curr);
637 goto done;
638 }
639
640visit_parent:
641 if (curr_cgroup == root_cgroup) {
642 mem_cgroup_put(curr);
643 curr = root_mem;
644 mem_cgroup_get(curr);
645 goto done;
646 }
647
648 /*
649 * Go to the next sibling
650 */
651 if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
652 mem_cgroup_put(curr);
653 cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
654 sibling);
655 curr = mem_cgroup_from_cont(cgroup);
656 mem_cgroup_get(curr);
657 goto done;
658 }
659
660 /*
661 * Go up to next parent and next parent's sibling if need be
662 */
663 curr_cgroup = curr_cgroup->parent;
664 goto visit_parent;
665
666done:
667 root_mem->last_scanned_child = curr;
668 return curr;
669}
670
671/*
672 * Visit the first child of @mem (not necessarily the first in cgroup-list
673 * order, since we track last_scanned_child) and use it as the target to
674 * reclaim pages from.
675 */
676static struct mem_cgroup *
677mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
678{
679 struct cgroup *cgroup;
680 struct mem_cgroup *ret;
681 bool obsolete;
682
683 obsolete = mem_cgroup_is_obsolete(root_mem->last_scanned_child);
684
685 /*
686 * Scan all children under the mem_cgroup mem
687 */
688 mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
689 if (list_empty(&root_mem->css.cgroup->children)) {
690 ret = root_mem;
691 goto done;
692 }
693
694 if (!root_mem->last_scanned_child || obsolete) {
695
696 if (obsolete && root_mem->last_scanned_child)
697 mem_cgroup_put(root_mem->last_scanned_child);
698
699 cgroup = list_first_entry(&root_mem->css.cgroup->children,
700 struct cgroup, sibling);
701 ret = mem_cgroup_from_cont(cgroup);
702 mem_cgroup_get(ret);
703 } else
704 ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
705 root_mem);
706
707done:
708 root_mem->last_scanned_child = ret;
709 mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
710 return ret;
711}
712
713static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
714{
715 if (do_swap_account) {
716 if (res_counter_check_under_limit(&mem->res) &&
717 res_counter_check_under_limit(&mem->memsw))
718 return true;
719 } else
720 if (res_counter_check_under_limit(&mem->res))
721 return true;
722 return false;
723}
724
725static unsigned int get_swappiness(struct mem_cgroup *memcg)
726{
727 struct cgroup *cgrp = memcg->css.cgroup;
728 unsigned int swappiness;
729
730 /* root ? */
731 if (cgrp->parent == NULL)
732 return vm_swappiness;
733
734 spin_lock(&memcg->reclaim_param_lock);
735 swappiness = memcg->swappiness;
736 spin_unlock(&memcg->reclaim_param_lock);
737
738 return swappiness;
739}
740
741/*
742 * Dance down the hierarchy if needed to reclaim memory. We remember the
743 * last child we reclaimed from, so that we don't end up penalizing
744 * one child extensively based on its position in the children list.
745 *
746 * root_mem is the original ancestor that we've been reclaiming from.
747 */
748static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
749 gfp_t gfp_mask, bool noswap)
750{
751 struct mem_cgroup *next_mem;
752 int ret = 0;
753
754 /*
755 * Reclaim unconditionally and don't check for return value.
756 * We need to reclaim in the current group and down the tree.
757 * One might think about checking for children before reclaiming,
758 * but there might be left over accounting, even after children
759 * have left.
760 */
761 ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
762 get_swappiness(root_mem));
763 if (mem_cgroup_check_under_limit(root_mem))
764 return 0;
765 if (!root_mem->use_hierarchy)
766 return ret;
767
768 next_mem = mem_cgroup_get_first_node(root_mem);
769
770 while (next_mem != root_mem) {
771 if (mem_cgroup_is_obsolete(next_mem)) {
772 mem_cgroup_put(next_mem);
773 next_mem = mem_cgroup_get_first_node(root_mem);
774 continue;
775 }
776 ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
777 get_swappiness(next_mem));
778 if (mem_cgroup_check_under_limit(root_mem))
779 return 0;
780 mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
781 next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
782 mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
783 }
784 return ret;
785}
786
787bool mem_cgroup_oom_called(struct task_struct *task)
788{
789 bool ret = false;
480 struct mem_cgroup *mem; 790 struct mem_cgroup *mem;
481 struct page_cgroup *pc; 791 struct mm_struct *mm;
482 unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
483 struct mem_cgroup_per_zone *mz;
484 unsigned long flags;
485 792
486 pc = lookup_page_cgroup(page); 793 rcu_read_lock();
487 /* can happen at boot */ 794 mm = task->mm;
488 if (unlikely(!pc)) 795 if (!mm)
796 mm = &init_mm;
797 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
798 if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
799 ret = true;
800 rcu_read_unlock();
801 return ret;
802}
803/*
804 * Unlike exported interface, "oom" parameter is added. if oom==true,
805 * oom-killer can be invoked.
806 */
807static int __mem_cgroup_try_charge(struct mm_struct *mm,
808 gfp_t gfp_mask, struct mem_cgroup **memcg,
809 bool oom)
810{
811 struct mem_cgroup *mem, *mem_over_limit;
812 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
813 struct res_counter *fail_res;
814
815 if (unlikely(test_thread_flag(TIF_MEMDIE))) {
816 /* Don't account this! */
817 *memcg = NULL;
489 return 0; 818 return 0;
490 prefetchw(pc); 819 }
820
491 /* 821 /*
492 * We always charge the cgroup the mm_struct belongs to. 822 * We always charge the cgroup the mm_struct belongs to.
493 * The mm_struct's mem_cgroup changes on task migration if the 823 * The mm_struct's mem_cgroup changes on task migration if the
494 * thread group leader migrates. It's possible that mm is not 824 * thread group leader migrates. It's possible that mm is not
495 * set, if so charge the init_mm (happens for pagecache usage). 825 * set, if so charge the init_mm (happens for pagecache usage).
496 */ 826 */
497 827 mem = *memcg;
498 if (likely(!memcg)) { 828 if (likely(!mem)) {
499 rcu_read_lock(); 829 mem = try_get_mem_cgroup_from_mm(mm);
500 mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); 830 *memcg = mem;
501 if (unlikely(!mem)) {
502 rcu_read_unlock();
503 return 0;
504 }
505 /*
506 * For every charge from the cgroup, increment reference count
507 */
508 css_get(&mem->css);
509 rcu_read_unlock();
510 } else { 831 } else {
511 mem = memcg; 832 css_get(&mem->css);
512 css_get(&memcg->css);
513 } 833 }
834 if (unlikely(!mem))
835 return 0;
836
837 VM_BUG_ON(mem_cgroup_is_obsolete(mem));
838
839 while (1) {
840 int ret;
841 bool noswap = false;
842
843 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
844 if (likely(!ret)) {
845 if (!do_swap_account)
846 break;
847 ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
848 &fail_res);
849 if (likely(!ret))
850 break;
851 /* mem+swap counter fails */
852 res_counter_uncharge(&mem->res, PAGE_SIZE);
853 noswap = true;
854 mem_over_limit = mem_cgroup_from_res_counter(fail_res,
855 memsw);
856 } else
857 /* mem counter fails */
858 mem_over_limit = mem_cgroup_from_res_counter(fail_res,
859 res);
514 860
515 while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) {
516 if (!(gfp_mask & __GFP_WAIT)) 861 if (!(gfp_mask & __GFP_WAIT))
517 goto out; 862 goto nomem;
518 863
519 if (try_to_free_mem_cgroup_pages(mem, gfp_mask)) 864 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
520 continue; 865 noswap);
521 866
522 /* 867 /*
523 * try_to_free_mem_cgroup_pages() might not give us a full 868 * try_to_free_mem_cgroup_pages() might not give us a full
@@ -525,49 +870,214 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
525 * moved to swap cache or just unmapped from the cgroup. 870 * moved to swap cache or just unmapped from the cgroup.
526 * Check the limit again to see if the reclaim reduced the 871 * Check the limit again to see if the reclaim reduced the
527 * current usage of the cgroup before giving up 872 * current usage of the cgroup before giving up
873 *
528 */ 874 */
529 if (res_counter_check_under_limit(&mem->res)) 875 if (mem_cgroup_check_under_limit(mem_over_limit))
530 continue; 876 continue;
531 877
532 if (!nr_retries--) { 878 if (!nr_retries--) {
533 mem_cgroup_out_of_memory(mem, gfp_mask); 879 if (oom) {
534 goto out; 880 mutex_lock(&memcg_tasklist);
881 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
882 mutex_unlock(&memcg_tasklist);
883 mem_over_limit->last_oom_jiffies = jiffies;
884 }
885 goto nomem;
535 } 886 }
536 } 887 }
888 return 0;
889nomem:
890 css_put(&mem->css);
891 return -ENOMEM;
892}
537 893
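The charge loop above orders its two counters deliberately: charge mem first, then mem+swap, and when the mem+swap counter is the one that fails, roll back the mem charge and reclaim with noswap, since swapping pages out cannot reduce mem+swap usage. A self-contained sketch of that ordering (simplified counters, no locking):

#include <stdbool.h>

struct counter { unsigned long usage, limit; };

static int counter_charge(struct counter *c, unsigned long val)
{
	if (c->usage + val > c->limit)
		return -1;
	c->usage += val;
	return 0;
}

static void counter_uncharge(struct counter *c, unsigned long val)
{
	c->usage -= val;
}

static int charge_mem_and_memsw(struct counter *res, struct counter *memsw,
				unsigned long val, bool *noswap)
{
	if (counter_charge(res, val))
		return -1;		/* mem limit hit: reclaim may swap */
	if (counter_charge(memsw, val)) {
		counter_uncharge(res, val);
		*noswap = true;		/* mem+swap hit: swap cannot help */
		return -1;
	}
	return 0;
}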
894static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
895{
896 struct mem_cgroup *mem;
897 swp_entry_t ent;
898
899 if (!PageSwapCache(page))
900 return NULL;
901
902 ent.val = page_private(page);
903 mem = lookup_swap_cgroup(ent);
904 if (!mem)
905 return NULL;
906 if (!css_tryget(&mem->css))
907 return NULL;
908 return mem;
909}
910
911/*
912 * Commit a charge obtained by __mem_cgroup_try_charge() and mark the
913 * page_cgroup USED. If it is already USED, uncharge and return.
914 */
915
916static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
917 struct page_cgroup *pc,
918 enum charge_type ctype)
919{
920 /* try_charge() may have stored NULL in *memcg; handle that case. */
921 if (!mem)
922 return;
538 923
539 lock_page_cgroup(pc); 924 lock_page_cgroup(pc);
540 if (unlikely(PageCgroupUsed(pc))) { 925 if (unlikely(PageCgroupUsed(pc))) {
541 unlock_page_cgroup(pc); 926 unlock_page_cgroup(pc);
542 res_counter_uncharge(&mem->res, PAGE_SIZE); 927 res_counter_uncharge(&mem->res, PAGE_SIZE);
928 if (do_swap_account)
929 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
543 css_put(&mem->css); 930 css_put(&mem->css);
544 931 return;
545 goto done;
546 } 932 }
547 pc->mem_cgroup = mem; 933 pc->mem_cgroup = mem;
548 /* 934 smp_wmb();
549 * If a page is accounted as a page cache, insert to inactive list.
550 * If anon, insert to active list.
551 */
552 pc->flags = pcg_default_flags[ctype]; 935 pc->flags = pcg_default_flags[ctype];
553 936
554 mz = page_cgroup_zoneinfo(pc); 937 mem_cgroup_charge_statistics(mem, pc, true);
555 938
556 spin_lock_irqsave(&mz->lru_lock, flags);
557 __mem_cgroup_add_list(mz, pc);
558 spin_unlock_irqrestore(&mz->lru_lock, flags);
559 unlock_page_cgroup(pc); 939 unlock_page_cgroup(pc);
940}
560 941
561done: 942/**
562 return 0; 943 * mem_cgroup_move_account - move account of the page
944 * @pc: page_cgroup of the page.
945 * @from: mem_cgroup which the page is moved from.
946 * @to: mem_cgroup which the page is moved to. @from != @to.
947 *
948 * The caller must confirm the following.
949 * - page is not on LRU (isolate_page() is useful.)
950 *
951 * returns 0 on success,
952 * returns -EBUSY when the lock is busy or "pc" is unstable.
953 *
954 * This function does "uncharge" from old cgroup but doesn't do "charge" to
955 * new cgroup. It should be done by a caller.
956 */
957
958static int mem_cgroup_move_account(struct page_cgroup *pc,
959 struct mem_cgroup *from, struct mem_cgroup *to)
960{
961 struct mem_cgroup_per_zone *from_mz, *to_mz;
962 int nid, zid;
963 int ret = -EBUSY;
964
965 VM_BUG_ON(from == to);
966 VM_BUG_ON(PageLRU(pc->page));
967
968 nid = page_cgroup_nid(pc);
969 zid = page_cgroup_zid(pc);
970 from_mz = mem_cgroup_zoneinfo(from, nid, zid);
971 to_mz = mem_cgroup_zoneinfo(to, nid, zid);
972
973 if (!trylock_page_cgroup(pc))
974 return ret;
975
976 if (!PageCgroupUsed(pc))
977 goto out;
978
979 if (pc->mem_cgroup != from)
980 goto out;
981
982 css_put(&from->css);
983 res_counter_uncharge(&from->res, PAGE_SIZE);
984 mem_cgroup_charge_statistics(from, pc, false);
985 if (do_swap_account)
986 res_counter_uncharge(&from->memsw, PAGE_SIZE);
987 pc->mem_cgroup = to;
988 mem_cgroup_charge_statistics(to, pc, true);
989 css_get(&to->css);
990 ret = 0;
563out: 991out:
564 css_put(&mem->css); 992 unlock_page_cgroup(pc);
565 return -ENOMEM; 993 return ret;
994}
995
996/*
997 * move charges to its parent.
998 */
999
1000static int mem_cgroup_move_parent(struct page_cgroup *pc,
1001 struct mem_cgroup *child,
1002 gfp_t gfp_mask)
1003{
1004 struct page *page = pc->page;
1005 struct cgroup *cg = child->css.cgroup;
1006 struct cgroup *pcg = cg->parent;
1007 struct mem_cgroup *parent;
1008 int ret;
1009
1010 /* Is ROOT ? */
1011 if (!pcg)
1012 return -EINVAL;
1013
1014
1015 parent = mem_cgroup_from_cont(pcg);
1016
1017
1018 ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1019 if (ret || !parent)
1020 return ret;
1021
1022 if (!get_page_unless_zero(page))
1023 return -EBUSY;
1024
1025 ret = isolate_lru_page(page);
1026
1027 if (ret)
1028 goto cancel;
1029
1030 ret = mem_cgroup_move_account(pc, child, parent);
1031
1032 /* drop the extra refcnt from try_charge(); move_account took its own */
1033 css_put(&parent->css);
1034 putback_lru_page(page);
1035 if (!ret) {
1036 put_page(page);
1037 return 0;
1038 }
1039 /* uncharge if move fails */
1040cancel:
1041 res_counter_uncharge(&parent->res, PAGE_SIZE);
1042 if (do_swap_account)
1043 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1044 put_page(page);
1045 return ret;
1046}
1047
1048/*
1049 * Charge the memory controller for page usage.
1050 * Return
1051 * 0 if the charge was successful
1052 * < 0 if the cgroup is over its limit
1053 */
1054static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1055 gfp_t gfp_mask, enum charge_type ctype,
1056 struct mem_cgroup *memcg)
1057{
1058 struct mem_cgroup *mem;
1059 struct page_cgroup *pc;
1060 int ret;
1061
1062 pc = lookup_page_cgroup(page);
1063 /* can happen at boot */
1064 if (unlikely(!pc))
1065 return 0;
1066 prefetchw(pc);
1067
1068 mem = memcg;
1069 ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1070 if (ret || !mem)
1071 return ret;
1072
1073 __mem_cgroup_commit_charge(mem, pc, ctype);
1074 return 0;
566} 1075}
567 1076
568int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) 1077int mem_cgroup_newpage_charge(struct page *page,
1078 struct mm_struct *mm, gfp_t gfp_mask)
569{ 1079{
570 if (mem_cgroup_subsys.disabled) 1080 if (mem_cgroup_disabled())
571 return 0; 1081 return 0;
572 if (PageCompound(page)) 1082 if (PageCompound(page))
573 return 0; 1083 return 0;
@@ -589,7 +1099,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
589int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 1099int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
590 gfp_t gfp_mask) 1100 gfp_t gfp_mask)
591{ 1101{
592 if (mem_cgroup_subsys.disabled) 1102 struct mem_cgroup *mem = NULL;
1103 int ret;
1104
1105 if (mem_cgroup_disabled())
593 return 0; 1106 return 0;
594 if (PageCompound(page)) 1107 if (PageCompound(page))
595 return 0; 1108 return 0;
@@ -601,6 +1114,8 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
601 * For GFP_NOWAIT case, the page may be pre-charged before calling 1114 * For GFP_NOWAIT case, the page may be pre-charged before calling
602 * add_to_page_cache(). (See shmem.c) check it here and avoid to call 1115 * add_to_page_cache(). (See shmem.c) check it here and avoid to call
603 * charge twice. (It works but has to pay a bit larger cost.) 1116 * charge twice. (It works but has to pay a bit larger cost.)
1117 * And when the page is SwapCache, swap information should be taken
1118 * into account. This now runs under lock_page().
604 */ 1119 */
605 if (!(gfp_mask & __GFP_WAIT)) { 1120 if (!(gfp_mask & __GFP_WAIT)) {
606 struct page_cgroup *pc; 1121 struct page_cgroup *pc;
@@ -617,58 +1132,198 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
617 unlock_page_cgroup(pc); 1132 unlock_page_cgroup(pc);
618 } 1133 }
619 1134
620 if (unlikely(!mm)) 1135 if (do_swap_account && PageSwapCache(page)) {
1136 mem = try_get_mem_cgroup_from_swapcache(page);
1137 if (mem)
1138 mm = NULL;
1139 else
1140 mem = NULL;
1141 /* SwapCache may be still linked to LRU now. */
1142 mem_cgroup_lru_del_before_commit_swapcache(page);
1143 }
1144
1145 if (unlikely(!mm && !mem))
621 mm = &init_mm; 1146 mm = &init_mm;
622 1147
623 if (page_is_file_cache(page)) 1148 if (page_is_file_cache(page))
624 return mem_cgroup_charge_common(page, mm, gfp_mask, 1149 return mem_cgroup_charge_common(page, mm, gfp_mask,
625 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL); 1150 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
626 else 1151
627 return mem_cgroup_charge_common(page, mm, gfp_mask, 1152 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
628 MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL); 1153 MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1154 if (mem)
1155 css_put(&mem->css);
1156 if (PageSwapCache(page))
1157 mem_cgroup_lru_add_after_commit_swapcache(page);
1158
1159 if (do_swap_account && !ret && PageSwapCache(page)) {
1160 swp_entry_t ent = {.val = page_private(page)};
1161 /* avoid double counting */
1162 mem = swap_cgroup_record(ent, NULL);
1163 if (mem) {
1164 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1165 mem_cgroup_put(mem);
1166 }
1167 }
1168 return ret;
1169}
1170
1171/*
1172 * During swap-in (try_charge -> commit or cancel), the page is locked.
1173 * When try_charge() returns successfully, one refcnt to the memcg, not yet
1174 * tied to a struct page_cgroup, is acquired. This refcnt will be consumed
1175 * by "commit()" or released by "cancel()".
1176 */
1177int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1178 struct page *page,
1179 gfp_t mask, struct mem_cgroup **ptr)
1180{
1181 struct mem_cgroup *mem;
1182 int ret;
1183
1184 if (mem_cgroup_disabled())
1185 return 0;
1186
1187 if (!do_swap_account)
1188 goto charge_cur_mm;
1189 /*
1190 * A racing thread's fault, or swapoff, may have already updated
1191 * the pte, and even removed page from swap cache: return success
1192 * to go on to do_swap_page()'s pte_same() test, which should fail.
1193 */
1194 if (!PageSwapCache(page))
1195 return 0;
1196 mem = try_get_mem_cgroup_from_swapcache(page);
1197 if (!mem)
1198 goto charge_cur_mm;
1199 *ptr = mem;
1200 ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
1201 /* drop extra refcnt from tryget */
1202 css_put(&mem->css);
1203 return ret;
1204charge_cur_mm:
1205 if (unlikely(!mm))
1206 mm = &init_mm;
1207 return __mem_cgroup_try_charge(mm, mask, ptr, true);
1208}
1209
1210void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1211{
1212 struct page_cgroup *pc;
1213
1214 if (mem_cgroup_disabled())
1215 return;
1216 if (!ptr)
1217 return;
1218 pc = lookup_page_cgroup(page);
1219 mem_cgroup_lru_del_before_commit_swapcache(page);
1220 __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1221 mem_cgroup_lru_add_after_commit_swapcache(page);
1222 /*
1223 * Now the swap entry is in memory, so this page may be counted both
1224 * as mem and as swap: a double count.
1225 * Fix it by uncharging from memsw. This SwapCache is stable under
1226 * lock_page(), but reuse_swap_page() in do_swap_page() (memory.c)
1227 * may call delete_from_swap_cache() before we reach here.
1228 */
1229 if (do_swap_account && PageSwapCache(page)) {
1230 swp_entry_t ent = {.val = page_private(page)};
1231 struct mem_cgroup *memcg;
1232 memcg = swap_cgroup_record(ent, NULL);
1233 if (memcg) {
1234 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1235 mem_cgroup_put(memcg);
1236 }
1237
1238 }
1239 /* add this page (page_cgroup) to the LRU we want. */
1240
629} 1241}
630 1242
1243void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1244{
1245 if (mem_cgroup_disabled())
1246 return;
1247 if (!mem)
1248 return;
1249 res_counter_uncharge(&mem->res, PAGE_SIZE);
1250 if (do_swap_account)
1251 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1252 css_put(&mem->css);
1253}
1254
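The three functions above form a two-phase protocol: try_charge reserves, and exactly one of commit or cancel then consumes the reservation. A hedged caller fragment mirroring how do_swap_page() is expected to use it (pte_still_matches stands in for the real pte_same() test; locking and error paths elided):

	struct mem_cgroup *ptr = NULL;

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
		goto out;	/* nothing reserved, nothing to undo */

	if (pte_still_matches)
		mem_cgroup_commit_charge_swapin(page, ptr);	/* reservation sticks */
	else
		mem_cgroup_cancel_charge_swapin(ptr);		/* reservation rolled back */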
1255
631/* 1256/*
632 * uncharge if !page_mapped(page) 1257 * uncharge if !page_mapped(page)
633 */ 1258 */
634static void 1259static struct mem_cgroup *
635__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) 1260__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
636{ 1261{
637 struct page_cgroup *pc; 1262 struct page_cgroup *pc;
638 struct mem_cgroup *mem; 1263 struct mem_cgroup *mem = NULL;
639 struct mem_cgroup_per_zone *mz; 1264 struct mem_cgroup_per_zone *mz;
640 unsigned long flags;
641 1265
642 if (mem_cgroup_subsys.disabled) 1266 if (mem_cgroup_disabled())
643 return; 1267 return NULL;
1268
1269 if (PageSwapCache(page))
1270 return NULL;
644 1271
645 /* 1272 /*
646 * Check if our page_cgroup is valid 1273 * Check if our page_cgroup is valid
647 */ 1274 */
648 pc = lookup_page_cgroup(page); 1275 pc = lookup_page_cgroup(page);
649 if (unlikely(!pc || !PageCgroupUsed(pc))) 1276 if (unlikely(!pc || !PageCgroupUsed(pc)))
650 return; 1277 return NULL;
651 1278
652 lock_page_cgroup(pc); 1279 lock_page_cgroup(pc);
653 if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page)) 1280
654 || !PageCgroupUsed(pc)) { 1281 mem = pc->mem_cgroup;
655 /* This happens at race in zap_pte_range() and do_swap_page()*/ 1282
656 unlock_page_cgroup(pc); 1283 if (!PageCgroupUsed(pc))
657 return; 1284 goto unlock_out;
1285
1286 switch (ctype) {
1287 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1288 if (page_mapped(page))
1289 goto unlock_out;
1290 break;
1291 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1292 if (!PageAnon(page)) { /* Shared memory */
1293 if (page->mapping && !page_is_file_cache(page))
1294 goto unlock_out;
1295 } else if (page_mapped(page)) /* Anon */
1296 goto unlock_out;
1297 break;
1298 default:
1299 break;
658 } 1300 }
1301
1302 res_counter_uncharge(&mem->res, PAGE_SIZE);
1303 if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1304 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1305
1306 mem_cgroup_charge_statistics(mem, pc, false);
659 ClearPageCgroupUsed(pc); 1307 ClearPageCgroupUsed(pc);
660 mem = pc->mem_cgroup; 1308 /*
1309 * pc->mem_cgroup is not cleared here. It will be accessed when it's
1310 * freed from LRU. This is safe because uncharged page is expected not
1311 * to be reused (freed soon). Exception is SwapCache, it's handled by
1312 * special functions.
1313 */
661 1314
662 mz = page_cgroup_zoneinfo(pc); 1315 mz = page_cgroup_zoneinfo(pc);
663 spin_lock_irqsave(&mz->lru_lock, flags);
664 __mem_cgroup_remove_list(mz, pc);
665 spin_unlock_irqrestore(&mz->lru_lock, flags);
666 unlock_page_cgroup(pc); 1316 unlock_page_cgroup(pc);
667 1317
668 res_counter_uncharge(&mem->res, PAGE_SIZE); 1318 /* at swapout, this memcg will be accessed to record to swap */
669 css_put(&mem->css); 1319 if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1320 css_put(&mem->css);
670 1321
671 return; 1322 return mem;
1323
1324unlock_out:
1325 unlock_page_cgroup(pc);
1326 return NULL;
672} 1327}
673 1328
674void mem_cgroup_uncharge_page(struct page *page) 1329void mem_cgroup_uncharge_page(struct page *page)
@@ -689,16 +1344,55 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
689} 1344}
690 1345
691/* 1346/*
692 * Before starting migration, account against new page. 1347 * Called from __delete_from_swap_cache(); drops the "page" account.
1348 * The memcg information is recorded in the swap_cgroup of "ent".
1349 */
1350void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1351{
1352 struct mem_cgroup *memcg;
1353
1354 memcg = __mem_cgroup_uncharge_common(page,
1355 MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1356 /* record memcg information */
1357 if (do_swap_account && memcg) {
1358 swap_cgroup_record(ent, memcg);
1359 mem_cgroup_get(memcg);
1360 }
1361 if (memcg)
1362 css_put(&memcg->css);
1363}
1364
1365#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1366/*
1367 * Called from swap_entry_free(). Removes the record in swap_cgroup and
1368 * uncharges the "memsw" account.
693 */ 1369 */
694int mem_cgroup_prepare_migration(struct page *page, struct page *newpage) 1370void mem_cgroup_uncharge_swap(swp_entry_t ent)
1371{
1372 struct mem_cgroup *memcg;
1373
1374 if (!do_swap_account)
1375 return;
1376
1377 memcg = swap_cgroup_record(ent, NULL);
1378 if (memcg) {
1379 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1380 mem_cgroup_put(memcg);
1381 }
1382}
1383#endif
1384
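Taken together, the two functions above hand the memsw charge off to the swap entry: at swapout the memcg is recorded in swap_cgroup and a reference is taken; when the swap entry is finally freed, the record is cleared, memsw is uncharged, and the reference is dropped. A compressed fragment of the lifecycle (helper names are the ones introduced by this patch):

/* swapout: the entry now owns both the memsw charge and a memcg ref */
memcg = __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
if (memcg) {
	swap_cgroup_record(ent, memcg);
	mem_cgroup_get(memcg);
}

/* swap_entry_free: the entry dies, so release both */
memcg = swap_cgroup_record(ent, NULL);
if (memcg) {
	res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
	mem_cgroup_put(memcg);
}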
1385/*
1386 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
1387 * page belongs to.
1388 */
1389int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
695{ 1390{
696 struct page_cgroup *pc; 1391 struct page_cgroup *pc;
697 struct mem_cgroup *mem = NULL; 1392 struct mem_cgroup *mem = NULL;
698 enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
699 int ret = 0; 1393 int ret = 0;
700 1394
701 if (mem_cgroup_subsys.disabled) 1395 if (mem_cgroup_disabled())
702 return 0; 1396 return 0;
703 1397
704 pc = lookup_page_cgroup(page); 1398 pc = lookup_page_cgroup(page);
@@ -706,41 +1400,67 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
706 if (PageCgroupUsed(pc)) { 1400 if (PageCgroupUsed(pc)) {
707 mem = pc->mem_cgroup; 1401 mem = pc->mem_cgroup;
708 css_get(&mem->css); 1402 css_get(&mem->css);
709 if (PageCgroupCache(pc)) {
710 if (page_is_file_cache(page))
711 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
712 else
713 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
714 }
715 } 1403 }
716 unlock_page_cgroup(pc); 1404 unlock_page_cgroup(pc);
1405
717 if (mem) { 1406 if (mem) {
718 ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL, 1407 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
719 ctype, mem);
720 css_put(&mem->css); 1408 css_put(&mem->css);
721 } 1409 }
1410 *ptr = mem;
722 return ret; 1411 return ret;
723} 1412}
724 1413
725/* remove redundant charge if migration failed*/ 1414/* remove redundant charge if migration failed*/
726void mem_cgroup_end_migration(struct page *newpage) 1415void mem_cgroup_end_migration(struct mem_cgroup *mem,
1416 struct page *oldpage, struct page *newpage)
727{ 1417{
1418 struct page *target, *unused;
1419 struct page_cgroup *pc;
1420 enum charge_type ctype;
1421
1422 if (!mem)
1423 return;
1424
1425 /* at migration success, oldpage->mapping is NULL. */
1426 if (oldpage->mapping) {
1427 target = oldpage;
1428 unused = NULL;
1429 } else {
1430 target = newpage;
1431 unused = oldpage;
1432 }
1433
1434 if (PageAnon(target))
1435 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1436 else if (page_is_file_cache(target))
1437 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1438 else
1439 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1440
1441 /* unused page is not on radix-tree now. */
1442 if (unused)
1443 __mem_cgroup_uncharge_common(unused, ctype);
1444
1445 pc = lookup_page_cgroup(target);
728 /* 1446 /*
729 * At success, page->mapping is not NULL. 1447 * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup.
730 * special rollback care is necessary when 1448 * So, double-counting is effectively avoided.
731 * 1. at migration failure. (newpage->mapping is cleared in this case)
732 * 2. the newpage was moved but not remapped again because the task
733 * exits and the newpage is obsolete. In this case, the new page
734 * may be a swapcache. So, we just call mem_cgroup_uncharge_page()
735 * always for avoiding mess. The page_cgroup will be removed if
736 * unnecessary. File cache pages is still on radix-tree. Don't
737 * care it.
738 */ 1449 */
739 if (!newpage->mapping) 1450 __mem_cgroup_commit_charge(mem, pc, ctype);
740 __mem_cgroup_uncharge_common(newpage, 1451
741 MEM_CGROUP_CHARGE_TYPE_FORCE); 1452 /*
742 else if (PageAnon(newpage)) 1453 * Both of oldpage and newpage are still under lock_page().
743 mem_cgroup_uncharge_page(newpage); 1454 * Then, we don't have to care about race in radix-tree.
1455 * But we have to be careful that this page is unmapped or not.
1456 *
1457 * There is a !page_mapped() case: at the start of migration the
1458 * oldpage was mapped, but by now it may have been zapped.
1459 * We do know the *target* page is not freed/reused under us, and
1460 * mem_cgroup_uncharge_page() does all necessary checks.
1461 */
1462 if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1463 mem_cgroup_uncharge_page(target);
744} 1464}
745 1465
746/* 1466/*
@@ -748,29 +1468,26 @@ void mem_cgroup_end_migration(struct page *newpage)
748 * This is typically used when reclaiming shmem pages, to reduce the side 1468 * This is typically used when reclaiming shmem pages, to reduce the side
749 * effects of shmem page allocation on the mem_cgroups that use it. 1469 * effects of shmem page allocation on the mem_cgroups that use it.
750 */ 1470 */
751int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) 1471int mem_cgroup_shrink_usage(struct page *page,
1472 struct mm_struct *mm,
1473 gfp_t gfp_mask)
752{ 1474{
753 struct mem_cgroup *mem; 1475 struct mem_cgroup *mem = NULL;
754 int progress = 0; 1476 int progress = 0;
755 int retry = MEM_CGROUP_RECLAIM_RETRIES; 1477 int retry = MEM_CGROUP_RECLAIM_RETRIES;
756 1478
757 if (mem_cgroup_subsys.disabled) 1479 if (mem_cgroup_disabled())
758 return 0; 1480 return 0;
759 if (!mm) 1481 if (page)
1482 mem = try_get_mem_cgroup_from_swapcache(page);
1483 if (!mem && mm)
1484 mem = try_get_mem_cgroup_from_mm(mm);
1485 if (unlikely(!mem))
760 return 0; 1486 return 0;
761 1487
762 rcu_read_lock();
763 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
764 if (unlikely(!mem)) {
765 rcu_read_unlock();
766 return 0;
767 }
768 css_get(&mem->css);
769 rcu_read_unlock();
770
771 do { 1488 do {
772 progress = try_to_free_mem_cgroup_pages(mem, gfp_mask); 1489 progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
773 progress += res_counter_check_under_limit(&mem->res); 1490 progress += mem_cgroup_check_under_limit(mem);
774 } while (!progress && --retry); 1491 } while (!progress && --retry);
775 1492
776 css_put(&mem->css); 1493 css_put(&mem->css);
@@ -779,117 +1496,295 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
779 return 0; 1496 return 0;
780} 1497}
781 1498
1499static DEFINE_MUTEX(set_limit_mutex);
1500
782static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 1501static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
783 unsigned long long val) 1502 unsigned long long val)
784{ 1503{
785 1504
786 int retry_count = MEM_CGROUP_RECLAIM_RETRIES; 1505 int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
787 int progress; 1506 int progress;
1507 u64 memswlimit;
788 int ret = 0; 1508 int ret = 0;
789 1509
790 while (res_counter_set_limit(&memcg->res, val)) { 1510 while (retry_count) {
791 if (signal_pending(current)) { 1511 if (signal_pending(current)) {
792 ret = -EINTR; 1512 ret = -EINTR;
793 break; 1513 break;
794 } 1514 }
795 if (!retry_count) { 1515 /*
796 ret = -EBUSY; 1516 * Rather than hiding all this in some function, do it
1517 * open-coded so it is clear what really happens.
1518 * We have to guarantee mem->res.limit <= mem->memsw.limit.
1519 */
1520 mutex_lock(&set_limit_mutex);
1521 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1522 if (memswlimit < val) {
1523 ret = -EINVAL;
1524 mutex_unlock(&set_limit_mutex);
797 break; 1525 break;
798 } 1526 }
799 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL); 1527 ret = res_counter_set_limit(&memcg->res, val);
800 if (!progress) 1528 mutex_unlock(&set_limit_mutex);
801 retry_count--; 1529
1530 if (!ret)
1531 break;
1532
1533 progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
1534 false);
1535 if (!progress)
	retry_count--;
802 } 1536 }
1537
803 return ret; 1538 return ret;
804} 1539}
805 1540
1541int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1542 unsigned long long val)
1543{
1544 int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1545 u64 memlimit, oldusage, curusage;
1546 int ret;
1547
1548 if (!do_swap_account)
1549 return -EINVAL;
1550
1551 while (retry_count) {
1552 if (signal_pending(current)) {
1553 ret = -EINTR;
1554 break;
1555 }
1556 /*
1557 * Rather than hiding it all in some helper, do this in an
1558 * open-coded manner so you can see what it really does:
1559 * we have to guarantee mem->res.limit <= mem->memsw.limit.
1560 */
1561 mutex_lock(&set_limit_mutex);
1562 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1563 if (memlimit > val) {
1564 ret = -EINVAL;
1565 mutex_unlock(&set_limit_mutex);
1566 break;
1567 }
1568 ret = res_counter_set_limit(&memcg->memsw, val);
1569 mutex_unlock(&set_limit_mutex);
1570
1571 if (!ret)
1572 break;
1573
1574 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1575 mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
1576 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1577 if (curusage >= oldusage)
1578 retry_count--;
1579 }
1580 return ret;
1581}
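
Both resize paths above serialize on set_limit_mutex, read the sibling counter's limit, and bail out with -EINVAL before res_counter_set_limit() whenever the new value would break mem->res.limit <= mem->memsw.limit. A minimal userspace sketch of that check-then-set pattern, assuming a pthread mutex and illustrative names (limits_lock, set_mem_limit and set_memsw_limit are not kernel API):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    /* Two limits with the invariant mem_limit <= memsw_limit; both
     * setters validate under one lock, mirroring the checks above. */
    static unsigned long long mem_limit = ~0ULL, memsw_limit = ~0ULL;
    static pthread_mutex_t limits_lock = PTHREAD_MUTEX_INITIALIZER;

    static int set_mem_limit(unsigned long long val)
    {
            int ret = 0;

            pthread_mutex_lock(&limits_lock);
            if (val > memsw_limit)          /* would break mem <= memsw */
                    ret = -EINVAL;
            else
                    mem_limit = val;
            pthread_mutex_unlock(&limits_lock);
            return ret;
    }

    static int set_memsw_limit(unsigned long long val)
    {
            int ret = 0;

            pthread_mutex_lock(&limits_lock);
            if (val < mem_limit)            /* would break mem <= memsw */
                    ret = -EINVAL;
            else
                    memsw_limit = val;
            pthread_mutex_unlock(&limits_lock);
            return ret;
    }

    int main(void)
    {
            printf("%d ", set_mem_limit(1 << 20));          /* 0 */
            printf("%d ", set_memsw_limit(1 << 19));        /* -EINVAL: below mem */
            printf("%d\n", set_memsw_limit(2 << 20));       /* 0 */
            return 0;
    }

Because both setters take the same mutex, a racing reader can never observe mem_limit above memsw_limit, which is the property the rest of the patch relies on when charging against both counters.
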
806 1582
807/* 1583/*
808 * This routine traverses the page_cgroups in the given list and drops them all. 1584 * This routine traverses the page_cgroups in the given list and drops them all.
809 * *And* it doesn't reclaim the pages themselves, it just removes the page_cgroups. 1585 * *And* it doesn't reclaim the pages themselves, it just removes the page_cgroups.
810 */ 1586 */
811#define FORCE_UNCHARGE_BATCH (128) 1587static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
812static void mem_cgroup_force_empty_list(struct mem_cgroup *mem, 1588 int node, int zid, enum lru_list lru)
813 struct mem_cgroup_per_zone *mz,
814 enum lru_list lru)
815{ 1589{
816 struct page_cgroup *pc; 1590 struct zone *zone;
817 struct page *page; 1591 struct mem_cgroup_per_zone *mz;
818 int count = FORCE_UNCHARGE_BATCH; 1592 struct page_cgroup *pc, *busy;
819 unsigned long flags; 1593 unsigned long flags, loop;
820 struct list_head *list; 1594 struct list_head *list;
1595 int ret = 0;
821 1596
1597 zone = &NODE_DATA(node)->node_zones[zid];
1598 mz = mem_cgroup_zoneinfo(mem, node, zid);
822 list = &mz->lists[lru]; 1599 list = &mz->lists[lru];
823 1600
824 spin_lock_irqsave(&mz->lru_lock, flags); 1601 loop = MEM_CGROUP_ZSTAT(mz, lru);
825 while (!list_empty(list)) { 1602 /* give some margin against EBUSY etc... */
826 pc = list_entry(list->prev, struct page_cgroup, lru); 1603 loop += 256;
827 page = pc->page; 1604 busy = NULL;
828 if (!PageCgroupUsed(pc)) 1605 while (loop--) {
829 break; 1606 ret = 0;
830 get_page(page); 1607 spin_lock_irqsave(&zone->lru_lock, flags);
831 spin_unlock_irqrestore(&mz->lru_lock, flags); 1608 if (list_empty(list)) {
832 /* 1609 spin_unlock_irqrestore(&zone->lru_lock, flags);
833 * Check if this page is on LRU. !LRU page can be found
834 * if it's under page migration.
835 */
836 if (PageLRU(page)) {
837 __mem_cgroup_uncharge_common(page,
838 MEM_CGROUP_CHARGE_TYPE_FORCE);
839 put_page(page);
840 if (--count <= 0) {
841 count = FORCE_UNCHARGE_BATCH;
842 cond_resched();
843 }
844 } else {
845 spin_lock_irqsave(&mz->lru_lock, flags);
846 break; 1610 break;
847 } 1611 }
848 spin_lock_irqsave(&mz->lru_lock, flags); 1612 pc = list_entry(list->prev, struct page_cgroup, lru);
1613 if (busy == pc) {
1614 list_move(&pc->lru, list);
1615 busy = NULL;
1616 spin_unlock_irqrestore(&zone->lru_lock, flags);
1617 continue;
1618 }
1619 spin_unlock_irqrestore(&zone->lru_lock, flags);
1620
1621 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1622 if (ret == -ENOMEM)
1623 break;
1624
1625 if (ret == -EBUSY || ret == -EINVAL) {
1626 /* found lock contention or "pc" is obsolete. */
1627 busy = pc;
1628 cond_resched();
1629 } else
1630 busy = NULL;
849 } 1631 }
850 spin_unlock_irqrestore(&mz->lru_lock, flags); 1632
1633 if (!ret && !list_empty(list))
1634 return -EBUSY;
1635 return ret;
851} 1636}
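
mem_cgroup_force_empty_list() above drains the LRU list from its tail inside a bounded loop (the list length plus a slack of 256), remembers the last page_cgroup that came back -EBUSY, and rotates it to the head if it shows up again immediately, so a single contended entry cannot stall the drain. A self-contained sketch of that busy-rotate pattern on a plain array, where drain_one() and the busy_once flags are illustrative stand-ins for mem_cgroup_move_parent() and transient lock contention:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define N 8

    static int queue[N];            /* drained from the tail */
    static int busy_once[N];        /* values that fail once with -EBUSY */
    static int len = N;

    static int drain_one(int idx)   /* stand-in for mem_cgroup_move_parent() */
    {
            if (busy_once[queue[idx]]) {
                    busy_once[queue[idx]] = 0;
                    return -EBUSY;
            }
            memmove(&queue[idx], &queue[idx + 1], (len - idx - 1) * sizeof(int));
            len--;
            return 0;
    }

    int main(void)
    {
            long loop = len + 8;    /* bounded, with margin, like "loop += 256" */
            int busy = -1;          /* last value that reported -EBUSY */

            for (int i = 0; i < N; i++)
                    queue[i] = i;
            busy_once[2] = busy_once[5] = 1;

            while (loop-- && len) {
                    int tail = len - 1;

                    if (queue[tail] == busy) {
                            /* same busy entry again: rotate it to the front
                             * so the rest of the queue can make progress */
                            memmove(&queue[1], &queue[0], tail * sizeof(int));
                            queue[0] = busy;
                            busy = -1;
                            continue;
                    }
                    if (drain_one(tail) == -EBUSY)
                            busy = queue[tail];
            }
            printf("left after bounded drain: %d\n", len);  /* 0 */
            return 0;
    }
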
852 1637
853/* 1638/*
854 * Make the mem_cgroup's charge 0 if there is no task, 1639 * Make the mem_cgroup's charge 0 if there is no task,
855 * which enables deleting this mem_cgroup. 1640 * which enables deleting this mem_cgroup.
856 */ 1641 */
857static int mem_cgroup_force_empty(struct mem_cgroup *mem) 1642static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
858{ 1643{
859 int ret = -EBUSY; 1644 int ret;
860 int node, zid; 1645 int node, zid, shrink;
1646 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1647 struct cgroup *cgrp = mem->css.cgroup;
861 1648
862 css_get(&mem->css); 1649 css_get(&mem->css);
863 /* 1650
864 * page reclaim code (kswapd etc..) will move pages between 1651 shrink = 0;
865 * active_list <-> inactive_list while we don't take a lock. 1652 /* should free all ? */
866 * So, we have to do loop here until all lists are empty. 1653 if (free_all)
867 */ 1654 goto try_to_free;
1655move_account:
868 while (mem->res.usage > 0) { 1656 while (mem->res.usage > 0) {
869 if (atomic_read(&mem->css.cgroup->count) > 0) 1657 ret = -EBUSY;
1658 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1659 goto out;
1660 ret = -EINTR;
1661 if (signal_pending(current))
870 goto out; 1662 goto out;
871 /* This is for making all *used* pages to be on LRU. */ 1663 /* This is for making all *used* pages to be on LRU. */
872 lru_add_drain_all(); 1664 lru_add_drain_all();
873 for_each_node_state(node, N_POSSIBLE) 1665 ret = 0;
874 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1666 for_each_node_state(node, N_POSSIBLE) {
875 struct mem_cgroup_per_zone *mz; 1667 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
876 enum lru_list l; 1668 enum lru_list l;
877 mz = mem_cgroup_zoneinfo(mem, node, zid); 1669 for_each_lru(l) {
878 for_each_lru(l) 1670 ret = mem_cgroup_force_empty_list(mem,
879 mem_cgroup_force_empty_list(mem, mz, l); 1671 node, zid, l);
1672 if (ret)
1673 break;
1674 }
880 } 1675 }
1676 if (ret)
1677 break;
1678 }
1679 /* the parent cgroup doesn't seem to have enough memory */
1680 if (ret == -ENOMEM)
1681 goto try_to_free;
881 cond_resched(); 1682 cond_resched();
882 } 1683 }
883 ret = 0; 1684 ret = 0;
884out: 1685out:
885 css_put(&mem->css); 1686 css_put(&mem->css);
886 return ret; 1687 return ret;
1688
1689try_to_free:
1690 /* returns -EBUSY if there is a task or if we come here twice */
1691 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1692 ret = -EBUSY;
1693 goto out;
1694 }
1695 /* we call try-to-free pages to make this cgroup empty */
1696 lru_add_drain_all();
1697 /* try to free all pages in this cgroup */
1698 shrink = 1;
1699 while (nr_retries && mem->res.usage > 0) {
1700 int progress;
1701
1702 if (signal_pending(current)) {
1703 ret = -EINTR;
1704 goto out;
1705 }
1706 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
1707 false, get_swappiness(mem));
1708 if (!progress) {
1709 nr_retries--;
1710 /* maybe some writeback is necessary */
1711 congestion_wait(WRITE, HZ/10);
1712 }
1713
1714 }
1715 lru_add_drain();
1716 /* try move_account...there may be some *locked* pages. */
1717 if (mem->res.usage)
1718 goto move_account;
1719 ret = 0;
1720 goto out;
1721}
1722
1723int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1724{
1725 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1726}
1727
1728
1729static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1730{
1731 return mem_cgroup_from_cont(cont)->use_hierarchy;
1732}
1733
1734static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1735 u64 val)
1736{
1737 int retval = 0;
1738 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1739 struct cgroup *parent = cont->parent;
1740 struct mem_cgroup *parent_mem = NULL;
1741
1742 if (parent)
1743 parent_mem = mem_cgroup_from_cont(parent);
1744
1745 cgroup_lock();
1746 /*
1747 * If the parent's use_hierarchy is set, we can't make any modifications
1748 * in the child subtrees. If it is unset, then the change can
1749 * occur, provided the current cgroup has no children.
1750 *
1751 * For the root cgroup, parent_mem is NULL, so we allow the value to be
1752 * set if there are no children.
1753 */
1754 if ((!parent_mem || !parent_mem->use_hierarchy) &&
1755 (val == 1 || val == 0)) {
1756 if (list_empty(&cont->children))
1757 mem->use_hierarchy = val;
1758 else
1759 retval = -EBUSY;
1760 } else
1761 retval = -EINVAL;
1762 cgroup_unlock();
1763
1764 return retval;
887} 1765}
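
The write handler above only lets use_hierarchy change while nothing can observe an inconsistent policy: the parent must not already enforce hierarchy, and the cgroup must have no children. A compact sketch of the same validation, using an illustrative struct cg rather than the real cgroup types:

    #include <errno.h>
    #include <stdio.h>

    struct cg {
            int use_hierarchy;
            int nr_children;
            struct cg *parent;
    };

    static int set_use_hierarchy(struct cg *cg, int val)
    {
            if (val != 0 && val != 1)
                    return -EINVAL;
            if (cg->parent && cg->parent->use_hierarchy)
                    return -EINVAL; /* parent already fixed the policy */
            if (cg->nr_children)
                    return -EBUSY;  /* children exist under the old policy */
            cg->use_hierarchy = val;
            return 0;
    }

    int main(void)
    {
            struct cg root = { 0, 1, NULL };
            struct cg child = { 0, 0, &root };

            printf("%d\n", set_use_hierarchy(&child, 1)); /* 0: allowed */
            printf("%d\n", set_use_hierarchy(&root, 1));  /* -EBUSY: has a child */
            return 0;
    }
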
888 1766
889static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) 1767static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
890{ 1768{
891 return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res, 1769 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
892 cft->private); 1770 u64 val = 0;
1771 int type, name;
1772
1773 type = MEMFILE_TYPE(cft->private);
1774 name = MEMFILE_ATTR(cft->private);
1775 switch (type) {
1776 case _MEM:
1777 val = res_counter_read_u64(&mem->res, name);
1778 break;
1779 case _MEMSWAP:
1780 if (do_swap_account)
1781 val = res_counter_read_u64(&mem->memsw, name);
1782 break;
1783 default:
1784 BUG();
1785 break;
1786 }
1787 return val;
893} 1788}
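
mem_cgroup_read() demultiplexes cft->private with MEMFILE_TYPE()/MEMFILE_ATTR(), the readers for the MEMFILE_PRIVATE() packing used in the cftype tables further down: a resource type in the high bits and a RES_* attribute in the low bits, so one handler can serve both the memory and mem+swap files. A standalone sketch of that shift-and-mask scheme (the macro bodies and the enum ordering here are illustrative, not copied from the patch):

    #include <assert.h>
    #include <stdio.h>

    #define _MEM            0
    #define _MEMSWAP        1
    #define MEMFILE_PRIVATE(type, attr)     (((type) << 16) | (attr))
    #define MEMFILE_TYPE(val)               (((val) >> 16) & 0xffff)
    #define MEMFILE_ATTR(val)               ((val) & 0xffff)

    enum { RES_USAGE, RES_LIMIT, RES_MAX_USAGE, RES_FAILCNT }; /* illustrative order */

    int main(void)
    {
            int priv = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);

            assert(MEMFILE_TYPE(priv) == _MEMSWAP);
            assert(MEMFILE_ATTR(priv) == RES_LIMIT);
            printf("private=0x%x type=%d attr=%d\n",
                   priv, MEMFILE_TYPE(priv), MEMFILE_ATTR(priv));
            return 0;
    }
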
894/* 1789/*
895 * The user of this function is... 1790 * The user of this function is...
@@ -899,15 +1794,22 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
899 const char *buffer) 1794 const char *buffer)
900{ 1795{
901 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 1796 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1797 int type, name;
902 unsigned long long val; 1798 unsigned long long val;
903 int ret; 1799 int ret;
904 1800
905 switch (cft->private) { 1801 type = MEMFILE_TYPE(cft->private);
1802 name = MEMFILE_ATTR(cft->private);
1803 switch (name) {
906 case RES_LIMIT: 1804 case RES_LIMIT:
907 /* This function does all the necessary parsing; reuse it */ 1805 /* This function does all the necessary parsing; reuse it */
908 ret = res_counter_memparse_write_strategy(buffer, &val); 1806 ret = res_counter_memparse_write_strategy(buffer, &val);
909 if (!ret) 1807 if (ret)
1808 break;
1809 if (type == _MEM)
910 ret = mem_cgroup_resize_limit(memcg, val); 1810 ret = mem_cgroup_resize_limit(memcg, val);
1811 else
1812 ret = mem_cgroup_resize_memsw_limit(memcg, val);
911 break; 1813 break;
912 default: 1814 default:
913 ret = -EINVAL; /* should be BUG() ? */ 1815 ret = -EINVAL; /* should be BUG() ? */
@@ -916,27 +1818,59 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
916 return ret; 1818 return ret;
917} 1819}
918 1820
1821static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
1822 unsigned long long *mem_limit, unsigned long long *memsw_limit)
1823{
1824 struct cgroup *cgroup;
1825 unsigned long long min_limit, min_memsw_limit, tmp;
1826
1827 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1828 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1829 cgroup = memcg->css.cgroup;
1830 if (!memcg->use_hierarchy)
1831 goto out;
1832
1833 while (cgroup->parent) {
1834 cgroup = cgroup->parent;
1835 memcg = mem_cgroup_from_cont(cgroup);
1836 if (!memcg->use_hierarchy)
1837 break;
1838 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
1839 min_limit = min(min_limit, tmp);
1840 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1841 min_memsw_limit = min(min_memsw_limit, tmp);
1842 }
1843out:
1844 *mem_limit = min_limit;
1845 *memsw_limit = min_memsw_limit;
1846 return;
1847}
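
memcg_get_hierarchical_limit() walks from the cgroup toward the root, stops at the first ancestor with use_hierarchy unset, and keeps the minimum limit seen, which is the limit that effectively applies to a hierarchy member. A userspace sketch of that walk over an illustrative struct node:

    #include <stdio.h>

    struct node {
            unsigned long long limit;
            int use_hierarchy;
            struct node *parent;
    };

    static unsigned long long effective_limit(struct node *n)
    {
            unsigned long long min = n->limit;

            if (!n->use_hierarchy)
                    return min;
            for (n = n->parent; n; n = n->parent) {
                    if (!n->use_hierarchy)  /* hierarchy ends here */
                            break;
                    if (n->limit < min)
                            min = n->limit;
            }
            return min;
    }

    int main(void)
    {
            struct node root = { 1000, 1, NULL };
            struct node mid  = {  400, 1, &root };
            struct node leaf = {  700, 1, &mid };

            printf("%llu\n", effective_limit(&leaf));       /* 400 */
            return 0;
    }
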
1848
919static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) 1849static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
920{ 1850{
921 struct mem_cgroup *mem; 1851 struct mem_cgroup *mem;
1852 int type, name;
922 1853
923 mem = mem_cgroup_from_cont(cont); 1854 mem = mem_cgroup_from_cont(cont);
924 switch (event) { 1855 type = MEMFILE_TYPE(event);
1856 name = MEMFILE_ATTR(event);
1857 switch (name) {
925 case RES_MAX_USAGE: 1858 case RES_MAX_USAGE:
926 res_counter_reset_max(&mem->res); 1859 if (type == _MEM)
1860 res_counter_reset_max(&mem->res);
1861 else
1862 res_counter_reset_max(&mem->memsw);
927 break; 1863 break;
928 case RES_FAILCNT: 1864 case RES_FAILCNT:
929 res_counter_reset_failcnt(&mem->res); 1865 if (type == _MEM)
1866 res_counter_reset_failcnt(&mem->res);
1867 else
1868 res_counter_reset_failcnt(&mem->memsw);
930 break; 1869 break;
931 } 1870 }
932 return 0; 1871 return 0;
933} 1872}
934 1873
935static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
936{
937 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
938}
939
940static const struct mem_cgroup_stat_desc { 1874static const struct mem_cgroup_stat_desc {
941 const char *msg; 1875 const char *msg;
942 u64 unit; 1876 u64 unit;
@@ -985,43 +1919,163 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
985 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE); 1919 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
986 1920
987 } 1921 }
1922 {
1923 unsigned long long limit, memsw_limit;
1924 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
1925 cb->fill(cb, "hierarchical_memory_limit", limit);
1926 if (do_swap_account)
1927 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
1928 }
1929
1930#ifdef CONFIG_DEBUG_VM
1931 cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
1932
1933 {
1934 int nid, zid;
1935 struct mem_cgroup_per_zone *mz;
1936 unsigned long recent_rotated[2] = {0, 0};
1937 unsigned long recent_scanned[2] = {0, 0};
1938
1939 for_each_online_node(nid)
1940 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1941 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
1942
1943 recent_rotated[0] +=
1944 mz->reclaim_stat.recent_rotated[0];
1945 recent_rotated[1] +=
1946 mz->reclaim_stat.recent_rotated[1];
1947 recent_scanned[0] +=
1948 mz->reclaim_stat.recent_scanned[0];
1949 recent_scanned[1] +=
1950 mz->reclaim_stat.recent_scanned[1];
1951 }
1952 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
1953 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
1954 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
1955 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
1956 }
1957#endif
1958
1959 return 0;
1960}
1961
1962static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
1963{
1964 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
1965
1966 return get_swappiness(memcg);
1967}
1968
1969static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
1970 u64 val)
1971{
1972 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
1973 struct mem_cgroup *parent;
1974 if (val > 100)
1975 return -EINVAL;
1976
1977 if (cgrp->parent == NULL)
1978 return -EINVAL;
1979
1980 parent = mem_cgroup_from_cont(cgrp->parent);
1981 /* If under hierarchy, only an empty hierarchy root can set this value */
1982 if ((parent->use_hierarchy) ||
1983 (memcg->use_hierarchy && !list_empty(&cgrp->children)))
1984 return -EINVAL;
1985
1986 spin_lock(&memcg->reclaim_param_lock);
1987 memcg->swappiness = val;
1988 spin_unlock(&memcg->reclaim_param_lock);
1989
988 return 0; 1990 return 0;
989} 1991}
990 1992
1993
991static struct cftype mem_cgroup_files[] = { 1994static struct cftype mem_cgroup_files[] = {
992 { 1995 {
993 .name = "usage_in_bytes", 1996 .name = "usage_in_bytes",
994 .private = RES_USAGE, 1997 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
995 .read_u64 = mem_cgroup_read, 1998 .read_u64 = mem_cgroup_read,
996 }, 1999 },
997 { 2000 {
998 .name = "max_usage_in_bytes", 2001 .name = "max_usage_in_bytes",
999 .private = RES_MAX_USAGE, 2002 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
1000 .trigger = mem_cgroup_reset, 2003 .trigger = mem_cgroup_reset,
1001 .read_u64 = mem_cgroup_read, 2004 .read_u64 = mem_cgroup_read,
1002 }, 2005 },
1003 { 2006 {
1004 .name = "limit_in_bytes", 2007 .name = "limit_in_bytes",
1005 .private = RES_LIMIT, 2008 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
1006 .write_string = mem_cgroup_write, 2009 .write_string = mem_cgroup_write,
1007 .read_u64 = mem_cgroup_read, 2010 .read_u64 = mem_cgroup_read,
1008 }, 2011 },
1009 { 2012 {
1010 .name = "failcnt", 2013 .name = "failcnt",
1011 .private = RES_FAILCNT, 2014 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
1012 .trigger = mem_cgroup_reset, 2015 .trigger = mem_cgroup_reset,
1013 .read_u64 = mem_cgroup_read, 2016 .read_u64 = mem_cgroup_read,
1014 }, 2017 },
1015 { 2018 {
2019 .name = "stat",
2020 .read_map = mem_control_stat_show,
2021 },
2022 {
1016 .name = "force_empty", 2023 .name = "force_empty",
1017 .trigger = mem_force_empty_write, 2024 .trigger = mem_cgroup_force_empty_write,
1018 }, 2025 },
1019 { 2026 {
1020 .name = "stat", 2027 .name = "use_hierarchy",
1021 .read_map = mem_control_stat_show, 2028 .write_u64 = mem_cgroup_hierarchy_write,
2029 .read_u64 = mem_cgroup_hierarchy_read,
2030 },
2031 {
2032 .name = "swappiness",
2033 .read_u64 = mem_cgroup_swappiness_read,
2034 .write_u64 = mem_cgroup_swappiness_write,
1022 }, 2035 },
1023}; 2036};
1024 2037
2038#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2039static struct cftype memsw_cgroup_files[] = {
2040 {
2041 .name = "memsw.usage_in_bytes",
2042 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
2043 .read_u64 = mem_cgroup_read,
2044 },
2045 {
2046 .name = "memsw.max_usage_in_bytes",
2047 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
2048 .trigger = mem_cgroup_reset,
2049 .read_u64 = mem_cgroup_read,
2050 },
2051 {
2052 .name = "memsw.limit_in_bytes",
2053 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
2054 .write_string = mem_cgroup_write,
2055 .read_u64 = mem_cgroup_read,
2056 },
2057 {
2058 .name = "memsw.failcnt",
2059 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
2060 .trigger = mem_cgroup_reset,
2061 .read_u64 = mem_cgroup_read,
2062 },
2063};
2064
2065static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2066{
2067 if (!do_swap_account)
2068 return 0;
2069 return cgroup_add_files(cont, ss, memsw_cgroup_files,
2070 ARRAY_SIZE(memsw_cgroup_files));
2071};
2072#else
2073static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2074{
2075 return 0;
2076}
2077#endif
2078
1025static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) 2079static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1026{ 2080{
1027 struct mem_cgroup_per_node *pn; 2081 struct mem_cgroup_per_node *pn;
@@ -1047,7 +2101,6 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1047 2101
1048 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 2102 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
1049 mz = &pn->zoneinfo[zone]; 2103 mz = &pn->zoneinfo[zone];
1050 spin_lock_init(&mz->lru_lock);
1051 for_each_lru(l) 2104 for_each_lru(l)
1052 INIT_LIST_HEAD(&mz->lists[l]); 2105 INIT_LIST_HEAD(&mz->lists[l]);
1053 } 2106 }
@@ -1059,55 +2112,113 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1059 kfree(mem->info.nodeinfo[node]); 2112 kfree(mem->info.nodeinfo[node]);
1060} 2113}
1061 2114
2115static int mem_cgroup_size(void)
2116{
2117 int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
2118 return sizeof(struct mem_cgroup) + cpustat_size;
2119}
2120
1062static struct mem_cgroup *mem_cgroup_alloc(void) 2121static struct mem_cgroup *mem_cgroup_alloc(void)
1063{ 2122{
1064 struct mem_cgroup *mem; 2123 struct mem_cgroup *mem;
2124 int size = mem_cgroup_size();
1065 2125
1066 if (sizeof(*mem) < PAGE_SIZE) 2126 if (size < PAGE_SIZE)
1067 mem = kmalloc(sizeof(*mem), GFP_KERNEL); 2127 mem = kmalloc(size, GFP_KERNEL);
1068 else 2128 else
1069 mem = vmalloc(sizeof(*mem)); 2129 mem = vmalloc(size);
1070 2130
1071 if (mem) 2131 if (mem)
1072 memset(mem, 0, sizeof(*mem)); 2132 memset(mem, 0, size);
1073 return mem; 2133 return mem;
1074} 2134}
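
mem_cgroup_size() appends one statistics slot per possible CPU to the base structure, and mem_cgroup_alloc() picks kmalloc() while the total still fits in a page, falling back to vmalloc() beyond that. A userspace sketch of the sizing arithmetic and the threshold; malloc() stands in for both kernel allocators and struct memcg_like is illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    struct stat_cpu { unsigned long count[4]; };
    struct memcg_like {
            unsigned long flags;
            struct stat_cpu stat[];         /* flexible tail, one slot per CPU */
    };

    static size_t memcg_like_size(long nr_cpus)
    {
            return sizeof(struct memcg_like) + nr_cpus * sizeof(struct stat_cpu);
    }

    int main(void)
    {
            long nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
            long page = sysconf(_SC_PAGESIZE);
            size_t size = memcg_like_size(nr_cpus);
            struct memcg_like *m = malloc(size);

            if (!m)
                    return 1;
            memset(m, 0, size);             /* like the memset() above */
            printf("%zu bytes for %ld cpus -> %s\n", size, nr_cpus,
                   size < (size_t)page ? "kmalloc path" : "vmalloc path");
            free(m);
            return 0;
    }
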
1075 2135
1076static void mem_cgroup_free(struct mem_cgroup *mem) 2136/*
2137 * When a mem_cgroup is destroyed, references from swap_cgroup entries
2138 * can remain. (Scanning them all at force_empty would be too costly...)
2139 *
2140 * Instead of clearing all references at force_empty, we remember the
2141 * number of references from swap_cgroup and free the mem_cgroup when
2142 * it drops to 0.
2143 *
2144 * Removal of the cgroup itself succeeds regardless of refs from swap.
2145 */
2146
2147static void __mem_cgroup_free(struct mem_cgroup *mem)
1077{ 2148{
1078 if (sizeof(*mem) < PAGE_SIZE) 2149 int node;
2150
2151 for_each_node_state(node, N_POSSIBLE)
2152 free_mem_cgroup_per_zone_info(mem, node);
2153
2154 if (mem_cgroup_size() < PAGE_SIZE)
1079 kfree(mem); 2155 kfree(mem);
1080 else 2156 else
1081 vfree(mem); 2157 vfree(mem);
1082} 2158}
1083 2159
2160static void mem_cgroup_get(struct mem_cgroup *mem)
2161{
2162 atomic_inc(&mem->refcnt);
2163}
2164
2165static void mem_cgroup_put(struct mem_cgroup *mem)
2166{
2167 if (atomic_dec_and_test(&mem->refcnt))
2168 __mem_cgroup_free(mem);
2169}
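
The get/put pair above is a plain reference count: swap_cgroup records pin the mem_cgroup, and __mem_cgroup_free() runs only when the count reaches zero, possibly long after the cgroup directory itself is removed. A minimal sketch of that lifetime rule with C11 atomics (obj_get/obj_put are illustrative names, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            atomic_int refcnt;
            /* ... payload ... */
    };

    static struct obj *obj_alloc(void)
    {
            struct obj *o = calloc(1, sizeof(*o));

            if (o)
                    atomic_store(&o->refcnt, 1);    /* like atomic_set(&mem->refcnt, 1) */
            return o;
    }

    static void obj_get(struct obj *o)
    {
            atomic_fetch_add(&o->refcnt, 1);
    }

    static void obj_put(struct obj *o)
    {
            /* fetch_sub returns the old value: old == 1 means we were last */
            if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
                    printf("last reference dropped, freeing\n");
                    free(o);
            }
    }

    int main(void)
    {
            struct obj *o = obj_alloc();

            obj_get(o);     /* e.g. a swap entry still points at the object */
            obj_put(o);     /* cgroup removal: the object survives */
            obj_put(o);     /* swap entry gone: now it is freed */
            return 0;
    }

The cgroup's own reference is the initial count of 1; dropping it in mem_cgroup_destroy() frees the object only once every swap reference is gone.
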
2170
2171
2172#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2173static void __init enable_swap_cgroup(void)
2174{
2175 if (!mem_cgroup_disabled() && really_do_swap_account)
2176 do_swap_account = 1;
2177}
2178#else
2179static void __init enable_swap_cgroup(void)
2180{
2181}
2182#endif
1084 2183
1085static struct cgroup_subsys_state * 2184static struct cgroup_subsys_state *
1086mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) 2185mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
1087{ 2186{
1088 struct mem_cgroup *mem; 2187 struct mem_cgroup *mem, *parent;
1089 int node; 2188 int node;
1090 2189
1091 if (unlikely((cont->parent) == NULL)) { 2190 mem = mem_cgroup_alloc();
1092 mem = &init_mem_cgroup; 2191 if (!mem)
1093 } else { 2192 return ERR_PTR(-ENOMEM);
1094 mem = mem_cgroup_alloc();
1095 if (!mem)
1096 return ERR_PTR(-ENOMEM);
1097 }
1098
1099 res_counter_init(&mem->res);
1100 2193
1101 for_each_node_state(node, N_POSSIBLE) 2194 for_each_node_state(node, N_POSSIBLE)
1102 if (alloc_mem_cgroup_per_zone_info(mem, node)) 2195 if (alloc_mem_cgroup_per_zone_info(mem, node))
1103 goto free_out; 2196 goto free_out;
2197 /* root ? */
2198 if (cont->parent == NULL) {
2199 enable_swap_cgroup();
2200 parent = NULL;
2201 } else {
2202 parent = mem_cgroup_from_cont(cont->parent);
2203 mem->use_hierarchy = parent->use_hierarchy;
2204 }
1104 2205
2206 if (parent && parent->use_hierarchy) {
2207 res_counter_init(&mem->res, &parent->res);
2208 res_counter_init(&mem->memsw, &parent->memsw);
2209 } else {
2210 res_counter_init(&mem->res, NULL);
2211 res_counter_init(&mem->memsw, NULL);
2212 }
2213 mem->last_scanned_child = NULL;
2214 spin_lock_init(&mem->reclaim_param_lock);
2215
2216 if (parent)
2217 mem->swappiness = get_swappiness(parent);
2218 atomic_set(&mem->refcnt, 1);
1105 return &mem->css; 2219 return &mem->css;
1106free_out: 2220free_out:
1107 for_each_node_state(node, N_POSSIBLE) 2221 __mem_cgroup_free(mem);
1108 free_mem_cgroup_per_zone_info(mem, node);
1109 if (cont->parent != NULL)
1110 mem_cgroup_free(mem);
1111 return ERR_PTR(-ENOMEM); 2222 return ERR_PTR(-ENOMEM);
1112} 2223}
1113 2224
@@ -1115,26 +2226,26 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
1115 struct cgroup *cont) 2226 struct cgroup *cont)
1116{ 2227{
1117 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 2228 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1118 mem_cgroup_force_empty(mem); 2229 mem_cgroup_force_empty(mem, false);
1119} 2230}
1120 2231
1121static void mem_cgroup_destroy(struct cgroup_subsys *ss, 2232static void mem_cgroup_destroy(struct cgroup_subsys *ss,
1122 struct cgroup *cont) 2233 struct cgroup *cont)
1123{ 2234{
1124 int node; 2235 mem_cgroup_put(mem_cgroup_from_cont(cont));
1125 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1126
1127 for_each_node_state(node, N_POSSIBLE)
1128 free_mem_cgroup_per_zone_info(mem, node);
1129
1130 mem_cgroup_free(mem_cgroup_from_cont(cont));
1131} 2236}
1132 2237
1133static int mem_cgroup_populate(struct cgroup_subsys *ss, 2238static int mem_cgroup_populate(struct cgroup_subsys *ss,
1134 struct cgroup *cont) 2239 struct cgroup *cont)
1135{ 2240{
1136 return cgroup_add_files(cont, ss, mem_cgroup_files, 2241 int ret;
1137 ARRAY_SIZE(mem_cgroup_files)); 2242
2243 ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2244 ARRAY_SIZE(mem_cgroup_files));
2245
2246 if (!ret)
2247 ret = register_memsw_files(cont, ss);
2248 return ret;
1138} 2249}
1139 2250
1140static void mem_cgroup_move_task(struct cgroup_subsys *ss, 2251static void mem_cgroup_move_task(struct cgroup_subsys *ss,
@@ -1142,25 +2253,12 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
1142 struct cgroup *old_cont, 2253 struct cgroup *old_cont,
1143 struct task_struct *p) 2254 struct task_struct *p)
1144{ 2255{
1145 struct mm_struct *mm; 2256 mutex_lock(&memcg_tasklist);
1146 struct mem_cgroup *mem, *old_mem;
1147
1148 mm = get_task_mm(p);
1149 if (mm == NULL)
1150 return;
1151
1152 mem = mem_cgroup_from_cont(cont);
1153 old_mem = mem_cgroup_from_cont(old_cont);
1154
1155 /* 2257 /*
1156 * Only thread group leaders are allowed to migrate, the mm_struct is 2258 * FIXME: It's better to move this process's charges from the old
1157 * in effect owned by the leader 2259 * memcg to the new one, but that's still on the TODO list.
1158 */ 2260 */
1159 if (!thread_group_leader(p)) 2261 mutex_unlock(&memcg_tasklist);
1160 goto out;
1161
1162out:
1163 mmput(mm);
1164} 2262}
1165 2263
1166struct cgroup_subsys mem_cgroup_subsys = { 2264struct cgroup_subsys mem_cgroup_subsys = {
@@ -1173,3 +2271,13 @@ struct cgroup_subsys mem_cgroup_subsys = {
1173 .attach = mem_cgroup_move_task, 2271 .attach = mem_cgroup_move_task,
1174 .early_init = 0, 2272 .early_init = 0,
1175}; 2273};
2274
2275#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2276
2277static int __init disable_swap_account(char *s)
2278{
2279 really_do_swap_account = 0;
2280 return 1;
2281}
2282__setup("noswapaccount", disable_swap_account);
2283#endif
diff --git a/mm/memory.c b/mm/memory.c
index 3f8fa06b963b..e009ce870859 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2000,7 +2000,7 @@ gotten:
2000 cow_user_page(new_page, old_page, address, vma); 2000 cow_user_page(new_page, old_page, address, vma);
2001 __SetPageUptodate(new_page); 2001 __SetPageUptodate(new_page);
2002 2002
2003 if (mem_cgroup_charge(new_page, mm, GFP_KERNEL)) 2003 if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
2004 goto oom_free_new; 2004 goto oom_free_new;
2005 2005
2006 /* 2006 /*
@@ -2392,6 +2392,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2392 struct page *page; 2392 struct page *page;
2393 swp_entry_t entry; 2393 swp_entry_t entry;
2394 pte_t pte; 2394 pte_t pte;
2395 struct mem_cgroup *ptr = NULL;
2395 int ret = 0; 2396 int ret = 0;
2396 2397
2397 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 2398 if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
@@ -2430,7 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2430 lock_page(page); 2431 lock_page(page);
2431 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2432 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2432 2433
2433 if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { 2434 if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
2434 ret = VM_FAULT_OOM; 2435 ret = VM_FAULT_OOM;
2435 unlock_page(page); 2436 unlock_page(page);
2436 goto out; 2437 goto out;
@@ -2448,7 +2449,19 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2448 goto out_nomap; 2449 goto out_nomap;
2449 } 2450 }
2450 2451
2451 /* The page isn't present yet, go ahead with the fault. */ 2452 /*
2453 * The page isn't present yet, go ahead with the fault.
2454 *
2455 * Be careful about the sequence of operations here.
2456 * To get its accounting right, reuse_swap_page() must be called
2457 * while the page is counted on swap but not yet in mapcount i.e.
2458 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
2459 * must be called after the swap_free(), or it will never succeed.
2460 * Because delete_from_swap_cache() may be called by reuse_swap_page(),
2461 * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
2462 * in page->private. In this case, a record in swap_cgroup is silently
2463 * discarded at swap_free().
2464 */
2452 2465
2453 inc_mm_counter(mm, anon_rss); 2466 inc_mm_counter(mm, anon_rss);
2454 pte = mk_pte(page, vma->vm_page_prot); 2467 pte = mk_pte(page, vma->vm_page_prot);
@@ -2456,10 +2469,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2456 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 2469 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
2457 write_access = 0; 2470 write_access = 0;
2458 } 2471 }
2459
2460 flush_icache_page(vma, page); 2472 flush_icache_page(vma, page);
2461 set_pte_at(mm, address, page_table, pte); 2473 set_pte_at(mm, address, page_table, pte);
2462 page_add_anon_rmap(page, vma, address); 2474 page_add_anon_rmap(page, vma, address);
2475 /* It's better to call commit-charge after rmap is established */
2476 mem_cgroup_commit_charge_swapin(page, ptr);
2463 2477
2464 swap_free(entry); 2478 swap_free(entry);
2465 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) 2479 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
@@ -2480,7 +2494,7 @@ unlock:
2480out: 2494out:
2481 return ret; 2495 return ret;
2482out_nomap: 2496out_nomap:
2483 mem_cgroup_uncharge_page(page); 2497 mem_cgroup_cancel_charge_swapin(ptr);
2484 pte_unmap_unlock(page_table, ptl); 2498 pte_unmap_unlock(page_table, ptl);
2485 unlock_page(page); 2499 unlock_page(page);
2486 page_cache_release(page); 2500 page_cache_release(page);
@@ -2510,7 +2524,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2510 goto oom; 2524 goto oom;
2511 __SetPageUptodate(page); 2525 __SetPageUptodate(page);
2512 2526
2513 if (mem_cgroup_charge(page, mm, GFP_KERNEL)) 2527 if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
2514 goto oom_free_page; 2528 goto oom_free_page;
2515 2529
2516 entry = mk_pte(page, vma->vm_page_prot); 2530 entry = mk_pte(page, vma->vm_page_prot);
@@ -2601,7 +2615,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2601 ret = VM_FAULT_OOM; 2615 ret = VM_FAULT_OOM;
2602 goto out; 2616 goto out;
2603 } 2617 }
2604 if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { 2618 if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
2605 ret = VM_FAULT_OOM; 2619 ret = VM_FAULT_OOM;
2606 page_cache_release(page); 2620 page_cache_release(page);
2607 goto out; 2621 goto out;
diff --git a/mm/migrate.c b/mm/migrate.c
index 55373983c9c6..a30ea5fcf9f1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -121,20 +121,6 @@ static void remove_migration_pte(struct vm_area_struct *vma,
121 if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old) 121 if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
122 goto out; 122 goto out;
123 123
124 /*
125 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
126 * Failure is not an option here: we're now expected to remove every
127 * migration pte, and will cause crashes otherwise. Normally this
128 * is not an issue: mem_cgroup_prepare_migration bumped up the old
129 * page_cgroup count for safety, that's now attached to the new page,
130 * so this charge should just be another incrementation of the count,
131 * to keep in balance with rmap.c's mem_cgroup_uncharging. But if
132 * there's been a force_empty, those reference counts may no longer
133 * be reliable, and this charge can actually fail: oh well, we don't
134 * make the situation any worse by proceeding as if it had succeeded.
135 */
136 mem_cgroup_charge(new, mm, GFP_ATOMIC);
137
138 get_page(new); 124 get_page(new);
139 pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); 125 pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
140 if (is_write_migration_entry(entry)) 126 if (is_write_migration_entry(entry))
@@ -378,9 +364,6 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
378 anon = PageAnon(page); 364 anon = PageAnon(page);
379 page->mapping = NULL; 365 page->mapping = NULL;
380 366
381 if (!anon) /* This page was removed from radix-tree. */
382 mem_cgroup_uncharge_cache_page(page);
383
384 /* 367 /*
385 * If any waiters have accumulated on the new page then 368 * If any waiters have accumulated on the new page then
386 * wake them up. 369 * wake them up.
@@ -614,6 +597,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
614 struct page *newpage = get_new_page(page, private, &result); 597 struct page *newpage = get_new_page(page, private, &result);
615 int rcu_locked = 0; 598 int rcu_locked = 0;
616 int charge = 0; 599 int charge = 0;
600 struct mem_cgroup *mem;
617 601
618 if (!newpage) 602 if (!newpage)
619 return -ENOMEM; 603 return -ENOMEM;
@@ -623,24 +607,26 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
623 goto move_newpage; 607 goto move_newpage;
624 } 608 }
625 609
626 charge = mem_cgroup_prepare_migration(page, newpage);
627 if (charge == -ENOMEM) {
628 rc = -ENOMEM;
629 goto move_newpage;
630 }
631 /* prepare cgroup just returns 0 or -ENOMEM */ 610 /* prepare cgroup just returns 0 or -ENOMEM */
632 BUG_ON(charge);
633
634 rc = -EAGAIN; 611 rc = -EAGAIN;
612
635 if (!trylock_page(page)) { 613 if (!trylock_page(page)) {
636 if (!force) 614 if (!force)
637 goto move_newpage; 615 goto move_newpage;
638 lock_page(page); 616 lock_page(page);
639 } 617 }
640 618
619 /* charge against new page */
620 charge = mem_cgroup_prepare_migration(page, &mem);
621 if (charge == -ENOMEM) {
622 rc = -ENOMEM;
623 goto unlock;
624 }
625 BUG_ON(charge);
626
641 if (PageWriteback(page)) { 627 if (PageWriteback(page)) {
642 if (!force) 628 if (!force)
643 goto unlock; 629 goto uncharge;
644 wait_on_page_writeback(page); 630 wait_on_page_writeback(page);
645 } 631 }
646 /* 632 /*
@@ -693,7 +679,9 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
693rcu_unlock: 679rcu_unlock:
694 if (rcu_locked) 680 if (rcu_locked)
695 rcu_read_unlock(); 681 rcu_read_unlock();
696 682uncharge:
683 if (!charge)
684 mem_cgroup_end_migration(mem, page, newpage);
697unlock: 685unlock:
698 unlock_page(page); 686 unlock_page(page);
699 687
@@ -709,8 +697,6 @@ unlock:
709 } 697 }
710 698
711move_newpage: 699move_newpage:
712 if (!charge)
713 mem_cgroup_end_migration(newpage);
714 700
715 /* 701 /*
716 * Move the new page to the LRU. If migration was not successful 702 * Move the new page to the LRU. If migration was not successful
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6b9e758c98a5..40ba05061a4f 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -429,7 +429,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
429 unsigned long points = 0; 429 unsigned long points = 0;
430 struct task_struct *p; 430 struct task_struct *p;
431 431
432 cgroup_lock();
433 read_lock(&tasklist_lock); 432 read_lock(&tasklist_lock);
434retry: 433retry:
435 p = select_bad_process(&points, mem); 434 p = select_bad_process(&points, mem);
@@ -444,7 +443,6 @@ retry:
444 goto retry; 443 goto retry;
445out: 444out:
446 read_unlock(&tasklist_lock); 445 read_unlock(&tasklist_lock);
447 cgroup_unlock();
448} 446}
449#endif 447#endif
450 448
@@ -560,6 +558,13 @@ void pagefault_out_of_memory(void)
560 /* Got some memory back in the last second. */ 558 /* Got some memory back in the last second. */
561 return; 559 return;
562 560
561 /*
562 * If this is from a memcg, the oom-killer has already been invoked,
563 * so it is not worth going system-wide OOM.
564 */
565 if (mem_cgroup_oom_called(current))
566 goto rest_and_return;
567
563 if (sysctl_panic_on_oom) 568 if (sysctl_panic_on_oom)
564 panic("out of memory from page fault. panic_on_oom is selected.\n"); 569 panic("out of memory from page fault. panic_on_oom is selected.\n");
565 570
@@ -571,6 +576,7 @@ void pagefault_out_of_memory(void)
571 * Give "p" a good chance of killing itself before we 576 * Give "p" a good chance of killing itself before we
572 * retry to allocate memory. 577 * retry to allocate memory.
573 */ 578 */
579rest_and_return:
574 if (!test_thread_flag(TIF_MEMDIE)) 580 if (!test_thread_flag(TIF_MEMDIE))
575 schedule_timeout_uninterruptible(1); 581 schedule_timeout_uninterruptible(1);
576} 582}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7bf22e045318..5675b3073854 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3523,10 +3523,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3523 INIT_LIST_HEAD(&zone->lru[l].list); 3523 INIT_LIST_HEAD(&zone->lru[l].list);
3524 zone->lru[l].nr_scan = 0; 3524 zone->lru[l].nr_scan = 0;
3525 } 3525 }
3526 zone->recent_rotated[0] = 0; 3526 zone->reclaim_stat.recent_rotated[0] = 0;
3527 zone->recent_rotated[1] = 0; 3527 zone->reclaim_stat.recent_rotated[1] = 0;
3528 zone->recent_scanned[0] = 0; 3528 zone->reclaim_stat.recent_scanned[0] = 0;
3529 zone->recent_scanned[1] = 0; 3529 zone->reclaim_stat.recent_scanned[1] = 0;
3530 zap_zone_vm_stats(zone); 3530 zap_zone_vm_stats(zone);
3531 zone->flags = 0; 3531 zone->flags = 0;
3532 if (!size) 3532 if (!size)
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index d6507a660ed6..7006a11350c8 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -8,6 +8,7 @@
8#include <linux/memory.h> 8#include <linux/memory.h>
9#include <linux/vmalloc.h> 9#include <linux/vmalloc.h>
10#include <linux/cgroup.h> 10#include <linux/cgroup.h>
11#include <linux/swapops.h>
11 12
12static void __meminit 13static void __meminit
13__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn) 14__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -15,6 +16,7 @@ __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
15 pc->flags = 0; 16 pc->flags = 0;
16 pc->mem_cgroup = NULL; 17 pc->mem_cgroup = NULL;
17 pc->page = pfn_to_page(pfn); 18 pc->page = pfn_to_page(pfn);
19 INIT_LIST_HEAD(&pc->lru);
18} 20}
19static unsigned long total_usage; 21static unsigned long total_usage;
20 22
@@ -72,7 +74,7 @@ void __init page_cgroup_init(void)
72 74
73 int nid, fail; 75 int nid, fail;
74 76
75 if (mem_cgroup_subsys.disabled) 77 if (mem_cgroup_disabled())
76 return; 78 return;
77 79
78 for_each_online_node(nid) { 80 for_each_online_node(nid) {
@@ -103,13 +105,11 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
103/* __alloc_bootmem...() is protected by !slab_available() */ 105/* __alloc_bootmem...() is protected by !slab_available() */
104static int __init_refok init_section_page_cgroup(unsigned long pfn) 106static int __init_refok init_section_page_cgroup(unsigned long pfn)
105{ 107{
106 struct mem_section *section; 108 struct mem_section *section = __pfn_to_section(pfn);
107 struct page_cgroup *base, *pc; 109 struct page_cgroup *base, *pc;
108 unsigned long table_size; 110 unsigned long table_size;
109 int nid, index; 111 int nid, index;
110 112
111 section = __pfn_to_section(pfn);
112
113 if (!section->page_cgroup) { 113 if (!section->page_cgroup) {
114 nid = page_to_nid(pfn_to_page(pfn)); 114 nid = page_to_nid(pfn_to_page(pfn));
115 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION; 115 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
@@ -145,7 +145,6 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
145 __init_page_cgroup(pc, pfn + index); 145 __init_page_cgroup(pc, pfn + index);
146 } 146 }
147 147
148 section = __pfn_to_section(pfn);
149 section->page_cgroup = base - pfn; 148 section->page_cgroup = base - pfn;
150 total_usage += table_size; 149 total_usage += table_size;
151 return 0; 150 return 0;
@@ -248,7 +247,7 @@ void __init page_cgroup_init(void)
248 unsigned long pfn; 247 unsigned long pfn;
249 int fail = 0; 248 int fail = 0;
250 249
251 if (mem_cgroup_subsys.disabled) 250 if (mem_cgroup_disabled())
252 return; 251 return;
253 252
254 for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) { 253 for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
@@ -273,3 +272,199 @@ void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
273} 272}
274 273
275#endif 274#endif
275
276
277#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
278
279static DEFINE_MUTEX(swap_cgroup_mutex);
280struct swap_cgroup_ctrl {
281 struct page **map;
282 unsigned long length;
283};
284
285struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
286
287/*
288 * These 8 bytes seem big... maybe we can reduce this once we can use an "id"
289 * for the cgroup rather than a pointer.
290 */
291struct swap_cgroup {
292 struct mem_cgroup *val;
293};
294#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup))
295#define SC_POS_MASK (SC_PER_PAGE - 1)
296
297/*
298 * SwapCgroup implements "lookup" and "exchange" operations.
299 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
300 * operations against SwapCache; at swap_free(), it is accessed directly from swap.
301 *
302 * This means:
303 * - there is no race in "exchange" when we're accessed via SwapCache, because
304 * the SwapCache (and its swp_entry) is under lock.
305 * - when called via swap_free(), there is no user of the entry and no race.
306 * So we don't need a lock around "exchange".
307 *
308 * TODO: we can push these buffers out to HIGHMEM.
309 */
310
311/*
312 * allocate buffer for swap_cgroup.
313 */
314static int swap_cgroup_prepare(int type)
315{
316 struct page *page;
317 struct swap_cgroup_ctrl *ctrl;
318 unsigned long idx, max;
319
320 if (!do_swap_account)
321 return 0;
322 ctrl = &swap_cgroup_ctrl[type];
323
324 for (idx = 0; idx < ctrl->length; idx++) {
325 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
326 if (!page)
327 goto not_enough_page;
328 ctrl->map[idx] = page;
329 }
330 return 0;
331not_enough_page:
332 max = idx;
333 for (idx = 0; idx < max; idx++)
334 __free_page(ctrl->map[idx]);
335
336 return -ENOMEM;
337}
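
swap_cgroup_prepare() allocates one page per map slot and, when an allocation fails part-way through, frees exactly the slots already filled before reporting -ENOMEM. The same allocate-then-unwind pattern, sketched with calloc() standing in for alloc_page() (prepare() and the 4096-byte page size are illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Fill map[0..length-1]; on the first failure, free only what was
     * already allocated and report -ENOMEM. */
    static int prepare(void **map, unsigned long length)
    {
            unsigned long idx;

            for (idx = 0; idx < length; idx++) {
                    map[idx] = calloc(1, 4096);
                    if (!map[idx])
                            goto not_enough_page;
            }
            return 0;
    not_enough_page:
            while (idx--)           /* unwind exactly the filled slots */
                    free(map[idx]);
            return -ENOMEM;
    }

    int main(void)
    {
            void *map[16];

            if (prepare(map, 16) == 0) {
                    printf("16 pages ready\n");
                    for (int i = 0; i < 16; i++)
                            free(map[i]);
            }
            return 0;
    }
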
338
339/**
340 * swap_cgroup_record - record mem_cgroup for this swp_entry.
341 * @ent: swap entry to be recorded into
342 * @mem: mem_cgroup to be recorded
343 *
344 * Returns the old value on success, NULL on failure.
345 * (Of course, the old value may itself be NULL.)
346 */
347struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
348{
349 int type = swp_type(ent);
350 unsigned long offset = swp_offset(ent);
351 unsigned long idx = offset / SC_PER_PAGE;
352 unsigned long pos = offset & SC_POS_MASK;
353 struct swap_cgroup_ctrl *ctrl;
354 struct page *mappage;
355 struct swap_cgroup *sc;
356 struct mem_cgroup *old;
357
358 if (!do_swap_account)
359 return NULL;
360
361 ctrl = &swap_cgroup_ctrl[type];
362
363 mappage = ctrl->map[idx];
364 sc = page_address(mappage);
365 sc += pos;
366 old = sc->val;
367 sc->val = mem;
368
369 return old;
370}
371
372/**
373 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
374 * @ent: swap entry to be looked up.
375 *
376 * Returns a pointer to the mem_cgroup on success, NULL on failure.
377 */
378struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
379{
380 int type = swp_type(ent);
381 unsigned long offset = swp_offset(ent);
382 unsigned long idx = offset / SC_PER_PAGE;
383 unsigned long pos = offset & SC_POS_MASK;
384 struct swap_cgroup_ctrl *ctrl;
385 struct page *mappage;
386 struct swap_cgroup *sc;
387 struct mem_cgroup *ret;
388
389 if (!do_swap_account)
390 return NULL;
391
392 ctrl = &swap_cgroup_ctrl[type];
393 mappage = ctrl->map[idx];
394 sc = page_address(mappage);
395 sc += pos;
396 ret = sc->val;
397 return ret;
398}
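
Both swap_cgroup_record() and lookup_swap_cgroup() reduce a swap offset to a (page index, slot) pair with one divide and one mask; the mask form of the modulo is valid only because PAGE_SIZE / sizeof(struct swap_cgroup) is a power of two (4096 / 8 = 512). A self-contained sketch of that two-level map arithmetic, with PAGE_BYTES and struct entry as illustrative stand-ins:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_BYTES      4096UL
    struct entry { void *val; };
    #define ENTRIES_PER_PAGE (PAGE_BYTES / (unsigned long)sizeof(struct entry))
    #define POS_MASK         (ENTRIES_PER_PAGE - 1)

    static struct entry *lookup(struct entry **map, unsigned long offset)
    {
            unsigned long idx = offset / ENTRIES_PER_PAGE;
            unsigned long pos = offset & POS_MASK;  /* == offset % ENTRIES_PER_PAGE */

            return &map[idx][pos];
    }

    int main(void)
    {
            struct entry *pages[2];
            unsigned long off = ENTRIES_PER_PAGE + 7;       /* page 1, slot 7 */

            assert((ENTRIES_PER_PAGE & POS_MASK) == 0);     /* power of two only */
            pages[0] = calloc(ENTRIES_PER_PAGE, sizeof(struct entry));
            pages[1] = calloc(ENTRIES_PER_PAGE, sizeof(struct entry));
            lookup(pages, off)->val = pages;                /* record something */
            printf("offset %lu -> page %lu slot %lu\n",
                   off, off / ENTRIES_PER_PAGE, off & POS_MASK);
            free(pages[0]);
            free(pages[1]);
            return 0;
    }
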
399
400int swap_cgroup_swapon(int type, unsigned long max_pages)
401{
402 void *array;
403 unsigned long array_size;
404 unsigned long length;
405 struct swap_cgroup_ctrl *ctrl;
406
407 if (!do_swap_account)
408 return 0;
409
410 length = ((max_pages/SC_PER_PAGE) + 1);
411 array_size = length * sizeof(void *);
412
413 array = vmalloc(array_size);
414 if (!array)
415 goto nomem;
416
417 memset(array, 0, array_size);
418 ctrl = &swap_cgroup_ctrl[type];
419 mutex_lock(&swap_cgroup_mutex);
420 ctrl->length = length;
421 ctrl->map = array;
422 if (swap_cgroup_prepare(type)) {
423 /* memory shortage */
424 ctrl->map = NULL;
425 ctrl->length = 0;
426 vfree(array);
427 mutex_unlock(&swap_cgroup_mutex);
428 goto nomem;
429 }
430 mutex_unlock(&swap_cgroup_mutex);
431
432 printk(KERN_INFO
433 "swap_cgroup: uses %ld bytes of vmalloc for pointer array space"
434 " and %ld bytes to hold mem_cgroup pointers on swap\n",
435 array_size, length * PAGE_SIZE);
436 printk(KERN_INFO
437 "swap_cgroup can be disabled by noswapaccount boot option.\n");
438
439 return 0;
440nomem:
441 printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
442 printk(KERN_INFO
443 "swap_cgroup can be disabled by noswapaccount boot option\n");
444 return -ENOMEM;
445}
446
447void swap_cgroup_swapoff(int type)
448{
449 int i;
450 struct swap_cgroup_ctrl *ctrl;
451
452 if (!do_swap_account)
453 return;
454
455 mutex_lock(&swap_cgroup_mutex);
456 ctrl = &swap_cgroup_ctrl[type];
457 if (ctrl->map) {
458 for (i = 0; i < ctrl->length; i++) {
459 struct page *page = ctrl->map[i];
460 if (page)
461 __free_page(page);
462 }
463 vfree(ctrl->map);
464 ctrl->map = NULL;
465 ctrl->length = 0;
466 }
467 mutex_unlock(&swap_cgroup_mutex);
468}
469
470#endif
diff --git a/mm/shmem.c b/mm/shmem.c
index 5941f9801363..5d0de96c9789 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -928,7 +928,11 @@ found:
928 error = 1; 928 error = 1;
929 if (!inode) 929 if (!inode)
930 goto out; 930 goto out;
931 /* Precharge page using GFP_KERNEL while we can wait */ 931 /*
932 * Charge page using GFP_KERNEL while we can wait.
933 * Charged back to the user (not to the caller) when swap accounting is used.
934 * add_to_page_cache() will be called with GFP_NOWAIT.
935 */
932 error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); 936 error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
933 if (error) 937 if (error)
934 goto out; 938 goto out;
@@ -1320,15 +1324,19 @@ repeat:
1320 } else { 1324 } else {
1321 shmem_swp_unmap(entry); 1325 shmem_swp_unmap(entry);
1322 spin_unlock(&info->lock); 1326 spin_unlock(&info->lock);
1323 unlock_page(swappage);
1324 page_cache_release(swappage);
1325 if (error == -ENOMEM) { 1327 if (error == -ENOMEM) {
1326 /* allow reclaim from this memory cgroup */ 1328 /* allow reclaim from this memory cgroup */
1327 error = mem_cgroup_shrink_usage(current->mm, 1329 error = mem_cgroup_shrink_usage(swappage,
1330 current->mm,
1328 gfp); 1331 gfp);
1329 if (error) 1332 if (error) {
1333 unlock_page(swappage);
1334 page_cache_release(swappage);
1330 goto failed; 1335 goto failed;
1336 }
1331 } 1337 }
1338 unlock_page(swappage);
1339 page_cache_release(swappage);
1332 goto repeat; 1340 goto repeat;
1333 } 1341 }
1334 } else if (sgp == SGP_READ && !filepage) { 1342 } else if (sgp == SGP_READ && !filepage) {
@@ -1379,7 +1387,7 @@ repeat:
1379 1387
1380 /* Precharge page while we can wait, compensate after */ 1388 /* Precharge page while we can wait, compensate after */
1381 error = mem_cgroup_cache_charge(filepage, current->mm, 1389 error = mem_cgroup_cache_charge(filepage, current->mm,
1382 gfp & ~__GFP_HIGHMEM); 1390 GFP_KERNEL);
1383 if (error) { 1391 if (error) {
1384 page_cache_release(filepage); 1392 page_cache_release(filepage);
1385 shmem_unacct_blocks(info->flags, 1); 1393 shmem_unacct_blocks(info->flags, 1);
diff --git a/mm/swap.c b/mm/swap.c
index ba2c0e8b8b54..8adb9feb61e1 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -151,6 +151,26 @@ void rotate_reclaimable_page(struct page *page)
151 } 151 }
152} 152}
153 153
154static void update_page_reclaim_stat(struct zone *zone, struct page *page,
155 int file, int rotated)
156{
157 struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
158 struct zone_reclaim_stat *memcg_reclaim_stat;
159
160 memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
161
162 reclaim_stat->recent_scanned[file]++;
163 if (rotated)
164 reclaim_stat->recent_rotated[file]++;
165
166 if (!memcg_reclaim_stat)
167 return;
168
169 memcg_reclaim_stat->recent_scanned[file]++;
170 if (rotated)
171 memcg_reclaim_stat->recent_rotated[file]++;
172}
173
154/* 174/*
155 * FIXME: speed this up? 175 * FIXME: speed this up?
156 */ 176 */
@@ -168,10 +188,8 @@ void activate_page(struct page *page)
168 lru += LRU_ACTIVE; 188 lru += LRU_ACTIVE;
169 add_page_to_lru_list(zone, page, lru); 189 add_page_to_lru_list(zone, page, lru);
170 __count_vm_event(PGACTIVATE); 190 __count_vm_event(PGACTIVATE);
171 mem_cgroup_move_lists(page, lru);
172 191
173 zone->recent_rotated[!!file]++; 192 update_page_reclaim_stat(zone, page, !!file, 1);
174 zone->recent_scanned[!!file]++;
175 } 193 }
176 spin_unlock_irq(&zone->lru_lock); 194 spin_unlock_irq(&zone->lru_lock);
177} 195}
@@ -386,12 +404,14 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
386{ 404{
387 int i; 405 int i;
388 struct zone *zone = NULL; 406 struct zone *zone = NULL;
407
389 VM_BUG_ON(is_unevictable_lru(lru)); 408 VM_BUG_ON(is_unevictable_lru(lru));
390 409
391 for (i = 0; i < pagevec_count(pvec); i++) { 410 for (i = 0; i < pagevec_count(pvec); i++) {
392 struct page *page = pvec->pages[i]; 411 struct page *page = pvec->pages[i];
393 struct zone *pagezone = page_zone(page); 412 struct zone *pagezone = page_zone(page);
394 int file; 413 int file;
414 int active;
395 415
396 if (pagezone != zone) { 416 if (pagezone != zone) {
397 if (zone) 417 if (zone)
@@ -403,12 +423,11 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
403 VM_BUG_ON(PageUnevictable(page)); 423 VM_BUG_ON(PageUnevictable(page));
404 VM_BUG_ON(PageLRU(page)); 424 VM_BUG_ON(PageLRU(page));
405 SetPageLRU(page); 425 SetPageLRU(page);
426 active = is_active_lru(lru);
406 file = is_file_lru(lru); 427 file = is_file_lru(lru);
407 zone->recent_scanned[file]++; 428 if (active)
408 if (is_active_lru(lru)) {
409 SetPageActive(page); 429 SetPageActive(page);
410 zone->recent_rotated[file]++; 430 update_page_reclaim_stat(zone, page, file, active);
411 }
412 add_page_to_lru_list(zone, page, lru); 431 add_page_to_lru_list(zone, page, lru);
413 } 432 }
414 if (zone) 433 if (zone)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 81c825f67a7f..3ecea98ecb45 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -17,6 +17,7 @@
17#include <linux/backing-dev.h> 17#include <linux/backing-dev.h>
18#include <linux/pagevec.h> 18#include <linux/pagevec.h>
19#include <linux/migrate.h> 19#include <linux/migrate.h>
20#include <linux/page_cgroup.h>
20 21
21#include <asm/pgtable.h> 22#include <asm/pgtable.h>
22 23
@@ -108,6 +109,8 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
108 */ 109 */
109void __delete_from_swap_cache(struct page *page) 110void __delete_from_swap_cache(struct page *page)
110{ 111{
112 swp_entry_t ent = {.val = page_private(page)};
113
111 VM_BUG_ON(!PageLocked(page)); 114 VM_BUG_ON(!PageLocked(page));
112 VM_BUG_ON(!PageSwapCache(page)); 115 VM_BUG_ON(!PageSwapCache(page));
113 VM_BUG_ON(PageWriteback(page)); 116 VM_BUG_ON(PageWriteback(page));
@@ -118,6 +121,7 @@ void __delete_from_swap_cache(struct page *page)
118 total_swapcache_pages--; 121 total_swapcache_pages--;
119 __dec_zone_page_state(page, NR_FILE_PAGES); 122 __dec_zone_page_state(page, NR_FILE_PAGES);
120 INC_CACHE_INFO(del_total); 123 INC_CACHE_INFO(del_total);
124 mem_cgroup_uncharge_swapcache(page, ent);
121} 125}
122 126
123/** 127/**
diff --git a/mm/swapfile.c b/mm/swapfile.c
index eec5ca758a23..da422c47e2ee 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -33,6 +33,7 @@
33#include <asm/pgtable.h> 33#include <asm/pgtable.h>
34#include <asm/tlbflush.h> 34#include <asm/tlbflush.h>
35#include <linux/swapops.h> 35#include <linux/swapops.h>
36#include <linux/page_cgroup.h>
36 37
37static DEFINE_SPINLOCK(swap_lock); 38static DEFINE_SPINLOCK(swap_lock);
38static unsigned int nr_swapfiles; 39static unsigned int nr_swapfiles;
@@ -470,8 +471,9 @@ out:
470 return NULL; 471 return NULL;
471} 472}
472 473
473static int swap_entry_free(struct swap_info_struct *p, unsigned long offset) 474static int swap_entry_free(struct swap_info_struct *p, swp_entry_t ent)
474{ 475{
476 unsigned long offset = swp_offset(ent);
475 int count = p->swap_map[offset]; 477 int count = p->swap_map[offset];
476 478
477 if (count < SWAP_MAP_MAX) { 479 if (count < SWAP_MAP_MAX) {
@@ -486,6 +488,7 @@ static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
486 swap_list.next = p - swap_info; 488 swap_list.next = p - swap_info;
487 nr_swap_pages++; 489 nr_swap_pages++;
488 p->inuse_pages--; 490 p->inuse_pages--;
491 mem_cgroup_uncharge_swap(ent);
489 } 492 }
490 } 493 }
491 return count; 494 return count;
@@ -501,7 +504,7 @@ void swap_free(swp_entry_t entry)
501 504
502 p = swap_info_get(entry); 505 p = swap_info_get(entry);
503 if (p) { 506 if (p) {
504 swap_entry_free(p, swp_offset(entry)); 507 swap_entry_free(p, entry);
505 spin_unlock(&swap_lock); 508 spin_unlock(&swap_lock);
506 } 509 }
507} 510}
@@ -581,7 +584,7 @@ int free_swap_and_cache(swp_entry_t entry)
581 584
582 p = swap_info_get(entry); 585 p = swap_info_get(entry);
583 if (p) { 586 if (p) {
584 if (swap_entry_free(p, swp_offset(entry)) == 1) { 587 if (swap_entry_free(p, entry) == 1) {
585 page = find_get_page(&swapper_space, entry.val); 588 page = find_get_page(&swapper_space, entry.val);
586 if (page && !trylock_page(page)) { 589 if (page && !trylock_page(page)) {
587 page_cache_release(page); 590 page_cache_release(page);
@@ -690,17 +693,18 @@ unsigned int count_swap_pages(int type, int free)
690static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, 693static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
691 unsigned long addr, swp_entry_t entry, struct page *page) 694 unsigned long addr, swp_entry_t entry, struct page *page)
692{ 695{
696 struct mem_cgroup *ptr = NULL;
693 spinlock_t *ptl; 697 spinlock_t *ptl;
694 pte_t *pte; 698 pte_t *pte;
695 int ret = 1; 699 int ret = 1;
696 700
697 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) 701 if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
698 ret = -ENOMEM; 702 ret = -ENOMEM;
699 703
700 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 704 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
701 if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { 705 if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
702 if (ret > 0) 706 if (ret > 0)
703 mem_cgroup_uncharge_page(page); 707 mem_cgroup_cancel_charge_swapin(ptr);
704 ret = 0; 708 ret = 0;
705 goto out; 709 goto out;
706 } 710 }
@@ -710,6 +714,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
710 set_pte_at(vma->vm_mm, addr, pte, 714 set_pte_at(vma->vm_mm, addr, pte,
711 pte_mkold(mk_pte(page, vma->vm_page_prot))); 715 pte_mkold(mk_pte(page, vma->vm_page_prot)));
712 page_add_anon_rmap(page, vma, addr); 716 page_add_anon_rmap(page, vma, addr);
717 mem_cgroup_commit_charge_swapin(page, ptr);
713 swap_free(entry); 718 swap_free(entry);
714 /* 719 /*
715 * Move the page to the active list so it is not 720 * Move the page to the active list so it is not
@@ -1492,6 +1497,9 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
1492 spin_unlock(&swap_lock); 1497 spin_unlock(&swap_lock);
1493 mutex_unlock(&swapon_mutex); 1498 mutex_unlock(&swapon_mutex);
1494 vfree(swap_map); 1499 vfree(swap_map);
1500 /* Destroy swap account information */
1501 swap_cgroup_swapoff(type);
1502
1495 inode = mapping->host; 1503 inode = mapping->host;
1496 if (S_ISBLK(inode->i_mode)) { 1504 if (S_ISBLK(inode->i_mode)) {
1497 struct block_device *bdev = I_BDEV(inode); 1505 struct block_device *bdev = I_BDEV(inode);
@@ -1809,6 +1817,11 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
1809 } 1817 }
1810 swap_map[page_nr] = SWAP_MAP_BAD; 1818 swap_map[page_nr] = SWAP_MAP_BAD;
1811 } 1819 }
1820
1821 error = swap_cgroup_swapon(type, maxpages);
1822 if (error)
1823 goto bad_swap;
1824
1812 nr_good_pages = swap_header->info.last_page - 1825 nr_good_pages = swap_header->info.last_page -
1813 swap_header->info.nr_badpages - 1826 swap_header->info.nr_badpages -
1814 1 /* header page */; 1827 1 /* header page */;
@@ -1880,6 +1893,7 @@ bad_swap:
1880 bd_release(bdev); 1893 bd_release(bdev);
1881 } 1894 }
1882 destroy_swap_extents(p); 1895 destroy_swap_extents(p);
1896 swap_cgroup_swapoff(type);
1883bad_swap_2: 1897bad_swap_2:
1884 spin_lock(&swap_lock); 1898 spin_lock(&swap_lock);
1885 p->swap_file = NULL; 1899 p->swap_file = NULL;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b07c48b09a93..9a27c44aa327 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -125,11 +125,30 @@ static LIST_HEAD(shrinker_list);
125static DECLARE_RWSEM(shrinker_rwsem); 125static DECLARE_RWSEM(shrinker_rwsem);
126 126
127#ifdef CONFIG_CGROUP_MEM_RES_CTLR 127#ifdef CONFIG_CGROUP_MEM_RES_CTLR
128#define scan_global_lru(sc) (!(sc)->mem_cgroup) 128#define scanning_global_lru(sc) (!(sc)->mem_cgroup)
129#else 129#else
130#define scan_global_lru(sc) (1) 130#define scanning_global_lru(sc) (1)
131#endif 131#endif
132 132
133static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
134 struct scan_control *sc)
135{
136 if (!scanning_global_lru(sc))
137 return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
138
139 return &zone->reclaim_stat;
140}
141
142static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
143 enum lru_list lru)
144{
145 if (!scanning_global_lru(sc))
146 return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
147
148 return zone_page_state(zone, NR_LRU_BASE + lru);
149}
150
151
133/* 152/*
134 * Add a shrinker callback to be called from the vm 153 * Add a shrinker callback to be called from the vm
135 */ 154 */
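These two helpers are the heart of the patch: one call site, two backing stores, chosen by whether the scan is global or memcg-driven. A compact userspace sketch of the same indirection (types and names are illustrative only):

/* Sketch of the get_reclaim_stat()/zone_nr_pages() indirection above. */
#include <stdio.h>

struct zone_reclaim_stat { unsigned long recent_scanned[2]; };

struct zone   { struct zone_reclaim_stat reclaim_stat; };
struct memcg  { struct zone_reclaim_stat reclaim_stat; };
struct scan_control { struct memcg *mem_cgroup; }; /* NULL => global scan */

static struct zone_reclaim_stat *
get_reclaim_stat(struct zone *zone, struct scan_control *sc)
{
	if (sc->mem_cgroup)			/* memcg-driven reclaim */
		return &sc->mem_cgroup->reclaim_stat;
	return &zone->reclaim_stat;		/* global reclaim */
}

int main(void)
{
	struct zone z = { { { 0, 0 } } };
	struct memcg m = { { { 0, 0 } } };
	struct scan_control global = { .mem_cgroup = NULL };
	struct scan_control memcg  = { .mem_cgroup = &m };

	get_reclaim_stat(&z, &global)->recent_scanned[0] += 10;
	get_reclaim_stat(&z, &memcg)->recent_scanned[0]  += 3;

	printf("zone: %lu, memcg: %lu\n",
	       z.reclaim_stat.recent_scanned[0],
	       m.reclaim_stat.recent_scanned[0]);
	return 0;
}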
@@ -512,7 +531,6 @@ redo:
512 lru = LRU_UNEVICTABLE; 531 lru = LRU_UNEVICTABLE;
513 add_page_to_unevictable_list(page); 532 add_page_to_unevictable_list(page);
514 } 533 }
515 mem_cgroup_move_lists(page, lru);
516 534
517 /* 535 /*
518 * page's status can change while we move it among lru. If an evictable 536 * page's status can change while we move it among lru. If an evictable
@@ -547,7 +565,6 @@ void putback_lru_page(struct page *page)
547 565
548 lru = !!TestClearPageActive(page) + page_is_file_cache(page); 566 lru = !!TestClearPageActive(page) + page_is_file_cache(page);
549 lru_cache_add_lru(page, lru); 567 lru_cache_add_lru(page, lru);
550 mem_cgroup_move_lists(page, lru);
551 put_page(page); 568 put_page(page);
552} 569}
553#endif /* CONFIG_UNEVICTABLE_LRU */ 570#endif /* CONFIG_UNEVICTABLE_LRU */
@@ -813,6 +830,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
813 return ret; 830 return ret;
814 831
815 ret = -EBUSY; 832 ret = -EBUSY;
833
816 if (likely(get_page_unless_zero(page))) { 834 if (likely(get_page_unless_zero(page))) {
817 /* 835 /*
818 * Be careful not to clear PageLRU until after we're 836 * Be careful not to clear PageLRU until after we're
@@ -821,6 +839,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
821 */ 839 */
822 ClearPageLRU(page); 840 ClearPageLRU(page);
823 ret = 0; 841 ret = 0;
842 mem_cgroup_del_lru(page);
824 } 843 }
825 844
826 return ret; 845 return ret;
@@ -1029,6 +1048,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1029 struct pagevec pvec; 1048 struct pagevec pvec;
1030 unsigned long nr_scanned = 0; 1049 unsigned long nr_scanned = 0;
1031 unsigned long nr_reclaimed = 0; 1050 unsigned long nr_reclaimed = 0;
1051 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1032 1052
1033 pagevec_init(&pvec, 1); 1053 pagevec_init(&pvec, 1);
1034 1054
@@ -1070,13 +1090,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1070 __mod_zone_page_state(zone, NR_INACTIVE_ANON, 1090 __mod_zone_page_state(zone, NR_INACTIVE_ANON,
1071 -count[LRU_INACTIVE_ANON]); 1091 -count[LRU_INACTIVE_ANON]);
1072 1092
1073 if (scan_global_lru(sc)) { 1093 if (scanning_global_lru(sc))
1074 zone->pages_scanned += nr_scan; 1094 zone->pages_scanned += nr_scan;
1075 zone->recent_scanned[0] += count[LRU_INACTIVE_ANON]; 1095
1076 zone->recent_scanned[0] += count[LRU_ACTIVE_ANON]; 1096 reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
1077 zone->recent_scanned[1] += count[LRU_INACTIVE_FILE]; 1097 reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
1078 zone->recent_scanned[1] += count[LRU_ACTIVE_FILE]; 1098 reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
1079 } 1099 reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
1100
1080 spin_unlock_irq(&zone->lru_lock); 1101 spin_unlock_irq(&zone->lru_lock);
1081 1102
1082 nr_scanned += nr_scan; 1103 nr_scanned += nr_scan;
@@ -1108,7 +1129,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1108 if (current_is_kswapd()) { 1129 if (current_is_kswapd()) {
1109 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan); 1130 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
1110 __count_vm_events(KSWAPD_STEAL, nr_freed); 1131 __count_vm_events(KSWAPD_STEAL, nr_freed);
1111 } else if (scan_global_lru(sc)) 1132 } else if (scanning_global_lru(sc))
1112 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan); 1133 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
1113 1134
1114 __count_zone_vm_events(PGSTEAL, zone, nr_freed); 1135 __count_zone_vm_events(PGSTEAL, zone, nr_freed);
@@ -1134,10 +1155,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1134 SetPageLRU(page); 1155 SetPageLRU(page);
1135 lru = page_lru(page); 1156 lru = page_lru(page);
1136 add_page_to_lru_list(zone, page, lru); 1157 add_page_to_lru_list(zone, page, lru);
1137 mem_cgroup_move_lists(page, lru); 1158 if (PageActive(page)) {
1138 if (PageActive(page) && scan_global_lru(sc)) {
1139 int file = !!page_is_file_cache(page); 1159 int file = !!page_is_file_cache(page);
1140 zone->recent_rotated[file]++; 1160 reclaim_stat->recent_rotated[file]++;
1141 } 1161 }
1142 if (!pagevec_add(&pvec, page)) { 1162 if (!pagevec_add(&pvec, page)) {
1143 spin_unlock_irq(&zone->lru_lock); 1163 spin_unlock_irq(&zone->lru_lock);
@@ -1197,6 +1217,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1197 struct page *page; 1217 struct page *page;
1198 struct pagevec pvec; 1218 struct pagevec pvec;
1199 enum lru_list lru; 1219 enum lru_list lru;
1220 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1200 1221
1201 lru_add_drain(); 1222 lru_add_drain();
1202 spin_lock_irq(&zone->lru_lock); 1223 spin_lock_irq(&zone->lru_lock);
@@ -1207,10 +1228,10 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1207 * zone->pages_scanned is used to detect zone's OOM 1228
1208 * mem_cgroup remembers nr_scan by itself. 1229 * mem_cgroup remembers nr_scan by itself.
1209 */ 1230 */
1210 if (scan_global_lru(sc)) { 1231 if (scanning_global_lru(sc)) {
1211 zone->pages_scanned += pgscanned; 1232 zone->pages_scanned += pgscanned;
1212 zone->recent_scanned[!!file] += pgmoved;
1213 } 1233 }
1234 reclaim_stat->recent_scanned[!!file] += pgmoved;
1214 1235
1215 if (file) 1236 if (file)
1216 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved); 1237 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
@@ -1251,8 +1272,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1251 * This helps balance scan pressure between file and anonymous 1272 * This helps balance scan pressure between file and anonymous
1252 * pages in get_scan_ratio. 1273 * pages in get_scan_ratio.
1253 */ 1274 */
1254 if (scan_global_lru(sc)) 1275 reclaim_stat->recent_rotated[!!file] += pgmoved;
1255 zone->recent_rotated[!!file] += pgmoved;
1256 1276
1257 while (!list_empty(&l_inactive)) { 1277 while (!list_empty(&l_inactive)) {
1258 page = lru_to_page(&l_inactive); 1278 page = lru_to_page(&l_inactive);
@@ -1263,7 +1283,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1263 ClearPageActive(page); 1283 ClearPageActive(page);
1264 1284
1265 list_move(&page->lru, &zone->lru[lru].list); 1285 list_move(&page->lru, &zone->lru[lru].list);
1266 mem_cgroup_move_lists(page, lru); 1286 mem_cgroup_add_lru_list(page, lru);
1267 pgmoved++; 1287 pgmoved++;
1268 if (!pagevec_add(&pvec, page)) { 1288 if (!pagevec_add(&pvec, page)) {
1269 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); 1289 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
@@ -1292,6 +1312,38 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1292 pagevec_release(&pvec); 1312 pagevec_release(&pvec);
1293} 1313}
1294 1314
1315static int inactive_anon_is_low_global(struct zone *zone)
1316{
1317 unsigned long active, inactive;
1318
1319 active = zone_page_state(zone, NR_ACTIVE_ANON);
1320 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1321
1322 if (inactive * zone->inactive_ratio < active)
1323 return 1;
1324
1325 return 0;
1326}
1327
1328/**
1329 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1330 * @zone: zone to check
1331 * @sc: scan control of this context
1332 *
1333 * Returns true if the zone does not have enough inactive anon pages,
1334 * meaning some active anon pages need to be deactivated.
1335 */
1336static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1337{
1338 int low;
1339
1340 if (scanning_global_lru(sc))
1341 low = inactive_anon_is_low_global(zone);
1342 else
1343 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
1344 return low;
1345}
1346
1295static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1347static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1296 struct zone *zone, struct scan_control *sc, int priority) 1348 struct zone *zone, struct scan_control *sc, int priority)
1297{ 1349{
@@ -1302,8 +1354,7 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1302 return 0; 1354 return 0;
1303 } 1355 }
1304 1356
1305 if (lru == LRU_ACTIVE_ANON && 1357 if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
1306 (!scan_global_lru(sc) || inactive_anon_is_low(zone))) {
1307 shrink_active_list(nr_to_scan, zone, sc, priority, file); 1358 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1308 return 0; 1359 return 0;
1309 } 1360 }
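The new inactive_anon_is_low() boils down to one inequality: deactivate more anon pages whenever inactive * inactive_ratio < active. A tiny sketch with made-up numbers:

/* Sketch of the inactive_anon_is_low() test above. */
#include <stdio.h>

static int inactive_is_low(unsigned long active, unsigned long inactive,
			   unsigned int inactive_ratio)
{
	return inactive * inactive_ratio < active;
}

int main(void)
{
	/* With ratio 3, the inactive list should hold >= 1/3 of active. */
	printf("%d\n", inactive_is_low(900, 200, 3));	/* 600 < 900  -> 1 */
	printf("%d\n", inactive_is_low(900, 400, 3));	/* 1200 >= 900 -> 0 */
	return 0;
}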
@@ -1325,6 +1376,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1325 unsigned long anon, file, free; 1376 unsigned long anon, file, free;
1326 unsigned long anon_prio, file_prio; 1377 unsigned long anon_prio, file_prio;
1327 unsigned long ap, fp; 1378 unsigned long ap, fp;
1379 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1328 1380
1329 /* If we have no swap space, do not bother scanning anon pages. */ 1381 /* If we have no swap space, do not bother scanning anon pages. */
1330 if (nr_swap_pages <= 0) { 1382 if (nr_swap_pages <= 0) {
@@ -1333,17 +1385,20 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1333 return; 1385 return;
1334 } 1386 }
1335 1387
1336 anon = zone_page_state(zone, NR_ACTIVE_ANON) + 1388 anon = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
1337 zone_page_state(zone, NR_INACTIVE_ANON); 1389 zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
1338 file = zone_page_state(zone, NR_ACTIVE_FILE) + 1390 file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
1339 zone_page_state(zone, NR_INACTIVE_FILE); 1391 zone_nr_pages(zone, sc, LRU_INACTIVE_FILE);
1340 free = zone_page_state(zone, NR_FREE_PAGES); 1392
1341 1393 if (scanning_global_lru(sc)) {
1342 /* If we have very few page cache pages, force-scan anon pages. */ 1394 free = zone_page_state(zone, NR_FREE_PAGES);
1343 if (unlikely(file + free <= zone->pages_high)) { 1395 /* If we have very few page cache pages,
1344 percent[0] = 100; 1396 force-scan anon pages. */
1345 percent[1] = 0; 1397 if (unlikely(file + free <= zone->pages_high)) {
1346 return; 1398 percent[0] = 100;
1399 percent[1] = 0;
1400 return;
1401 }
1347 } 1402 }
1348 1403
1349 /* 1404 /*
@@ -1357,17 +1412,17 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1357 * 1412 *
1358 * anon in [0], file in [1] 1413 * anon in [0], file in [1]
1359 */ 1414 */
1360 if (unlikely(zone->recent_scanned[0] > anon / 4)) { 1415 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1361 spin_lock_irq(&zone->lru_lock); 1416 spin_lock_irq(&zone->lru_lock);
1362 zone->recent_scanned[0] /= 2; 1417 reclaim_stat->recent_scanned[0] /= 2;
1363 zone->recent_rotated[0] /= 2; 1418 reclaim_stat->recent_rotated[0] /= 2;
1364 spin_unlock_irq(&zone->lru_lock); 1419 spin_unlock_irq(&zone->lru_lock);
1365 } 1420 }
1366 1421
1367 if (unlikely(zone->recent_scanned[1] > file / 4)) { 1422 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1368 spin_lock_irq(&zone->lru_lock); 1423 spin_lock_irq(&zone->lru_lock);
1369 zone->recent_scanned[1] /= 2; 1424 reclaim_stat->recent_scanned[1] /= 2;
1370 zone->recent_rotated[1] /= 2; 1425 reclaim_stat->recent_rotated[1] /= 2;
1371 spin_unlock_irq(&zone->lru_lock); 1426 spin_unlock_irq(&zone->lru_lock);
1372 } 1427 }
1373 1428
@@ -1383,11 +1438,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1383 * proportional to the fraction of recently scanned pages on 1438 * proportional to the fraction of recently scanned pages on
1384 * each list that were recently referenced and in active use. 1439 * each list that were recently referenced and in active use.
1385 */ 1440 */
1386 ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1); 1441 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
1387 ap /= zone->recent_rotated[0] + 1; 1442 ap /= reclaim_stat->recent_rotated[0] + 1;
1388 1443
1389 fp = (file_prio + 1) * (zone->recent_scanned[1] + 1); 1444 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1390 fp /= zone->recent_rotated[1] + 1; 1445 fp /= reclaim_stat->recent_rotated[1] + 1;
1391 1446
1392 /* Normalize to percentages */ 1447 /* Normalize to percentages */
1393 percent[0] = 100 * ap / (ap + fp + 1); 1448 percent[0] = 100 * ap / (ap + fp + 1);
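The arithmetic above now reads its inputs from reclaim_stat but is otherwise unchanged: each list is weighted by its priority and penalized when recently scanned pages keep rotating back. A worked sketch with invented numbers (anon_prio/file_prio stand in for the swappiness-derived weights):

/* Worked sketch of the get_scan_ratio() arithmetic above. */
#include <stdio.h>

int main(void)
{
	unsigned long anon_prio = 60, file_prio = 140; /* swappiness = 60 */
	unsigned long scanned[2] = { 1000, 4000 };     /* [0]=anon [1]=file */
	unsigned long rotated[2] = {  900,  400 };
	unsigned long ap, fp, percent[2];

	/* Lists whose pages keep getting re-referenced (high rotated)
	 * end up with a smaller share of the scan. */
	ap = (anon_prio + 1) * (scanned[0] + 1) / (rotated[0] + 1);
	fp = (file_prio + 1) * (scanned[1] + 1) / (rotated[1] + 1);

	percent[0] = 100 * ap / (ap + fp + 1);
	percent[1] = 100 - percent[0];

	printf("scan anon %lu%%, file %lu%%\n", percent[0], percent[1]);
	return 0;
}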
@@ -1411,30 +1466,23 @@ static void shrink_zone(int priority, struct zone *zone,
1411 get_scan_ratio(zone, sc, percent); 1466 get_scan_ratio(zone, sc, percent);
1412 1467
1413 for_each_evictable_lru(l) { 1468 for_each_evictable_lru(l) {
1414 if (scan_global_lru(sc)) { 1469 int file = is_file_lru(l);
1415 int file = is_file_lru(l); 1470 int scan;
1416 int scan; 1471
1417 1472 scan = zone_page_state(zone, NR_LRU_BASE + l);
1418 scan = zone_page_state(zone, NR_LRU_BASE + l); 1473 if (priority) {
1419 if (priority) { 1474 scan >>= priority;
1420 scan >>= priority; 1475 scan = (scan * percent[file]) / 100;
1421 scan = (scan * percent[file]) / 100; 1476 }
1422 } 1477 if (scanning_global_lru(sc)) {
1423 zone->lru[l].nr_scan += scan; 1478 zone->lru[l].nr_scan += scan;
1424 nr[l] = zone->lru[l].nr_scan; 1479 nr[l] = zone->lru[l].nr_scan;
1425 if (nr[l] >= swap_cluster_max) 1480 if (nr[l] >= swap_cluster_max)
1426 zone->lru[l].nr_scan = 0; 1481 zone->lru[l].nr_scan = 0;
1427 else 1482 else
1428 nr[l] = 0; 1483 nr[l] = 0;
1429 } else { 1484 } else
1430 /* 1485 nr[l] = scan;
1431 * This reclaim occurs not because zone memory shortage
1432 * but because memory controller hits its limit.
1433 * Don't modify zone reclaim related data.
1434 */
1435 nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
1436 priority, l);
1437 }
1438 } 1486 }
1439 1487
1440 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 1488 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -1467,9 +1515,7 @@ static void shrink_zone(int priority, struct zone *zone,
1467 * Even if we did not try to evict anon pages at all, we want to 1515 * Even if we did not try to evict anon pages at all, we want to
1468 * rebalance the anon lru active/inactive ratio. 1516 * rebalance the anon lru active/inactive ratio.
1469 */ 1517 */
1470 if (!scan_global_lru(sc) || inactive_anon_is_low(zone)) 1518 if (inactive_anon_is_low(zone, sc))
1471 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1472 else if (!scan_global_lru(sc))
1473 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); 1519 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1474 1520
1475 throttle_vm_writeout(sc->gfp_mask); 1521 throttle_vm_writeout(sc->gfp_mask);
@@ -1504,7 +1550,7 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
1504 * Take care that memory controller reclaiming has little influence 1550 * Take care that memory controller reclaiming has little influence
1505 * on the global LRU. 1551 * on the global LRU.
1506 */ 1552 */
1507 if (scan_global_lru(sc)) { 1553 if (scanning_global_lru(sc)) {
1508 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1554 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1509 continue; 1555 continue;
1510 note_zone_scanning_priority(zone, priority); 1556 note_zone_scanning_priority(zone, priority);
@@ -1557,12 +1603,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1557 1603
1558 delayacct_freepages_start(); 1604 delayacct_freepages_start();
1559 1605
1560 if (scan_global_lru(sc)) 1606 if (scanning_global_lru(sc))
1561 count_vm_event(ALLOCSTALL); 1607 count_vm_event(ALLOCSTALL);
1562 /* 1608 /*
1563 * mem_cgroup will not do shrink_slab. 1609 * mem_cgroup will not do shrink_slab.
1564 */ 1610 */
1565 if (scan_global_lru(sc)) { 1611 if (scanning_global_lru(sc)) {
1566 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1612 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1567 1613
1568 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1614 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1581,7 +1627,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1581 * Don't shrink slabs when reclaiming memory from 1627 * Don't shrink slabs when reclaiming memory from
1582 * over limit cgroups 1628 * over limit cgroups
1583 */ 1629 */
1584 if (scan_global_lru(sc)) { 1630 if (scanning_global_lru(sc)) {
1585 shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages); 1631 shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
1586 if (reclaim_state) { 1632 if (reclaim_state) {
1587 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 1633 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -1612,7 +1658,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1612 congestion_wait(WRITE, HZ/10); 1658 congestion_wait(WRITE, HZ/10);
1613 } 1659 }
1614 /* top priority shrink_zones still had more to do? don't OOM, then */ 1660 /* top priority shrink_zones still had more to do? don't OOM, then */
1615 if (!sc->all_unreclaimable && scan_global_lru(sc)) 1661 if (!sc->all_unreclaimable && scanning_global_lru(sc))
1616 ret = sc->nr_reclaimed; 1662 ret = sc->nr_reclaimed;
1617out: 1663out:
1618 /* 1664 /*
@@ -1625,7 +1671,7 @@ out:
1625 if (priority < 0) 1671 if (priority < 0)
1626 priority = 0; 1672 priority = 0;
1627 1673
1628 if (scan_global_lru(sc)) { 1674 if (scanning_global_lru(sc)) {
1629 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1675 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1630 1676
1631 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1677 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1661,19 +1707,24 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1661#ifdef CONFIG_CGROUP_MEM_RES_CTLR 1707#ifdef CONFIG_CGROUP_MEM_RES_CTLR
1662 1708
1663unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, 1709unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1664 gfp_t gfp_mask) 1710 gfp_t gfp_mask,
1711 bool noswap,
1712 unsigned int swappiness)
1665{ 1713{
1666 struct scan_control sc = { 1714 struct scan_control sc = {
1667 .may_writepage = !laptop_mode, 1715 .may_writepage = !laptop_mode,
1668 .may_swap = 1, 1716 .may_swap = 1,
1669 .swap_cluster_max = SWAP_CLUSTER_MAX, 1717 .swap_cluster_max = SWAP_CLUSTER_MAX,
1670 .swappiness = vm_swappiness, 1718 .swappiness = swappiness,
1671 .order = 0, 1719 .order = 0,
1672 .mem_cgroup = mem_cont, 1720 .mem_cgroup = mem_cont,
1673 .isolate_pages = mem_cgroup_isolate_pages, 1721 .isolate_pages = mem_cgroup_isolate_pages,
1674 }; 1722 };
1675 struct zonelist *zonelist; 1723 struct zonelist *zonelist;
1676 1724
1725 if (noswap)
1726 sc.may_swap = 0;
1727
1677 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 1728 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1678 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 1729 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1679 zonelist = NODE_DATA(numa_node_id())->node_zonelists; 1730 zonelist = NODE_DATA(numa_node_id())->node_zonelists;
@@ -1761,7 +1812,7 @@ loop_again:
1761 * Do some background aging of the anon list, to give 1812 * Do some background aging of the anon list, to give
1762 * pages a chance to be referenced before reclaiming. 1813 * pages a chance to be referenced before reclaiming.
1763 */ 1814 */
1764 if (inactive_anon_is_low(zone)) 1815 if (inactive_anon_is_low(zone, &sc))
1765 shrink_active_list(SWAP_CLUSTER_MAX, zone, 1816 shrink_active_list(SWAP_CLUSTER_MAX, zone,
1766 &sc, priority, 0); 1817 &sc, priority, 0);
1767 1818
@@ -2404,6 +2455,7 @@ retry:
2404 2455
2405 __dec_zone_state(zone, NR_UNEVICTABLE); 2456 __dec_zone_state(zone, NR_UNEVICTABLE);
2406 list_move(&page->lru, &zone->lru[l].list); 2457 list_move(&page->lru, &zone->lru[l].list);
2458 mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
2407 __inc_zone_state(zone, NR_INACTIVE_ANON + l); 2459 __inc_zone_state(zone, NR_INACTIVE_ANON + l);
2408 __count_vm_event(UNEVICTABLE_PGRESCUED); 2460 __count_vm_event(UNEVICTABLE_PGRESCUED);
2409 } else { 2461 } else {
@@ -2412,6 +2464,7 @@ retry:
2412 */ 2464 */
2413 SetPageUnevictable(page); 2465 SetPageUnevictable(page);
2414 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list); 2466 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
2467 mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
2415 if (page_evictable(page, NULL)) 2468 if (page_evictable(page, NULL))
2416 goto retry; 2469 goto retry;
2417 } 2470 }
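Across this file the catch-all mem_cgroup_move_lists() is replaced by per-operation hooks (mem_cgroup_del_lru(), mem_cgroup_add_lru_list(), mem_cgroup_rotate_lru_list()) fired at the exact point the zone LRU changes, so the memcg's shadow LRU can never drift. A sketch of that lockstep-mirroring pattern, with counters standing in for the memcg lists:

/* Sketch of keeping a shadow structure in lockstep with list operations. */
#include <stdio.h>

enum lru_list { LRU_INACTIVE, LRU_ACTIVE, NR_LRU };

static unsigned long zone_lru[NR_LRU];
static unsigned long memcg_lru[NR_LRU];	/* the mirrored copy */

static void add_to_lru(enum lru_list l)
{
	zone_lru[l]++;
	memcg_lru[l]++;		/* hook fires on every add ... */
}

static void del_from_lru(enum lru_list l)
{
	zone_lru[l]--;
	memcg_lru[l]--;		/* ... and on every delete, so the two
				 * views cannot drift apart. */
}

int main(void)
{
	add_to_lru(LRU_INACTIVE);
	add_to_lru(LRU_INACTIVE);
	del_from_lru(LRU_INACTIVE);
	add_to_lru(LRU_ACTIVE);
	printf("zone %lu/%lu, memcg %lu/%lu\n",
	       zone_lru[LRU_INACTIVE], zone_lru[LRU_ACTIVE],
	       memcg_lru[LRU_INACTIVE], memcg_lru[LRU_ACTIVE]);
	return 0;
}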
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dd86a1dc4cd0..6c1323940263 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,46 +3,35 @@
3#include <linux/if_vlan.h> 3#include <linux/if_vlan.h>
4#include "vlan.h" 4#include "vlan.h"
5 5
6struct vlan_hwaccel_cb {
7 struct net_device *dev;
8};
9
10static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb)
11{
12 return (struct vlan_hwaccel_cb *)skb->cb;
13}
14
15/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ 6/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
16int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, 7int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
17 u16 vlan_tci, int polling) 8 u16 vlan_tci, int polling)
18{ 9{
19 struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb); 10 if (skb_bond_should_drop(skb))
20 11 goto drop;
21 if (skb_bond_should_drop(skb)) {
22 dev_kfree_skb_any(skb);
23 return NET_RX_DROP;
24 }
25 12
26 skb->vlan_tci = vlan_tci; 13 skb->vlan_tci = vlan_tci;
27 cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); 14 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
15
16 if (!skb->dev)
17 goto drop;
28 18
29 return (polling ? netif_receive_skb(skb) : netif_rx(skb)); 19 return (polling ? netif_receive_skb(skb) : netif_rx(skb));
20
21drop:
22 dev_kfree_skb_any(skb);
23 return NET_RX_DROP;
30} 24}
31EXPORT_SYMBOL(__vlan_hwaccel_rx); 25EXPORT_SYMBOL(__vlan_hwaccel_rx);
32 26
33int vlan_hwaccel_do_receive(struct sk_buff *skb) 27int vlan_hwaccel_do_receive(struct sk_buff *skb)
34{ 28{
35 struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb); 29 struct net_device *dev = skb->dev;
36 struct net_device *dev = cb->dev;
37 struct net_device_stats *stats; 30 struct net_device_stats *stats;
38 31
32 skb->dev = vlan_dev_info(dev)->real_dev;
39 netif_nit_deliver(skb); 33 netif_nit_deliver(skb);
40 34
41 if (dev == NULL) {
42 kfree_skb(skb);
43 return -1;
44 }
45
46 skb->dev = dev; 35 skb->dev = dev;
47 skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); 36 skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
48 skb->vlan_tci = 0; 37 skb->vlan_tci = 0;
@@ -80,3 +69,79 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
80 return vlan_dev_info(dev)->vlan_id; 69 return vlan_dev_info(dev)->vlan_id;
81} 70}
82EXPORT_SYMBOL_GPL(vlan_dev_vlan_id); 71EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
72
73static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
74 unsigned int vlan_tci, struct sk_buff *skb)
75{
76 struct sk_buff *p;
77
78 if (skb_bond_should_drop(skb))
79 goto drop;
80
81 skb->vlan_tci = vlan_tci;
82 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
83
84 if (!skb->dev)
85 goto drop;
86
87 for (p = napi->gro_list; p; p = p->next) {
88 NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev;
89 NAPI_GRO_CB(p)->flush = 0;
90 }
91
92 return dev_gro_receive(napi, skb);
93
94drop:
95 return 2;
96}
97
98int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
99 unsigned int vlan_tci, struct sk_buff *skb)
100{
101 int err = NET_RX_SUCCESS;
102
103 switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
104 case -1:
105 return netif_receive_skb(skb);
106
107 case 2:
108 err = NET_RX_DROP;
109 /* fall through */
110
111 case 1:
112 kfree_skb(skb);
113 break;
114 }
115
116 return err;
117}
118EXPORT_SYMBOL(vlan_gro_receive);
119
120int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
121 unsigned int vlan_tci, struct napi_gro_fraginfo *info)
122{
123 struct sk_buff *skb = napi_fraginfo_skb(napi, info);
124 int err = NET_RX_DROP;
125
126 if (!skb)
127 goto out;
128
129 err = NET_RX_SUCCESS;
130
131 switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
132 case -1:
133 return netif_receive_skb(skb);
134
135 case 2:
136 err = NET_RX_DROP;
137 /* fall through */
138
139 case 1:
140 napi_reuse_skb(napi, skb);
141 break;
142 }
143
144out:
145 return err;
146}
147EXPORT_SYMBOL(vlan_gro_frags);
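Both vlan_gro_receive() and vlan_gro_frags() dispatch on vlan_gro_common()'s verdict: -1 means "not merged, deliver normally", 1 means "merged, free (or reuse) the skb shell", 2 means "drop". A sketch of that switch, with invented constants:

/* Sketch of the return-code dispatch in vlan_gro_receive() above. */
#include <stdio.h>

#define RX_SUCCESS 0
#define RX_DROP    1

static int deliver(void)  { puts("deliver"); return RX_SUCCESS; }
static void free_pkt(void) { puts("free"); }

static int gro_dispatch(int verdict)
{
	int err = RX_SUCCESS;

	switch (verdict) {
	case -1:			/* no merge: normal receive path */
		return deliver();
	case 2:				/* drop: record it, then free */
		err = RX_DROP;
		/* fall through */
	case 1:				/* merged: just free the shell */
		free_pkt();
		break;
	}
	return err;
}

int main(void)
{
	printf("verdict -1 -> %d\n", gro_dispatch(-1));
	printf("verdict  1 -> %d\n", gro_dispatch(1));
	printf("verdict  2 -> %d\n", gro_dispatch(2));
	return 0;
}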
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 89a3bbdfca3f..4a19acd3a32b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -546,6 +546,18 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
546 return err; 546 return err;
547} 547}
548 548
549static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
550{
551 struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
552 const struct net_device_ops *ops = real_dev->netdev_ops;
553 int err = 0;
554
555 if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
556 err = ops->ndo_neigh_setup(dev, pa);
557
558 return err;
559}
560
549static void vlan_dev_change_rx_flags(struct net_device *dev, int change) 561static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
550{ 562{
551 struct net_device *real_dev = vlan_dev_info(dev)->real_dev; 563 struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
@@ -713,6 +725,7 @@ static const struct net_device_ops vlan_netdev_ops = {
713 .ndo_set_multicast_list = vlan_dev_set_rx_mode, 725 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
714 .ndo_change_rx_flags = vlan_dev_change_rx_flags, 726 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
715 .ndo_do_ioctl = vlan_dev_ioctl, 727 .ndo_do_ioctl = vlan_dev_ioctl,
728 .ndo_neigh_setup = vlan_dev_neigh_setup,
716}; 729};
717 730
718static const struct net_device_ops vlan_netdev_accel_ops = { 731static const struct net_device_ops vlan_netdev_accel_ops = {
@@ -728,6 +741,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
728 .ndo_set_multicast_list = vlan_dev_set_rx_mode, 741 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
729 .ndo_change_rx_flags = vlan_dev_change_rx_flags, 742 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
730 .ndo_do_ioctl = vlan_dev_ioctl, 743 .ndo_do_ioctl = vlan_dev_ioctl,
744 .ndo_neigh_setup = vlan_dev_neigh_setup,
731}; 745};
732 746
733void vlan_setup(struct net_device *dev) 747void vlan_setup(struct net_device *dev)
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index b03ff58e9308..89f99d3beb60 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -443,13 +443,14 @@ static void aarp_send_probe_phase1(struct atalk_iface *iface)
443{ 443{
444 struct ifreq atreq; 444 struct ifreq atreq;
445 struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr; 445 struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr;
446 const struct net_device_ops *ops = iface->dev->netdev_ops;
446 447
447 sa->sat_addr.s_node = iface->address.s_node; 448 sa->sat_addr.s_node = iface->address.s_node;
448 sa->sat_addr.s_net = ntohs(iface->address.s_net); 449 sa->sat_addr.s_net = ntohs(iface->address.s_net);
449 450
450 /* We pass the Net:Node to the drivers/cards by a Device ioctl. */ 451 /* We pass the Net:Node to the drivers/cards by a Device ioctl. */
451 if (!(iface->dev->do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) { 452 if (!(ops->ndo_do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
452 (void)iface->dev->do_ioctl(iface->dev, &atreq, SIOCGIFADDR); 453 ops->ndo_do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
453 if (iface->address.s_net != htons(sa->sat_addr.s_net) || 454 if (iface->address.s_net != htons(sa->sat_addr.s_net) ||
454 iface->address.s_node != sa->sat_addr.s_node) 455 iface->address.s_node != sa->sat_addr.s_node)
455 iface->status |= ATIF_PROBE_FAIL; 456 iface->status |= ATIF_PROBE_FAIL;
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index d20f8a40f36e..0d9e506f5d5a 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -165,7 +165,6 @@ struct bnep_session {
165 165
166 struct socket *sock; 166 struct socket *sock;
167 struct net_device *dev; 167 struct net_device *dev;
168 struct net_device_stats stats;
169}; 168};
170 169
171void bnep_net_setup(struct net_device *dev); 170void bnep_net_setup(struct net_device *dev);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 70fea8bdb4e5..52a6ce0d772b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -306,7 +306,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
306 struct sk_buff *nskb; 306 struct sk_buff *nskb;
307 u8 type; 307 u8 type;
308 308
309 s->stats.rx_bytes += skb->len; 309 dev->stats.rx_bytes += skb->len;
310 310
311 type = *(u8 *) skb->data; skb_pull(skb, 1); 311 type = *(u8 *) skb->data; skb_pull(skb, 1);
312 312
@@ -343,7 +343,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
343 * may not be modified and because of the alignment requirements. */ 343 * may not be modified and because of the alignment requirements. */
344 nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); 344 nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
345 if (!nskb) { 345 if (!nskb) {
346 s->stats.rx_dropped++; 346 dev->stats.rx_dropped++;
347 kfree_skb(skb); 347 kfree_skb(skb);
348 return -ENOMEM; 348 return -ENOMEM;
349 } 349 }
@@ -378,14 +378,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
378 skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len); 378 skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len);
379 kfree_skb(skb); 379 kfree_skb(skb);
380 380
381 s->stats.rx_packets++; 381 dev->stats.rx_packets++;
382 nskb->ip_summed = CHECKSUM_NONE; 382 nskb->ip_summed = CHECKSUM_NONE;
383 nskb->protocol = eth_type_trans(nskb, dev); 383 nskb->protocol = eth_type_trans(nskb, dev);
384 netif_rx_ni(nskb); 384 netif_rx_ni(nskb);
385 return 0; 385 return 0;
386 386
387badframe: 387badframe:
388 s->stats.rx_errors++; 388 dev->stats.rx_errors++;
389 kfree_skb(skb); 389 kfree_skb(skb);
390 return 0; 390 return 0;
391} 391}
@@ -448,8 +448,8 @@ send:
448 kfree_skb(skb); 448 kfree_skb(skb);
449 449
450 if (len > 0) { 450 if (len > 0) {
451 s->stats.tx_bytes += len; 451 s->dev->stats.tx_bytes += len;
452 s->stats.tx_packets++; 452 s->dev->stats.tx_packets++;
453 return 0; 453 return 0;
454 } 454 }
455 455
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index f897da6e0444..d7a0e9722def 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -55,12 +55,6 @@ static int bnep_net_close(struct net_device *dev)
55 return 0; 55 return 0;
56} 56}
57 57
58static struct net_device_stats *bnep_net_get_stats(struct net_device *dev)
59{
60 struct bnep_session *s = netdev_priv(dev);
61 return &s->stats;
62}
63
64static void bnep_net_set_mc_list(struct net_device *dev) 58static void bnep_net_set_mc_list(struct net_device *dev)
65{ 59{
66#ifdef CONFIG_BT_BNEP_MC_FILTER 60#ifdef CONFIG_BT_BNEP_MC_FILTER
@@ -128,11 +122,6 @@ static void bnep_net_timeout(struct net_device *dev)
128 netif_wake_queue(dev); 122 netif_wake_queue(dev);
129} 123}
130 124
131static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
132{
133 return -EINVAL;
134}
135
136#ifdef CONFIG_BT_BNEP_MC_FILTER 125#ifdef CONFIG_BT_BNEP_MC_FILTER
137static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) 126static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
138{ 127{
@@ -217,6 +206,18 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
217 return 0; 206 return 0;
218} 207}
219 208
209static const struct net_device_ops bnep_netdev_ops = {
210 .ndo_open = bnep_net_open,
211 .ndo_stop = bnep_net_close,
212 .ndo_start_xmit = bnep_net_xmit,
213 .ndo_validate_addr = eth_validate_addr,
214 .ndo_set_multicast_list = bnep_net_set_mc_list,
215 .ndo_set_mac_address = bnep_net_set_mac_addr,
216 .ndo_tx_timeout = bnep_net_timeout,
217 .ndo_change_mtu = eth_change_mtu,
218
219};
220
220void bnep_net_setup(struct net_device *dev) 221void bnep_net_setup(struct net_device *dev)
221{ 222{
222 223
@@ -224,15 +225,7 @@ void bnep_net_setup(struct net_device *dev)
224 dev->addr_len = ETH_ALEN; 225 dev->addr_len = ETH_ALEN;
225 226
226 ether_setup(dev); 227 ether_setup(dev);
227 228 dev->netdev_ops = &bnep_netdev_ops;
228 dev->open = bnep_net_open;
229 dev->stop = bnep_net_close;
230 dev->hard_start_xmit = bnep_net_xmit;
231 dev->get_stats = bnep_net_get_stats;
232 dev->do_ioctl = bnep_net_ioctl;
233 dev->set_mac_address = bnep_net_set_mac_addr;
234 dev->set_multicast_list = bnep_net_set_mc_list;
235 229
236 dev->watchdog_timeo = HZ * 2; 230 dev->watchdog_timeo = HZ * 2;
237 dev->tx_timeout = bnep_net_timeout;
238} 231}
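The bnep conversion above (and the dsa one further down) follows the same pattern: per-device function-pointer fields are gathered into one shared const ops table and the device keeps a single pointer to it. A minimal sketch of the pattern, with illustrative names rather than the kernel's:

/* Sketch of the net_device_ops conversion pattern. */
#include <stdio.h>

struct device;
struct device_ops {
	int (*open)(struct device *);
	int (*stop)(struct device *);
};
struct device {
	const struct device_ops *ops;	/* one pointer instead of many */
};

static int my_open(struct device *d) { (void)d; puts("open"); return 0; }
static int my_stop(struct device *d) { (void)d; puts("stop"); return 0; }

/* Shared, read-only, and shareable by every device instance. */
static const struct device_ops my_ops = {
	.open = my_open,
	.stop = my_stop,
};

int main(void)
{
	struct device dev = { .ops = &my_ops };
	dev.ops->open(&dev);
	dev.ops->stop(&dev);
	return 0;
}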
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3dadb338addd..fa417ca6cbe6 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -414,6 +414,12 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
414 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can 414 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
415 * filter for error frames (CAN_ERR_FLAG bit set in mask). 415 * filter for error frames (CAN_ERR_FLAG bit set in mask).
416 * 416 *
417 * The provided pointer to the sk_buff is guaranteed to be valid as long as
418 * the callback function is running. The callback function must *not* free
 419 * the given sk_buff while processing its task. When the given sk_buff is
 420 * needed after the end of the callback function, it must be cloned inside
421 * the callback function with skb_clone().
422 *
417 * Return: 423 * Return:
418 * 0 on success 424 * 0 on success
419 * -ENOMEM on missing cache mem to create subscription entry 425 * -ENOMEM on missing cache mem to create subscription entry
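The comment added above is the new contract that lets deliver() below stop cloning: the callback only borrows the sk_buff, must never free it, and must take its own copy (skb_clone() in the kernel) if it needs the data after returning. A sketch of that borrowed-buffer rule, with illustrative names:

/* Sketch of the ownership rule documented above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *kept;	/* data a callback wants to use later */

static void rx_callback(const char *buf)
{
	/* Must NOT free buf and must not stash the pointer itself;
	 * take a private copy instead. */
	kept = strdup(buf);
}

int main(void)
{
	char *frame = strdup("can frame");
	rx_callback(frame);	/* callback borrows frame ... */
	free(frame);		/* ... caller still owns and frees it */
	printf("kept copy: %s\n", kept);
	free(kept);
	return 0;
}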
@@ -569,13 +575,8 @@ EXPORT_SYMBOL(can_rx_unregister);
569 575
570static inline void deliver(struct sk_buff *skb, struct receiver *r) 576static inline void deliver(struct sk_buff *skb, struct receiver *r)
571{ 577{
572 struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); 578 r->func(skb, r->data);
573 579 r->matches++;
574 if (clone) {
575 clone->sk = skb->sk;
576 r->func(clone, r->data);
577 r->matches++;
578 }
579} 580}
580 581
581static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) 582static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6248ae2502c7..1649c8ab2c2f 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -633,7 +633,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
633 hrtimer_cancel(&op->timer); 633 hrtimer_cancel(&op->timer);
634 634
635 if (op->can_id != rxframe->can_id) 635 if (op->can_id != rxframe->can_id)
636 goto rx_freeskb; 636 return;
637 637
638 /* save rx timestamp */ 638 /* save rx timestamp */
639 op->rx_stamp = skb->tstamp; 639 op->rx_stamp = skb->tstamp;
@@ -645,19 +645,19 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
645 if (op->flags & RX_RTR_FRAME) { 645 if (op->flags & RX_RTR_FRAME) {
646 /* send reply for RTR-request (placed in op->frames[0]) */ 646 /* send reply for RTR-request (placed in op->frames[0]) */
647 bcm_can_tx(op); 647 bcm_can_tx(op);
648 goto rx_freeskb; 648 return;
649 } 649 }
650 650
651 if (op->flags & RX_FILTER_ID) { 651 if (op->flags & RX_FILTER_ID) {
652 /* the easiest case */ 652 /* the easiest case */
653 bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); 653 bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
654 goto rx_freeskb_starttimer; 654 goto rx_starttimer;
655 } 655 }
656 656
657 if (op->nframes == 1) { 657 if (op->nframes == 1) {
658 /* simple compare with index 0 */ 658 /* simple compare with index 0 */
659 bcm_rx_cmp_to_index(op, 0, rxframe); 659 bcm_rx_cmp_to_index(op, 0, rxframe);
660 goto rx_freeskb_starttimer; 660 goto rx_starttimer;
661 } 661 }
662 662
663 if (op->nframes > 1) { 663 if (op->nframes > 1) {
@@ -678,10 +678,8 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
678 } 678 }
679 } 679 }
680 680
681rx_freeskb_starttimer: 681rx_starttimer:
682 bcm_rx_starttimer(op); 682 bcm_rx_starttimer(op);
683rx_freeskb:
684 kfree_skb(skb);
685} 683}
686 684
687/* 685/*
diff --git a/net/can/raw.c b/net/can/raw.c
index 27aab63df467..0703cba4bf9f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -99,13 +99,14 @@ static void raw_rcv(struct sk_buff *skb, void *data)
99 struct raw_sock *ro = raw_sk(sk); 99 struct raw_sock *ro = raw_sk(sk);
100 struct sockaddr_can *addr; 100 struct sockaddr_can *addr;
101 101
102 if (!ro->recv_own_msgs) { 102 /* check the received tx sock reference */
103 /* check the received tx sock reference */ 103 if (!ro->recv_own_msgs && skb->sk == sk)
104 if (skb->sk == sk) { 104 return;
105 kfree_skb(skb); 105
106 return; 106 /* clone the given skb to be able to enqueue it into the rcv queue */
107 } 107 skb = skb_clone(skb, GFP_ATOMIC);
108 } 108 if (!skb)
109 return;
109 110
110 /* 111 /*
111 * Put the datagram to the queue so that raw_recvmsg() can 112 * Put the datagram to the queue so that raw_recvmsg() can
diff --git a/net/core/dev.c b/net/core/dev.c
index 382df6c09eec..5f736f1ceeae 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -170,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock);
170static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; 170static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
171static struct list_head ptype_all __read_mostly; /* Taps */ 171static struct list_head ptype_all __read_mostly; /* Taps */
172 172
173#ifdef CONFIG_NET_DMA
174struct net_dma {
175 struct dma_client client;
176 spinlock_t lock;
177 cpumask_t channel_mask;
178 struct dma_chan **channels;
179};
180
181static enum dma_state_client
182netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
183 enum dma_state state);
184
185static struct net_dma net_dma = {
186 .client = {
187 .event_callback = netdev_dma_event,
188 },
189};
190#endif
191
192/* 173/*
193 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 174 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
194 * semaphore. 175 * semaphore.
@@ -2387,7 +2368,7 @@ void napi_gro_flush(struct napi_struct *napi)
2387} 2368}
2388EXPORT_SYMBOL(napi_gro_flush); 2369EXPORT_SYMBOL(napi_gro_flush);
2389 2370
2390static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2371int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2391{ 2372{
2392 struct sk_buff **pp = NULL; 2373 struct sk_buff **pp = NULL;
2393 struct packet_type *ptype; 2374 struct packet_type *ptype;
@@ -2417,11 +2398,14 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2417 2398
2418 for (p = napi->gro_list; p; p = p->next) { 2399 for (p = napi->gro_list; p; p = p->next) {
2419 count++; 2400 count++;
2420 NAPI_GRO_CB(p)->same_flow = 2401
2421 p->mac_len == mac_len && 2402 if (!NAPI_GRO_CB(p)->same_flow)
2422 !memcmp(skb_mac_header(p), skb_mac_header(skb), 2403 continue;
2423 mac_len); 2404
2424 NAPI_GRO_CB(p)->flush = 0; 2405 if (p->mac_len != mac_len ||
2406 memcmp(skb_mac_header(p), skb_mac_header(skb),
2407 mac_len))
2408 NAPI_GRO_CB(p)->same_flow = 0;
2425 } 2409 }
2426 2410
2427 pp = ptype->gro_receive(&napi->gro_list, skb); 2411 pp = ptype->gro_receive(&napi->gro_list, skb);
@@ -2463,6 +2447,19 @@ ok:
2463normal: 2447normal:
2464 return -1; 2448 return -1;
2465} 2449}
2450EXPORT_SYMBOL(dev_gro_receive);
2451
2452static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2453{
2454 struct sk_buff *p;
2455
2456 for (p = napi->gro_list; p; p = p->next) {
2457 NAPI_GRO_CB(p)->same_flow = 1;
2458 NAPI_GRO_CB(p)->flush = 0;
2459 }
2460
2461 return dev_gro_receive(napi, skb);
2462}
2466 2463
2467int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2464int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2468{ 2465{
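The restructured dev_gro_receive() turns same_flow into a winnowing flag: __napi_gro_receive() primes it to 1 on every held packet, then each layer (link layer here, IPv6 further down) may only clear it on a mismatch, never set it back. A sketch of that one-way filtering:

/* Sketch of the same_flow winnowing in dev_gro_receive() above. */
#include <stdio.h>
#include <string.h>

struct held { const char *mac; int same_flow; };

int main(void)
{
	struct held list[] = {
		{ "aa:bb", 1 }, { "cc:dd", 1 }, { "aa:bb", 1 },
	};
	const char *incoming = "aa:bb";
	unsigned i;

	/* Link layer: clear same_flow where the MAC header differs.
	 * A later layer only inspects entries still marked 1. */
	for (i = 0; i < 3; i++)
		if (strcmp(list[i].mac, incoming))
			list[i].same_flow = 0;

	for (i = 0; i < 3; i++)
		printf("held[%u] %s same_flow=%d\n",
		       i, list[i].mac, list[i].same_flow);
	return 0;
}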
@@ -2479,11 +2476,26 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2479} 2476}
2480EXPORT_SYMBOL(napi_gro_receive); 2477EXPORT_SYMBOL(napi_gro_receive);
2481 2478
2482int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) 2479void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2480{
2481 skb_shinfo(skb)->nr_frags = 0;
2482
2483 skb->len -= skb->data_len;
2484 skb->truesize -= skb->data_len;
2485 skb->data_len = 0;
2486
2487 __skb_pull(skb, skb_headlen(skb));
2488 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2489
2490 napi->skb = skb;
2491}
2492EXPORT_SYMBOL(napi_reuse_skb);
2493
2494struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
2495 struct napi_gro_fraginfo *info)
2483{ 2496{
2484 struct net_device *dev = napi->dev; 2497 struct net_device *dev = napi->dev;
2485 struct sk_buff *skb = napi->skb; 2498 struct sk_buff *skb = napi->skb;
2486 int err = NET_RX_DROP;
2487 2499
2488 napi->skb = NULL; 2500 napi->skb = NULL;
2489 2501
@@ -2503,16 +2515,31 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
2503 skb->len += info->len; 2515 skb->len += info->len;
2504 skb->truesize += info->len; 2516 skb->truesize += info->len;
2505 2517
2506 if (!pskb_may_pull(skb, ETH_HLEN)) 2518 if (!pskb_may_pull(skb, ETH_HLEN)) {
2507 goto reuse; 2519 napi_reuse_skb(napi, skb);
2508 2520 goto out;
2509 err = NET_RX_SUCCESS; 2521 }
2510 2522
2511 skb->protocol = eth_type_trans(skb, dev); 2523 skb->protocol = eth_type_trans(skb, dev);
2512 2524
2513 skb->ip_summed = info->ip_summed; 2525 skb->ip_summed = info->ip_summed;
2514 skb->csum = info->csum; 2526 skb->csum = info->csum;
2515 2527
2528out:
2529 return skb;
2530}
2531EXPORT_SYMBOL(napi_fraginfo_skb);
2532
2533int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
2534{
2535 struct sk_buff *skb = napi_fraginfo_skb(napi, info);
2536 int err = NET_RX_DROP;
2537
2538 if (!skb)
2539 goto out;
2540
2541 err = NET_RX_SUCCESS;
2542
2516 switch (__napi_gro_receive(napi, skb)) { 2543 switch (__napi_gro_receive(napi, skb)) {
2517 case -1: 2544 case -1:
2518 return netif_receive_skb(skb); 2545 return netif_receive_skb(skb);
@@ -2521,17 +2548,7 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
2521 goto out; 2548 goto out;
2522 } 2549 }
2523 2550
2524reuse: 2551 napi_reuse_skb(napi, skb);
2525 skb_shinfo(skb)->nr_frags = 0;
2526
2527 skb->len -= skb->data_len;
2528 skb->truesize -= skb->data_len;
2529 skb->data_len = 0;
2530
2531 __skb_pull(skb, skb_headlen(skb));
2532 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2533
2534 napi->skb = skb;
2535 2552
2536out: 2553out:
2537 return err; 2554 return err;
@@ -2718,14 +2735,7 @@ out:
2718 * There may not be any more sk_buffs coming right now, so push 2735 * There may not be any more sk_buffs coming right now, so push
2719 * any pending DMA copies to hardware 2736 * any pending DMA copies to hardware
2720 */ 2737 */
2721 if (!cpus_empty(net_dma.channel_mask)) { 2738 dma_issue_pending_all();
2722 int chan_idx;
2723 for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
2724 struct dma_chan *chan = net_dma.channels[chan_idx];
2725 if (chan)
2726 dma_async_memcpy_issue_pending(chan);
2727 }
2728 }
2729#endif 2739#endif
2730 2740
2731 return; 2741 return;
@@ -4916,122 +4926,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
4916 return NOTIFY_OK; 4926 return NOTIFY_OK;
4917} 4927}
4918 4928
4919#ifdef CONFIG_NET_DMA
4920/**
4921 * net_dma_rebalance - try to maintain one DMA channel per CPU
4922 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4923 *
4924 * This is called when the number of channels allocated to the net_dma client
4925 * changes. The net_dma client tries to have one DMA channel per CPU.
4926 */
4927
4928static void net_dma_rebalance(struct net_dma *net_dma)
4929{
4930 unsigned int cpu, i, n, chan_idx;
4931 struct dma_chan *chan;
4932
4933 if (cpus_empty(net_dma->channel_mask)) {
4934 for_each_online_cpu(cpu)
4935 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
4936 return;
4937 }
4938
4939 i = 0;
4940 cpu = first_cpu(cpu_online_map);
4941
4942 for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
4943 chan = net_dma->channels[chan_idx];
4944
4945 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4946 + (i < (num_online_cpus() %
4947 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
4948
4949 while(n) {
4950 per_cpu(softnet_data, cpu).net_dma = chan;
4951 cpu = next_cpu(cpu, cpu_online_map);
4952 n--;
4953 }
4954 i++;
4955 }
4956}
4957
4958/**
4959 * netdev_dma_event - event callback for the net_dma_client
4960 * @client: should always be net_dma_client
4961 * @chan: DMA channel for the event
4962 * @state: DMA state to be handled
4963 */
4964static enum dma_state_client
4965netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4966 enum dma_state state)
4967{
4968 int i, found = 0, pos = -1;
4969 struct net_dma *net_dma =
4970 container_of(client, struct net_dma, client);
4971 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4972
4973 spin_lock(&net_dma->lock);
4974 switch (state) {
4975 case DMA_RESOURCE_AVAILABLE:
4976 for (i = 0; i < nr_cpu_ids; i++)
4977 if (net_dma->channels[i] == chan) {
4978 found = 1;
4979 break;
4980 } else if (net_dma->channels[i] == NULL && pos < 0)
4981 pos = i;
4982
4983 if (!found && pos >= 0) {
4984 ack = DMA_ACK;
4985 net_dma->channels[pos] = chan;
4986 cpu_set(pos, net_dma->channel_mask);
4987 net_dma_rebalance(net_dma);
4988 }
4989 break;
4990 case DMA_RESOURCE_REMOVED:
4991 for (i = 0; i < nr_cpu_ids; i++)
4992 if (net_dma->channels[i] == chan) {
4993 found = 1;
4994 pos = i;
4995 break;
4996 }
4997
4998 if (found) {
4999 ack = DMA_ACK;
5000 cpu_clear(pos, net_dma->channel_mask);
5001 net_dma->channels[i] = NULL;
5002 net_dma_rebalance(net_dma);
5003 }
5004 break;
5005 default:
5006 break;
5007 }
5008 spin_unlock(&net_dma->lock);
5009
5010 return ack;
5011}
5012
5013/**
5014 * netdev_dma_register - register the networking subsystem as a DMA client
5015 */
5016static int __init netdev_dma_register(void)
5017{
5018 net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
5019 GFP_KERNEL);
5020 if (unlikely(!net_dma.channels)) {
5021 printk(KERN_NOTICE
5022 "netdev_dma: no memory for net_dma.channels\n");
5023 return -ENOMEM;
5024 }
5025 spin_lock_init(&net_dma.lock);
5026 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
5027 dma_async_client_register(&net_dma.client);
5028 dma_async_client_chan_request(&net_dma.client);
5029 return 0;
5030}
5031
5032#else
5033static int __init netdev_dma_register(void) { return -ENODEV; }
5034#endif /* CONFIG_NET_DMA */
5035 4929
5036/** 4930/**
5037 * netdev_increment_features - increment feature set by one 4931 * netdev_increment_features - increment feature set by one
@@ -5251,14 +5145,15 @@ static int __init net_dev_init(void)
5251 if (register_pernet_device(&default_device_ops)) 5145 if (register_pernet_device(&default_device_ops))
5252 goto out; 5146 goto out;
5253 5147
5254 netdev_dma_register();
5255
5256 open_softirq(NET_TX_SOFTIRQ, net_tx_action); 5148 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5257 open_softirq(NET_RX_SOFTIRQ, net_rx_action); 5149 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
5258 5150
5259 hotcpu_notifier(dev_cpu_callback, 0); 5151 hotcpu_notifier(dev_cpu_callback, 0);
5260 dst_init(); 5152 dst_init();
5261 dev_mcast_init(); 5153 dev_mcast_init();
5154 #ifdef CONFIG_NET_DMA
5155 dmaengine_get();
5156 #endif
5262 rc = 0; 5157 rc = 0;
5263out: 5158out:
5264 return rc; 5159 return rc;
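The net effect of the net/core/dev.c changes: all the private channel bookkeeping (net_dma client, per-cpu channel table, rebalancing) is replaced by one dmaengine_get() at init and a cheap dma_find_channel(DMA_MEMCPY) at each use site, which may return NULL. A rough userspace sketch of that "global registry plus per-use lookup" shape — the registry here is invented for illustration, not the dmaengine API:

/* Rough sketch of the shape of the new dmaengine usage above. */
#include <stdio.h>

struct chan { const char *name; };

static struct chan memcpy_chan = { "dma0chan0" };
static int clients;

static void engine_get(void) { clients++; }   /* like dmaengine_get() */
static void engine_put(void) { clients--; }

/* Like dma_find_channel(): may return NULL if no client holds the
 * engine or no capable channel exists; callers must check. */
static struct chan *find_channel(void)
{
	return clients > 0 ? &memcpy_chan : NULL;
}

int main(void)
{
	printf("before get: %p\n", (void *)find_channel());
	engine_get();
	struct chan *c = find_channel();
	printf("after get: %s\n", c ? c->name : "(none)");
	engine_put();
	return 0;
}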
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3a410d20da0..a68fd79e9eca 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -286,6 +286,42 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
286 .get_sset_count = dsa_slave_get_sset_count, 286 .get_sset_count = dsa_slave_get_sset_count,
287}; 287};
288 288
289#ifdef CONFIG_NET_DSA_TAG_DSA
290static const struct net_device_ops dsa_netdev_ops = {
291 .ndo_open = dsa_slave_open,
292 .ndo_stop = dsa_slave_close,
293 .ndo_start_xmit = dsa_xmit,
294 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
295 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
296 .ndo_set_multicast_list = dsa_slave_set_rx_mode,
297 .ndo_set_mac_address = dsa_slave_set_mac_address,
298 .ndo_do_ioctl = dsa_slave_ioctl,
299};
300#endif
301#ifdef CONFIG_NET_DSA_TAG_EDSA
302static const struct net_device_ops edsa_netdev_ops = {
303 .ndo_open = dsa_slave_open,
304 .ndo_stop = dsa_slave_close,
305 .ndo_start_xmit = edsa_xmit,
306 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
307 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
308 .ndo_set_multicast_list = dsa_slave_set_rx_mode,
309 .ndo_set_mac_address = dsa_slave_set_mac_address,
310 .ndo_do_ioctl = dsa_slave_ioctl,
311};
312#endif
313#ifdef CONFIG_NET_DSA_TAG_TRAILER
314static const struct net_device_ops trailer_netdev_ops = {
315 .ndo_open = dsa_slave_open,
316 .ndo_stop = dsa_slave_close,
317 .ndo_start_xmit = trailer_xmit,
318 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
319 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
320 .ndo_set_multicast_list = dsa_slave_set_rx_mode,
321 .ndo_set_mac_address = dsa_slave_set_mac_address,
322 .ndo_do_ioctl = dsa_slave_ioctl,
323};
324#endif
289 325
290/* slave device setup *******************************************************/ 326/* slave device setup *******************************************************/
291struct net_device * 327struct net_device *
@@ -306,32 +342,27 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
306 SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops); 342 SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
307 memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN); 343 memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN);
308 slave_dev->tx_queue_len = 0; 344 slave_dev->tx_queue_len = 0;
345
309 switch (ds->tag_protocol) { 346 switch (ds->tag_protocol) {
310#ifdef CONFIG_NET_DSA_TAG_DSA 347#ifdef CONFIG_NET_DSA_TAG_DSA
311 case htons(ETH_P_DSA): 348 case htons(ETH_P_DSA):
312 slave_dev->hard_start_xmit = dsa_xmit; 349 slave_dev->netdev_ops = &dsa_netdev_ops;
313 break; 350 break;
314#endif 351#endif
315#ifdef CONFIG_NET_DSA_TAG_EDSA 352#ifdef CONFIG_NET_DSA_TAG_EDSA
316 case htons(ETH_P_EDSA): 353 case htons(ETH_P_EDSA):
317 slave_dev->hard_start_xmit = edsa_xmit; 354 slave_dev->netdev_ops = &edsa_netdev_ops;
318 break; 355 break;
319#endif 356#endif
320#ifdef CONFIG_NET_DSA_TAG_TRAILER 357#ifdef CONFIG_NET_DSA_TAG_TRAILER
321 case htons(ETH_P_TRAILER): 358 case htons(ETH_P_TRAILER):
322 slave_dev->hard_start_xmit = trailer_xmit; 359 slave_dev->netdev_ops = &trailer_netdev_ops;
323 break; 360 break;
324#endif 361#endif
325 default: 362 default:
326 BUG(); 363 BUG();
327 } 364 }
328 slave_dev->open = dsa_slave_open; 365
329 slave_dev->stop = dsa_slave_close;
330 slave_dev->change_rx_flags = dsa_slave_change_rx_flags;
331 slave_dev->set_rx_mode = dsa_slave_set_rx_mode;
332 slave_dev->set_multicast_list = dsa_slave_set_rx_mode;
333 slave_dev->set_mac_address = dsa_slave_set_mac_address;
334 slave_dev->do_ioctl = dsa_slave_ioctl;
335 SET_NETDEV_DEV(slave_dev, parent); 366 SET_NETDEV_DEV(slave_dev, parent);
336 slave_dev->vlan_features = master->vlan_features; 367 slave_dev->vlan_features = master->vlan_features;
337 368
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 35bcddf8a932..ce572f9dff02 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1313,7 +1313,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1313 if ((available < target) && 1313 if ((available < target) &&
1314 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && 1314 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1315 !sysctl_tcp_low_latency && 1315 !sysctl_tcp_low_latency &&
1316 __get_cpu_var(softnet_data).net_dma) { 1316 dma_find_channel(DMA_MEMCPY)) {
1317 preempt_enable_no_resched(); 1317 preempt_enable_no_resched();
1318 tp->ucopy.pinned_list = 1318 tp->ucopy.pinned_list =
1319 dma_pin_iovec_pages(msg->msg_iov, len); 1319 dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1523,7 +1523,7 @@ do_prequeue:
1523 if (!(flags & MSG_TRUNC)) { 1523 if (!(flags & MSG_TRUNC)) {
1524#ifdef CONFIG_NET_DMA 1524#ifdef CONFIG_NET_DMA
1525 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1525 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1526 tp->ucopy.dma_chan = get_softnet_dma(); 1526 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1527 1527
1528 if (tp->ucopy.dma_chan) { 1528 if (tp->ucopy.dma_chan) {
1529 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( 1529 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1628,7 +1628,6 @@ skip_copy:
1628 1628
1629 /* Safe to free early-copied skbs now */ 1629 /* Safe to free early-copied skbs now */
1630 __skb_queue_purge(&sk->sk_async_wait_queue); 1630 __skb_queue_purge(&sk->sk_async_wait_queue);
1631 dma_chan_put(tp->ucopy.dma_chan);
1632 tp->ucopy.dma_chan = NULL; 1631 tp->ucopy.dma_chan = NULL;
1633 } 1632 }
1634 if (tp->ucopy.pinned_list) { 1633 if (tp->ucopy.pinned_list) {
@@ -2542,6 +2541,7 @@ out:
2542 2541
2543 return pp; 2542 return pp;
2544} 2543}
2544EXPORT_SYMBOL(tcp_gro_receive);
2545 2545
2546int tcp_gro_complete(struct sk_buff *skb) 2546int tcp_gro_complete(struct sk_buff *skb)
2547{ 2547{
@@ -2558,6 +2558,7 @@ int tcp_gro_complete(struct sk_buff *skb)
2558 2558
2559 return 0; 2559 return 0;
2560} 2560}
2561EXPORT_SYMBOL(tcp_gro_complete);
2561 2562
2562#ifdef CONFIG_TCP_MD5SIG 2563#ifdef CONFIG_TCP_MD5SIG
2563static unsigned long tcp_md5sig_users; 2564static unsigned long tcp_md5sig_users;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99b7ecbe8893..a6961d75c7ea 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
5005 return 0; 5005 return 0;
5006 5006
5007 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 5007 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
5008 tp->ucopy.dma_chan = get_softnet_dma(); 5008 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
5009 5009
5010 if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { 5010 if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
5011 5011
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9d839fa9331e..19d7b429a262 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1594,7 +1594,7 @@ process:
1594#ifdef CONFIG_NET_DMA 1594#ifdef CONFIG_NET_DMA
1595 struct tcp_sock *tp = tcp_sk(sk); 1595 struct tcp_sock *tp = tcp_sk(sk);
1596 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1596 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1597 tp->ucopy.dma_chan = get_softnet_dma(); 1597 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1598 if (tp->ucopy.dma_chan) 1598 if (tp->ucopy.dma_chan)
1599 ret = tcp_v4_do_rcv(sk, skb); 1599 ret = tcp_v4_do_rcv(sk, skb);
1600 else 1600 else
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 437b750b98fd..94f74f5b0cbf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -672,8 +672,7 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
672 672
673EXPORT_SYMBOL_GPL(ipv6_opt_accepted); 673EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
674 674
675static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, 675static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
676 int proto)
677{ 676{
678 struct inet6_protocol *ops = NULL; 677 struct inet6_protocol *ops = NULL;
679 678
@@ -704,7 +703,7 @@ static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
704 __skb_pull(skb, len); 703 __skb_pull(skb, len);
705 } 704 }
706 705
707 return ops; 706 return proto;
708} 707}
709 708
710static int ipv6_gso_send_check(struct sk_buff *skb) 709static int ipv6_gso_send_check(struct sk_buff *skb)
@@ -721,7 +720,9 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
 	err = -EPROTONOSUPPORT;
 
 	rcu_read_lock();
-	ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+	ops = rcu_dereference(inet6_protos[
+		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
 	if (likely(ops && ops->gso_send_check)) {
 		skb_reset_transport_header(skb);
 		err = ops->gso_send_check(skb);
@@ -757,7 +758,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
 	segs = ERR_PTR(-EPROTONOSUPPORT);
 
 	rcu_read_lock();
-	ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+	ops = rcu_dereference(inet6_protos[
+		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
 	if (likely(ops && ops->gso_segment)) {
 		skb_reset_transport_header(skb);
 		segs = ops->gso_segment(skb, features);
@@ -777,11 +780,105 @@ out:
 	return segs;
 }
 
+struct ipv6_gro_cb {
+	struct napi_gro_cb napi;
+	int proto;
+};
+
+#define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb)
+
+static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	struct inet6_protocol *ops;
+	struct sk_buff **pp = NULL;
+	struct sk_buff *p;
+	struct ipv6hdr *iph;
+	unsigned int nlen;
+	int flush = 1;
+	int proto;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+		goto out;
+
+	iph = ipv6_hdr(skb);
+	__skb_pull(skb, sizeof(*iph));
+
+	flush += ntohs(iph->payload_len) != skb->len;
+
+	rcu_read_lock();
+	proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr);
+	IPV6_GRO_CB(skb)->proto = proto;
+	ops = rcu_dereference(inet6_protos[proto]);
+	if (!ops || !ops->gro_receive)
+		goto out_unlock;
+
+	flush--;
+	skb_reset_transport_header(skb);
+	nlen = skb_network_header_len(skb);
+
+	for (p = *head; p; p = p->next) {
+		struct ipv6hdr *iph2;
+
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		iph2 = ipv6_hdr(p);
+
+		/* All fields must match except length. */
+		if (nlen != skb_network_header_len(p) ||
+		    memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) ||
+		    memcmp(&iph->nexthdr, &iph2->nexthdr,
+			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
+			NAPI_GRO_CB(p)->same_flow = 0;
+			continue;
+		}
+
+		NAPI_GRO_CB(p)->flush |= flush;
+	}
+
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	pp = ops->gro_receive(head, skb);
+
+out_unlock:
+	rcu_read_unlock();
+
+out:
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	return pp;
+}
+
+static int ipv6_gro_complete(struct sk_buff *skb)
+{
+	struct inet6_protocol *ops;
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	int err = -ENOSYS;
+
+	iph->payload_len = htons(skb->len - skb_network_offset(skb) -
+				 sizeof(*iph));
+
+	rcu_read_lock();
+	ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]);
+	if (WARN_ON(!ops || !ops->gro_complete))
+		goto out_unlock;
+
+	err = ops->gro_complete(skb);
+
+out_unlock:
+	rcu_read_unlock();
+
+	return err;
+}
+
 static struct packet_type ipv6_packet_type = {
 	.type = __constant_htons(ETH_P_IPV6),
 	.func = ipv6_rcv,
 	.gso_send_check = ipv6_gso_send_check,
 	.gso_segment = ipv6_gso_segment,
+	.gro_receive = ipv6_gro_receive,
+	.gro_complete = ipv6_gro_complete,
 };
 
 static int __init ipv6_packet_init(void)
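
Worth noting in ipv6_gro_receive(): the transport protocol discovered during
receive has to survive until ipv6_gro_complete() runs, and it is stashed in
skb->cb by overlaying a private control-block struct whose first member is
the generic struct napi_gro_cb, so NAPI_GRO_CB() and IPV6_GRO_CB() alias the
same 48-byte area. The same trick, sketched for a hypothetical protocol (all
"foo" names invented):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	struct foo_gro_cb {
		struct napi_gro_cb napi;	/* must stay at offset 0 */
		u32 flow_id;			/* private per-skb GRO state */
	};

	#define FOO_GRO_CB(skb) ((struct foo_gro_cb *)(skb)->cb)

	static void foo_cb_init(struct sk_buff *skb, u32 id)
	{
		/* Guard against outgrowing the shared control block. */
		BUILD_BUG_ON(sizeof(struct foo_gro_cb) > sizeof(skb->cb));
		FOO_GRO_CB(skb)->flow_id = id;
	}
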
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 76f06b94ab9f..c4a59824ac2c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2752,7 +2752,7 @@ int __init ip6_route_init(void)
 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
 				  SLAB_HWCACHE_ALIGN, NULL);
 	if (!ip6_dst_ops_template.kmem_cachep)
-		goto out;;
+		goto out;
 
 	ret = register_pernet_subsys(&ip6_route_net_ops);
 	if (ret)
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 9048fe7e7ea7..a031034720b4 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -128,7 +128,7 @@ static struct ctl_table_header *ip6_header;
 
 int ipv6_sysctl_register(void)
 {
-	int err = -ENOMEM;;
+	int err = -ENOMEM;
 
 	ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table);
 	if (ip6_header == NULL)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e8b8337a8310..e5b85d45bee8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,7 +101,7 @@ static void tcp_v6_hash(struct sock *sk)
 	}
 }
 
-static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
+static __inline__ __sum16 tcp_v6_check(int len,
 				       struct in6_addr *saddr,
 				       struct in6_addr *daddr,
 				       __wsum base)
@@ -501,7 +501,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
 	if (skb) {
 		struct tcphdr *th = tcp_hdr(skb);
 
-		th->check = tcp_v6_check(th, skb->len,
+		th->check = tcp_v6_check(skb->len,
 					 &treq->loc_addr, &treq->rmt_addr,
 					 csum_partial(th, skb->len, skb->csum));
 
@@ -942,6 +942,41 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
 	return 0;
 }
 
+struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+
+	switch (skb->ip_summed) {
+	case CHECKSUM_COMPLETE:
+		if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr,
+				  skb->csum)) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			break;
+		}
+
+		/* fall through */
+	case CHECKSUM_NONE:
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	return tcp_gro_receive(head, skb);
+}
+EXPORT_SYMBOL(tcp6_gro_receive);
+
+int tcp6_gro_complete(struct sk_buff *skb)
+{
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct tcphdr *th = tcp_hdr(skb);
+
+	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+				  &iph->saddr, &iph->daddr, 0);
+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+
+	return tcp_gro_complete(skb);
+}
+EXPORT_SYMBOL(tcp6_gro_complete);
+
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 				 u32 ts, struct tcp_md5sig_key *key, int rst)
 {
@@ -1429,14 +1464,14 @@ out:
 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
-		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
+		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
 				  &ipv6_hdr(skb)->daddr, skb->csum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			return 0;
 		}
 	}
 
-	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
+	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
 					      &ipv6_hdr(skb)->saddr,
 					      &ipv6_hdr(skb)->daddr, 0));
 
@@ -1640,7 +1675,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else
@@ -2062,6 +2097,8 @@ static struct inet6_protocol tcpv6_protocol = {
 	.err_handler	=	tcp_v6_err,
 	.gso_send_check	=	tcp_v6_gso_send_check,
 	.gso_segment	=	tcp_tso_segment,
+	.gro_receive	=	tcp6_gro_receive,
+	.gro_complete	=	tcp6_gro_complete,
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
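
Registration is the whole story for a transport that wants IPv6 GRO: once
.gro_receive/.gro_complete appear in its inet6_protocol, ipv6_gro_receive()
dispatches to it through the inet6_protos[] table. A sketch of that shape for
a hypothetical transport (the "foo" names and IPPROTO_FOO are invented;
inet6_add_protocol() is the real registration call):

	#include <net/protocol.h>

	static int foo6_rcv(struct sk_buff *skb);
	static struct sk_buff **foo6_gro_receive(struct sk_buff **head,
						 struct sk_buff *skb);
	static int foo6_gro_complete(struct sk_buff *skb);

	static struct inet6_protocol foo6_protocol = {
		.handler	= foo6_rcv,		/* normal receive path */
		.gro_receive	= foo6_gro_receive,	/* aggregate packets */
		.gro_complete	= foo6_gro_complete,	/* fix headers back up */
		.flags		= INET6_PROTO_NOPOLICY,
	};

	static int __init foo6_init(void)
	{
		return inet6_add_protocol(&foo6_protocol, IPPROTO_FOO);
	}
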
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index b0ceac2d6cd1..6a91a32a80c1 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -227,6 +227,13 @@ static int gprs_set_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
+static const struct net_device_ops gprs_netdev_ops = {
+	.ndo_open	= gprs_open,
+	.ndo_stop	= gprs_close,
+	.ndo_start_xmit	= gprs_xmit,
+	.ndo_change_mtu	= gprs_set_mtu,
+};
+
 static void gprs_setup(struct net_device *dev)
 {
 	dev->features = NETIF_F_FRAGLIST;
@@ -237,11 +244,8 @@ static void gprs_setup(struct net_device *dev)
 	dev->addr_len		= 0;
 	dev->tx_queue_len	= 10;
 
+	dev->netdev_ops		= &gprs_netdev_ops;
 	dev->destructor		= free_netdev;
-	dev->open		= gprs_open;
-	dev->stop		= gprs_close;
-	dev->hard_start_xmit	= gprs_xmit; /* mandatory */
-	dev->change_mtu		= gprs_set_mtu;
 }
 
 /*
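
This is the recurring net_device_ops conversion in this series (sch_teql.c
below gets the same treatment): the writable per-netdev function pointers
move into one const ops table shared by every instance of the device. A
self-contained sketch of the recipe on a hypothetical driver (all "foo"
names invented):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int foo_open(struct net_device *dev)  { return 0; }
	static int foo_close(struct net_device *dev) { return 0; }

	static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		dev_kfree_skb(skb);	/* a real driver queues to hardware */
		return 0;		/* NETDEV_TX_OK */
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open	= foo_open,
		.ndo_stop	= foo_close,
		.ndo_start_xmit	= foo_xmit,
	};

	static void foo_setup(struct net_device *dev)
	{
		/* was: dev->open = foo_open; dev->hard_start_xmit = ...; */
		dev->netdev_ops = &foo_netdev_ops;
	}
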
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index f3965df00559..33133d27b539 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -435,7 +435,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 	int i;
 
 	q->perturb_timer.function = sfq_perturbation;
-	q->perturb_timer.data = (unsigned long)sch;;
+	q->perturb_timer.data = (unsigned long)sch;
 	init_timer_deferrable(&q->perturb_timer);
 
 	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index cfc8e7caba62..ec697cebb63b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -289,9 +289,9 @@ restart:
 
 	do {
 		struct net_device *slave = qdisc_dev(q);
-		struct netdev_queue *slave_txq;
+		struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
+		const struct net_device_ops *slave_ops = slave->netdev_ops;
 
-		slave_txq = netdev_get_tx_queue(slave, 0);
 		if (slave_txq->qdisc_sleeping != q)
 			continue;
 		if (__netif_subqueue_stopped(slave, subq) ||
@@ -305,7 +305,7 @@ restart:
 		if (__netif_tx_trylock(slave_txq)) {
 			if (!netif_tx_queue_stopped(slave_txq) &&
 			    !netif_tx_queue_frozen(slave_txq) &&
-			    slave->hard_start_xmit(skb, slave) == 0) {
+			    slave_ops->ndo_start_xmit(skb, slave) == 0) {
 				__netif_tx_unlock(slave_txq);
 				master->slaves = NEXT_SLAVE(q);
 				netif_wake_queue(dev);
@@ -420,6 +420,14 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
+static const struct net_device_ops teql_netdev_ops = {
+	.ndo_open	= teql_master_open,
+	.ndo_stop	= teql_master_close,
+	.ndo_start_xmit	= teql_master_xmit,
+	.ndo_get_stats	= teql_master_stats,
+	.ndo_change_mtu	= teql_master_mtu,
+};
+
 static __init void teql_master_setup(struct net_device *dev)
 {
 	struct teql_master *master = netdev_priv(dev);
@@ -436,11 +444,7 @@ static __init void teql_master_setup(struct net_device *dev)
 	ops->destroy = teql_destroy;
 	ops->owner = THIS_MODULE;
 
-	dev->open		= teql_master_open;
-	dev->hard_start_xmit	= teql_master_xmit;
-	dev->stop		= teql_master_close;
-	dev->get_stats		= teql_master_stats;
-	dev->change_mtu		= teql_master_mtu;
+	dev->netdev_ops		= &teql_netdev_ops;
 	dev->type		= ARPHRD_VOID;
 	dev->mtu		= 1500;
 	dev->tx_queue_len	= 100;
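
The restart-loop hunk above also shows the caller side of the conversion:
code outside a driver can no longer jump through dev->hard_start_xmit and
must fetch the ops table first. Reduced to its essence (hypothetical helper,
not part of this patch):

	#include <linux/netdevice.h>

	/* How a non-driver caller transmits one skb now that the
	 * hard_start_xmit pointer lives in net_device_ops. */
	static inline int xmit_via_ops(struct sk_buff *skb, struct net_device *dev)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		return ops->ndo_start_xmit(skb, dev);
	}
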
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 20c576f530fa..56935bbc1496 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -489,7 +489,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
 		return 0;
 
 out_err:
-	/* Clean up any successfull allocations */
+	/* Clean up any successful allocations */
 	sctp_auth_destroy_hmacs(ep->auth_hmacs);
 	return -ENOMEM;
 }
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig
index 0bdbb6928205..18495cdcd10d 100644
--- a/net/wimax/Kconfig
+++ b/net/wimax/Kconfig
@@ -1,9 +1,23 @@
 #
 # WiMAX LAN device configuration
 #
+# Note the ugly 'depends on' on WIMAX: that disallows RFKILL to be a
+# module if WIMAX is to be linked in. The WiMAX code is done in such a
+# way that it doesn't require an explicit dependency on RFKILL in
+# case an embedded system wants to rip it out.
+#
+# As well, enablement of the RFKILL code means we need the INPUT layer
+# support to inject events coming from hw rfkill switches. That
+# dependency could be killed if input.h provided appropriate means to
+# work when input is disabled.
+
+comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled"
+	depends on INPUT = n && RFKILL != n
 
 menuconfig WIMAX
 	tristate "WiMAX Wireless Broadband support"
+	depends on (y && RFKILL != m) || m
+	depends on (INPUT && RFKILL != n) || RFKILL = n
 	help
 
 	  Select to configure support for devices that provide
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c
index d3b88558682c..5e685f7eda90 100644
--- a/net/wimax/id-table.c
+++ b/net/wimax/id-table.c
@@ -123,15 +123,17 @@ void wimax_id_table_rm(struct wimax_dev *wimax_dev)
 /*
  * Release the gennetlink family id / mapping table
  *
- * On debug, verify that the table is empty upon removal.
+ * On debug, verify that the table is empty upon removal. We want the
+ * code always compiled, to ensure it doesn't bit rot. It will be
+ * compiled out if CONFIG_BUG is disabled.
  */
 void wimax_id_table_release(void)
 {
+	struct wimax_dev *wimax_dev;
+
 #ifndef CONFIG_BUG
 	return;
 #endif
-	struct wimax_dev *wimax_dev;
-
 	spin_lock(&wimax_id_table_lock);
 	list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
 		printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n",
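
The reshuffle in wimax_id_table_release() is not cosmetic: the kernel is
compiled as C90, where declarations may not follow statements, and with
CONFIG_BUG=n the old layout preprocessed to a `return;` statement followed by
a declaration. A userspace analogue of the fixed shape (names invented;
moving `int i;` below the #ifdef reproduces the old error under
gcc -std=c90 -pedantic-errors):

	#include <stdio.h>

	void release_table(void)
	{
		int i;	/* hoisted above the early return: valid C90 either way */
	#ifdef NDEBUG	/* plays the role of "#ifndef CONFIG_BUG" */
		return;
	#endif
		for (i = 0; i < 4; i++)
			printf("entry %d not cleared\n", i);
	}
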
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 8745bac173f1..2b75aee04217 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -71,7 +71,7 @@
 #define D_SUBMODULE op_rfkill
 #include "debug-levels.h"
 
-#ifdef CONFIG_RFKILL
+#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
 
 
 /**
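
The op-rfkill.c guard widens because CONFIG_RFKILL alone is only defined when
rfkill is built in (=y); a tristate built as a module defines
CONFIG_RFKILL_MODULE instead, so the old test silently compiled the rfkill
support out whenever RFKILL=m. The idiom, with a hypothetical option FOO
(later kernels wrap this test as IS_ENABLED()):

	/* Defined for FOO=y: CONFIG_FOO
	 * Defined for FOO=m: CONFIG_FOO_MODULE
	 * Defined for FOO=n: neither */
	#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
	/* code that must build whether FOO is built in or modular */
	#endif
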
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index e49a2d1ef1e4..cb6a5bb85d80 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1055,8 +1055,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
 			return private(dev, iwr, cmd, info, handler);
 	}
 	/* Old driver API : call driver ioctl handler */
-	if (dev->do_ioctl)
-		return dev->do_ioctl(dev, ifr, cmd);
+	if (dev->netdev_ops->ndo_do_ioctl)
+		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
 	return -EOPNOTSUPP;
 }
 
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 5ba78701adc3..3aacd0fe7179 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -513,11 +513,14 @@ int devcgroup_inode_mknod(int mode, dev_t dev)
 	struct dev_cgroup *dev_cgroup;
 	struct dev_whitelist_item *wh;
 
+	if (!S_ISBLK(mode) && !S_ISCHR(mode))
+		return 0;
+
 	rcu_read_lock();
 
 	dev_cgroup = task_devcgroup(current);
 
-	list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
+	list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
 		if (wh->type & DEV_ALL)
 			goto acc_check;
 		if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode))
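
Two independent fixes share this hunk: non-device nodes now bail out before
touching the whitelist, and the traversal becomes list_for_each_entry_rcu(),
which lockless readers need because the plain iterator lacks the read-side
ordering that pairs with list_add_rcu() on the update side. The
reader/updater split, sketched on a hypothetical list (all "foo" names
invented):

	#include <linux/rculist.h>
	#include <linux/types.h>

	struct foo_item {
		struct list_head node;	/* linked with list_add_rcu() by updaters */
		u32 id;
	};

	static bool foo_allowed(struct list_head *whitelist, u32 id)
	{
		struct foo_item *item;
		bool ok = false;

		rcu_read_lock();
		list_for_each_entry_rcu(item, whitelist, node) {
			if (item->id == id) {
				ok = true;
				break;
			}
		}
		rcu_read_unlock();
		return ok;
	}
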
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index bf107a389ac1..71e2b914363e 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -569,7 +569,7 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
 	if (skp == NULL)
 		goto out;
 
-	rule += SMK_LABELLEN;;
+	rule += SMK_LABELLEN;
 	ret = sscanf(rule, "%d", &maplevel);
 	if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL)
 		goto out;
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index 74c823d60f91..bc8d654576c0 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -187,7 +187,7 @@ static int au1x_pcm_dbdma_realloc(struct au1xpsc_audio_dmadata *pcd,
 				au1x_pcm_dmatx_cb, (void *)pcd);
 
 	if (!pcd->ddma_chan)
-		return -ENOMEM;;
+		return -ENOMEM;
 
 	au1xxx_dbdma_set_devwidth(pcd->ddma_chan, msbits);
 	au1xxx_dbdma_ring_alloc(pcd->ddma_chan, 2);
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index 74abc9b4f1cc..366049d8578c 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -212,7 +212,7 @@ davinci_pcm_pointer(struct snd_pcm_substream *substream)
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		count = src - runtime->dma_addr;
 	else
-		count = dst - runtime->dma_addr;;
+		count = dst - runtime->dma_addr;
 
 	spin_unlock(&prtd->lock);
 